1/*
2 * This file was generated automatically by gen-mterp.py for 'x86'.
3 *
4 * --> DO NOT EDIT <--
5 */
6
7/* File: c/header.cpp */
8/*
9 * Copyright (C) 2008 The Android Open Source Project
10 *
11 * Licensed under the Apache License, Version 2.0 (the "License");
12 * you may not use this file except in compliance with the License.
13 * You may obtain a copy of the License at
14 *
15 * http://www.apache.org/licenses/LICENSE-2.0
16 *
17 * Unless required by applicable law or agreed to in writing, software
18 * distributed under the License is distributed on an "AS IS" BASIS,
19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 * See the License for the specific language governing permissions and
21 * limitations under the License.
22 */
23
24/* common includes */
25#include "Dalvik.h"
26#include "interp/InterpDefs.h"
27#include "mterp/Mterp.h"
28#include <math.h> // needed for fmod, fmodf
29#include "mterp/common/FindInterface.h"
30
31/*
32 * Configuration defines. These affect the C implementations, i.e. the
33 * portable interpreter(s) and C stubs.
34 *
35 * Some defines are controlled by the Makefile, e.g.:
36 * WITH_INSTR_CHECKS
37 * WITH_TRACKREF_CHECKS
38 * EASY_GDB
39 * NDEBUG
40 */
41
42#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia (slow!) */
43# define CHECK_BRANCH_OFFSETS
44# define CHECK_REGISTER_INDICES
45#endif
46
47/*
48 * Some architectures require 64-bit alignment for access to 64-bit data
49 * types. We can't just use pointers to copy 64-bit values out of our
50 * interpreted register set, because gcc may assume the pointer target is
51 * aligned and generate invalid code.
52 *
53 * There are two common approaches:
54 * (1) Use a union that defines a 32-bit pair and a 64-bit value.
55 * (2) Call memcpy().
56 *
57 * Depending upon what compiler you're using and what options are specified,
58 * one may be faster than the other. For example, the compiler might
59 * convert a memcpy() of 8 bytes into a series of instructions and omit
60 * the call. The union version could cause some strange side-effects,
61 * e.g. for a while ARM gcc thought it needed separate storage for each
62 * inlined instance, and generated instructions to zero out ~700 bytes of
63 * stack space at the top of the interpreter.
64 *
65 * The default is to use memcpy(). The current gcc for ARM seems to do
66 * better with the union.
67 */
68#if defined(__ARM_EABI__)
69# define NO_UNALIGN_64__UNION
70#endif
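
/*
 * Illustrative sketch (not part of the generated interpreter): the direct
 * cast below is exactly what the comment above warns against -- the compiler
 * may assume the s8* is 8-byte aligned and emit a doubleword load that faults
 * or misbehaves on a frame that is only u4-aligned.  memcpy() is the
 * alignment-agnostic alternative.  The helper names are hypothetical.
 */
#if 0
static s8 unsafeGetLong(const u4* ptr, int idx)
{
    return *(const s8*)(ptr + idx);         /* assumes 8-byte alignment */
}
static s8 portableGetLong(const u4* ptr, int idx)
{
    s8 val;
    memcpy(&val, ptr + idx, sizeof(val));   /* no alignment assumption */
    return val;
}
#endif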
71
72
73//#define LOG_INSTR /* verbose debugging */
74/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
75
76/*
77 * Keep a tally of accesses to fields. Currently only works if full DEX
78 * optimization is disabled.
79 */
80#ifdef PROFILE_FIELD_ACCESS
81# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
82# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
83#else
84# define UPDATE_FIELD_GET(_field) ((void)0)
85# define UPDATE_FIELD_PUT(_field) ((void)0)
86#endif
87
88/*
89 * Export another copy of the PC on every instruction; this is largely
90 * redundant with EXPORT_PC and the debugger code. This value can be
91 * compared against what we have stored on the stack with EXPORT_PC to
92 * help ensure that we aren't missing any export calls.
93 */
94#if WITH_EXTRA_GC_CHECKS > 1
95# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
96#else
97# define EXPORT_EXTRA_PC()
98#endif
99
100/*
101 * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
102 *
103 * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
104 *
105 * We don't advance the program counter until we finish an instruction or
106 * branch, because we don't want to have to unroll the PC if there's an
107 * exception.
108 */
109#ifdef CHECK_BRANCH_OFFSETS
110# define ADJUST_PC(_offset) do { \
111 int myoff = _offset; /* deref only once */ \
112 if (pc + myoff < curMethod->insns || \
113 pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
114 { \
115 char* desc; \
116 desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
117 LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
118 myoff, (int) (pc - curMethod->insns), \
119 curMethod->clazz->descriptor, curMethod->name, desc); \
120 free(desc); \
121 dvmAbort(); \
122 } \
123 pc += myoff; \
124 EXPORT_EXTRA_PC(); \
125 } while (false)
126#else
127# define ADJUST_PC(_offset) do { \
128 pc += _offset; \
129 EXPORT_EXTRA_PC(); \
130 } while (false)
131#endif
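
/*
 * Usage sketch (hypothetical handler fragments, not generated code): a
 * two-code-unit instruction falls through with ADJUST_PC(2), while a taken
 * branch applies the sign-extended 16-bit offset from the second code unit.
 */
#if 0
    ADJUST_PC(2);                   /* advance past opcode + one arg unit */
    ADJUST_PC((s2) FETCH(1));       /* or: apply a relative branch offset */
#endif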
132
133/*
134 * If enabled, log instructions as we execute them.
135 */
136#ifdef LOG_INSTR
137# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
138# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
139# define ILOG(_level, ...) do { \
140 char debugStrBuf[128]; \
141 snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
142 if (curMethod != NULL) \
143 LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
144 self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
145 else \
146 LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
147 self->threadId, debugStrBuf); \
148 } while(false)
149void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
150# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
151static const char kSpacing[] = " ";
152#else
153# define ILOGD(...) ((void)0)
154# define ILOGV(...) ((void)0)
155# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
156#endif
157
158/* get a long from an array of u4 */
159static inline s8 getLongFromArray(const u4* ptr, int idx)
160{
161#if defined(NO_UNALIGN_64__UNION)
162 union { s8 ll; u4 parts[2]; } conv;
163
164 ptr += idx;
165 conv.parts[0] = ptr[0];
166 conv.parts[1] = ptr[1];
167 return conv.ll;
168#else
169 s8 val;
170 memcpy(&val, &ptr[idx], 8);
171 return val;
172#endif
173}
174
175/* store a long into an array of u4 */
176static inline void putLongToArray(u4* ptr, int idx, s8 val)
177{
178#if defined(NO_UNALIGN_64__UNION)
179 union { s8 ll; u4 parts[2]; } conv;
180
181 ptr += idx;
182 conv.ll = val;
183 ptr[0] = conv.parts[0];
184 ptr[1] = conv.parts[1];
185#else
186 memcpy(&ptr[idx], &val, 8);
187#endif
188}
189
190/* get a double from an array of u4 */
191static inline double getDoubleFromArray(const u4* ptr, int idx)
192{
193#if defined(NO_UNALIGN_64__UNION)
194 union { double d; u4 parts[2]; } conv;
195
196 ptr += idx;
197 conv.parts[0] = ptr[0];
198 conv.parts[1] = ptr[1];
199 return conv.d;
200#else
201 double dval;
202 memcpy(&dval, &ptr[idx], 8);
203 return dval;
204#endif
205}
206
207/* store a double into an array of u4 */
208static inline void putDoubleToArray(u4* ptr, int idx, double dval)
209{
210#if defined(NO_UNALIGN_64__UNION)
211 union { double d; u4 parts[2]; } conv;
212
213 ptr += idx;
214 conv.d = dval;
215 ptr[0] = conv.parts[0];
216 ptr[1] = conv.parts[1];
217#else
218 memcpy(&ptr[idx], &dval, 8);
219#endif
220}
221
222/*
223 * If enabled, validate the register number on every access. Otherwise,
224 * just do an array access.
225 *
226 * Assumes the existence of "u4* fp".
227 *
228 * "_idx" may be referenced more than once.
229 */
230#ifdef CHECK_REGISTER_INDICES
231# define GET_REGISTER(_idx) \
232 ( (_idx) < curMethod->registersSize ? \
233 (fp[(_idx)]) : (assert(!"bad reg"),1969) )
234# define SET_REGISTER(_idx, _val) \
235 ( (_idx) < curMethod->registersSize ? \
236 (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
237# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
238# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
239# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
240# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
241# define GET_REGISTER_WIDE(_idx) \
242 ( (_idx) < curMethod->registersSize-1 ? \
243 getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
244# define SET_REGISTER_WIDE(_idx, _val) \
245 ( (_idx) < curMethod->registersSize-1 ? \
246 putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
247# define GET_REGISTER_FLOAT(_idx) \
248 ( (_idx) < curMethod->registersSize ? \
249 (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
250# define SET_REGISTER_FLOAT(_idx, _val) \
251 ( (_idx) < curMethod->registersSize ? \
252 (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
253# define GET_REGISTER_DOUBLE(_idx) \
254 ( (_idx) < curMethod->registersSize-1 ? \
255 getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
256# define SET_REGISTER_DOUBLE(_idx, _val) \
257 ( (_idx) < curMethod->registersSize-1 ? \
258 putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
259#else
260# define GET_REGISTER(_idx) (fp[(_idx)])
261# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
262# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
263# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
264# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
265# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
266# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
267# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
268# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
269# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
270# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
271# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
272#endif
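
/*
 * Usage sketch (hypothetical fragment): a move-wide style handler copies a
 * register pair through the _WIDE accessors so 64-bit frame slots always go
 * through the alignment-safe helpers above.
 */
#if 0
    SET_REGISTER_WIDE(0, GET_REGISTER_WIDE(2));     /* v0/v1 := v2/v3 */
    SET_REGISTER_FLOAT(4, GET_REGISTER_FLOAT(5));   /* v4 := v5 (float bits) */
#endif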
273
274/*
275 * Get 16 bits from the specified offset of the program counter. We always
276 * want to load 16 bits at a time from the instruction stream -- it's more
277 * efficient than 8 and won't have the alignment problems that 32 might.
278 *
279 * Assumes existence of "const u2* pc".
280 */
281#define FETCH(_offset) (pc[(_offset)])
282
283/*
284 * Extract instruction byte from 16-bit fetch (_inst is a u2).
285 */
286#define INST_INST(_inst) ((_inst) & 0xff)
287
288/*
289 * Replace the opcode (used when handling breakpoints). _opcode is a u1.
290 */
291#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode)
292
293/*
294 * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
295 */
296#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
297#define INST_B(_inst) ((_inst) >> 12)
298
299/*
300 * Get the 8-bit "vAA" register index from the instruction word.
301 * (_inst is u2)
302 */
303#define INST_AA(_inst) ((_inst) >> 8)
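
/*
 * Decoding sketch (hypothetical values): for a 23x-format instruction
 * "op vAA, vBB, vCC", the opcode and vAA are packed into the first code
 * unit and vBB/vCC into the second, which is how the handlers below unpack
 * their operands.
 */
#if 0
    u2 inst = FETCH(0);
    u2 regs = FETCH(1);
    u1 opcode = INST_INST(inst);    /* low byte of the first unit */
    u2 vAA = INST_AA(inst);         /* high byte of the first unit */
    u2 vBB = regs & 0xff;
    u2 vCC = regs >> 8;
#endif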
304
305/*
306 * The current PC must be available to Throwable constructors, e.g.
307 * those created by the various exception throw routines, so that the
308 * exception stack trace can be generated correctly. If we don't do this,
309 * the offset within the current method won't be shown correctly. See the
310 * notes in Exception.c.
311 *
312 * This is also used to determine the address for precise GC.
313 *
314 * Assumes existence of "u4* fp" and "const u2* pc".
315 */
316#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
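
/*
 * Usage sketch (hypothetical fragment): any handler that may allocate,
 * throw, or otherwise re-enter the VM exports the pc first so that stack
 * walks and precise GC see the current bytecode address.
 */
#if 0
    EXPORT_PC();
    newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);   /* may GC or throw */
    if (newObj == NULL)
        GOTO_exceptionThrown();
#endif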
317
318/*
319 * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
320 * pc has already been exported to the stack.
321 *
322 * Perform additional checks on debug builds.
323 *
324 * Use this to check for NULL when the instruction handler calls into
325 * something that could throw an exception (so we have already called
326 * EXPORT_PC at the top).
327 */
328static inline bool checkForNull(Object* obj)
329{
330 if (obj == NULL) {
331 dvmThrowNullPointerException(NULL);
332 return false;
333 }
334#ifdef WITH_EXTRA_OBJECT_VALIDATION
335 if (!dvmIsValidObject(obj)) {
336 LOGE("Invalid object %p\n", obj);
337 dvmAbort();
338 }
339#endif
340#ifndef NDEBUG
341 if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
342 /* probable heap corruption */
343 LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
344 dvmAbort();
345 }
346#endif
347 return true;
348}
349
350/*
351 * Check to see if "obj" is NULL. If so, export the PC into the stack
352 * frame and throw an exception.
353 *
354 * Perform additional checks on debug builds.
355 *
356 * Use this to check for NULL when the instruction handler doesn't do
357 * anything else that can throw an exception.
358 */
359static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
360{
361 if (obj == NULL) {
362 EXPORT_PC();
363 dvmThrowNullPointerException(NULL);
364 return false;
365 }
366#ifdef WITH_EXTRA_OBJECT_VALIDATION
367 if (!dvmIsValidObject(obj)) {
368 LOGE("Invalid object %p\n", obj);
369 dvmAbort();
370 }
371#endif
372#ifndef NDEBUG
373 if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
374 /* probable heap corruption */
375 LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
376 dvmAbort();
377 }
378#endif
379 return true;
380}
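
/*
 * Usage sketch (hypothetical fragment): handlers that have already called
 * EXPORT_PC() use checkForNull(); the "-quick" field handlers, which do not
 * otherwise export, use checkForNullExportPC() so a thrown NPE still sees a
 * valid pc.
 */
#if 0
    obj = (Object*) GET_REGISTER(vsrc1);
    if (!checkForNullExportPC(obj, fp, pc))
        GOTO_exceptionThrown();
#endif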
381
382/* File: cstubs/stubdefs.cpp */
383/*
384 * In the C mterp stubs, "goto" is a function call followed immediately
385 * by a return.
386 */
387
388#define GOTO_TARGET_DECL(_target, ...) \
389 extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
390
391/* (void)xxx to quiet unused variable compiler warnings. */
392#define GOTO_TARGET(_target, ...) \
393 void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
394 u2 ref, vsrc1, vsrc2, vdst; \
395 u2 inst = FETCH(0); \
396 const Method* methodToCall; \
397 StackSaveArea* debugSaveArea; \
398 (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
399 (void)methodToCall; (void)debugSaveArea;
400
401#define GOTO_TARGET_END }
402
403/*
404 * Redefine what used to be local variable accesses into Thread struct
405 * references. (These are undefined down in "footer.c".)
406 */
407#define retval self->retval
408#define pc self->interpSave.pc
409#define fp self->interpSave.curFrame
410#define curMethod self->interpSave.method
411#define methodClassDex self->interpSave.methodClassDex
412#define debugTrackedRefStart self->interpSave.debugTrackedRefStart
413
414/* ugh */
415#define STUB_HACK(x) x
416#if defined(WITH_JIT)
417#define JIT_STUB_HACK(x) x
418#else
419#define JIT_STUB_HACK(x)
420#endif
421
422/*
423 * InterpSave's pc and fp must be valid when breaking out to a
424 * "Reportxxx" routine. Because the portable interpreter uses local
425 * variables for these, we must flush them first. Stubs, however, use
426 * the interpSave vars directly, so this is a nop for stubs.
427 */
428#define PC_FP_TO_SELF()
429#define PC_TO_SELF()
430
431/*
432 * Opcode handler framing macros. Here, each opcode is a separate function
433 * that takes a "self" argument and returns void. We can't declare
434 * these "static" because they may be called from an assembly stub.
435 * (void)xxx to quiet unused variable compiler warnings.
436 */
437#define HANDLE_OPCODE(_op) \
438 extern "C" void dvmMterp_##_op(Thread* self); \
439 void dvmMterp_##_op(Thread* self) { \
440 u4 ref; \
441 u2 vsrc1, vsrc2, vdst; \
442 u2 inst = FETCH(0); \
443 (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
444
445#define OP_END }
446
447/*
448 * Like the "portable" FINISH, but don't reload "inst", and return to caller
449 * when done. Further, debugger/profiler checks are handled
450 * before handler execution in mterp, so we don't do them here either.
451 */
452#if defined(WITH_JIT)
453#define FINISH(_offset) { \
454 ADJUST_PC(_offset); \
455 if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \
456 dvmCheckJit(pc, self); \
457 } \
458 return; \
459 }
460#else
461#define FINISH(_offset) { \
462 ADJUST_PC(_offset); \
463 return; \
464 }
465#endif
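
/*
 * Expansion sketch (illustrative only, not literal preprocessor output): a
 * trivial handler written as "HANDLE_OPCODE(OP_NOP) FINISH(1); OP_END"
 * becomes an ordinary function the assembly stubs can call, which advances
 * the pc and returns.
 */
#if 0
extern "C" void dvmMterp_OP_NOP(Thread* self);
void dvmMterp_OP_NOP(Thread* self) {
    u4 ref;
    u2 vsrc1, vsrc2, vdst;
    u2 inst = FETCH(0);
    (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
    ADJUST_PC(1);                   /* FINISH(1): advance pc, ... */
    return;                         /* ... then return to the stub */
}
#endif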
466
467
468/*
469 * The "goto label" statements turn into function calls followed by
470 * return statements. Some of the functions take arguments, which in the
471 * portable interpreter are handled by assigning values to globals.
472 */
473
474#define GOTO_exceptionThrown() \
475 do { \
476 dvmMterp_exceptionThrown(self); \
477 return; \
478 } while(false)
479
480#define GOTO_returnFromMethod() \
481 do { \
482 dvmMterp_returnFromMethod(self); \
483 return; \
484 } while(false)
485
486#define GOTO_invoke(_target, _methodCallRange, _jumboFormat) \
487 do { \
488 dvmMterp_##_target(self, _methodCallRange, _jumboFormat); \
489 return; \
490 } while(false)
491
492#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
493 do { \
494 dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \
495 _vsrc1, _vdst); \
496 return; \
497 } while(false)
498
499/*
500 * As a special case, "goto bail" turns into a longjmp.
501 */
502#define GOTO_bail() \
503 dvmMterpStdBail(self, false);
504
505/*
506 * Periodically check for thread suspension.
507 *
508 * While we're at it, see if a debugger has attached or the profiler has
509 * started.
510 */
511#define PERIODIC_CHECKS(_pcadj) { \
512 if (dvmCheckSuspendQuick(self)) { \
513 EXPORT_PC(); /* need for precise GC */ \
514 dvmCheckSuspendPending(self); \
515 } \
516 }
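
/*
 * Usage sketch (hypothetical fragment): branch handlers only run the
 * suspension check when the offset is negative, i.e. on loop back-edges,
 * which bounds how long a thread can run without hitting a suspend point.
 */
#if 0
    if (branchOffset < 0)
        PERIODIC_CHECKS(branchOffset);
    FINISH(branchOffset);
#endif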
517
518/* File: c/opcommon.cpp */
519/* forward declarations of goto targets */
520GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
521GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
522GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
523GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
524GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
525GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
526GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
527GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
528GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
529 u2 count, u2 regs);
530GOTO_TARGET_DECL(returnFromMethod);
531GOTO_TARGET_DECL(exceptionThrown);
532
533/*
534 * ===========================================================================
535 *
536 * What follows are opcode definitions shared between multiple opcodes with
537 * minor substitutions handled by the C pre-processor. These should probably
538 * use the mterp substitution mechanism instead, with the code here moved
539 * into common fragment files (like the asm "binop.S"), although it's hard
540 * to give up the C preprocessor in favor of the much simpler text subst.
541 *
542 * ===========================================================================
543 */
544
545#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
546 HANDLE_OPCODE(_opcode /*vA, vB*/) \
547 vdst = INST_A(inst); \
548 vsrc1 = INST_B(inst); \
549 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
550 SET_REGISTER##_totype(vdst, \
551 GET_REGISTER##_fromtype(vsrc1)); \
552 FINISH(1);
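
/*
 * Instantiation sketch (approximate; the concrete per-opcode lines live in
 * the generated opcode sections of this file): an int-to-double conversion
 * would be emitted along these lines, pasting GET_REGISTER_INT /
 * SET_REGISTER_DOUBLE into the handler body.
 */
#if 0
HANDLE_NUMCONV(OP_INT_TO_DOUBLE, "int-to-double", _INT, _DOUBLE)
OP_END
#endif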
553
554#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
555 _tovtype, _tortype) \
556 HANDLE_OPCODE(_opcode /*vA, vB*/) \
557 { \
558 /* spec defines specific handling for +/- inf and NaN values */ \
559 _fromvtype val; \
560 _tovtype intMin, intMax, result; \
561 vdst = INST_A(inst); \
562 vsrc1 = INST_B(inst); \
563 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
564 val = GET_REGISTER##_fromrtype(vsrc1); \
565 intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
566 intMax = ~intMin; \
567 result = (_tovtype) val; \
568 if (val >= intMax) /* +inf */ \
569 result = intMax; \
570 else if (val <= intMin) /* -inf */ \
571 result = intMin; \
572 else if (val != val) /* NaN */ \
573 result = 0; \
574 else \
575 result = (_tovtype) val; \
576 SET_REGISTER##_tortype(vdst, result); \
577 } \
578 FINISH(1);
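
/*
 * Standalone sketch of the saturation rule above (hypothetical helper, not
 * used by the interpreter): NaN converts to 0, +/- infinity and other
 * out-of-range values clamp to the integer min/max, everything else
 * truncates toward zero.
 */
#if 0
static s4 exampleFloatToInt(float val)
{
    s4 intMin = (s4) 1 << (sizeof(s4) * 8 - 1);     /* 0x80000000 */
    s4 intMax = ~intMin;                            /* 0x7fffffff */
    if (val >= intMax) return intMax;               /* +inf or too large */
    if (val <= intMin) return intMin;               /* -inf or too small */
    if (val != val) return 0;                       /* NaN */
    return (s4) val;
}
#endif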
579
580#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
581 HANDLE_OPCODE(_opcode /*vA, vB*/) \
582 vdst = INST_A(inst); \
583 vsrc1 = INST_B(inst); \
584 ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
585 SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
586 FINISH(1);
587
588/* NOTE: the comparison result is always a signed 4-byte integer */
589#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
590 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
591 { \
592 int result; \
593 u2 regs; \
594 _varType val1, val2; \
595 vdst = INST_AA(inst); \
596 regs = FETCH(1); \
597 vsrc1 = regs & 0xff; \
598 vsrc2 = regs >> 8; \
599 ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
600 val1 = GET_REGISTER##_type(vsrc1); \
601 val2 = GET_REGISTER##_type(vsrc2); \
602 if (val1 == val2) \
603 result = 0; \
604 else if (val1 < val2) \
605 result = -1; \
606 else if (val1 > val2) \
607 result = 1; \
608 else \
609 result = (_nanVal); \
610 ILOGV("+ result=%d\n", result); \
611 SET_REGISTER(vdst, result); \
612 } \
613 FINISH(2);
614
615#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
616 HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
617 vsrc1 = INST_A(inst); \
618 vsrc2 = INST_B(inst); \
619 if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
620 int branchOffset = (s2)FETCH(1); /* sign-extended */ \
621 ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
622 branchOffset); \
623 ILOGV("> branch taken"); \
624 if (branchOffset < 0) \
625 PERIODIC_CHECKS(branchOffset); \
626 FINISH(branchOffset); \
627 } else { \
628 ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
629 FINISH(2); \
630 }
631
632#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
633 HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
634 vsrc1 = INST_AA(inst); \
635 if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
636 int branchOffset = (s2)FETCH(1); /* sign-extended */ \
637 ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
638 ILOGV("> branch taken"); \
639 if (branchOffset < 0) \
640 PERIODIC_CHECKS(branchOffset); \
641 FINISH(branchOffset); \
642 } else { \
643 ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
644 FINISH(2); \
645 }
646
647#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
648 HANDLE_OPCODE(_opcode /*vA, vB*/) \
649 vdst = INST_A(inst); \
650 vsrc1 = INST_B(inst); \
651 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
652 SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
653 FINISH(1);
654
655#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
656 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
657 { \
658 u2 srcRegs; \
659 vdst = INST_AA(inst); \
660 srcRegs = FETCH(1); \
661 vsrc1 = srcRegs & 0xff; \
662 vsrc2 = srcRegs >> 8; \
663 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
664 if (_chkdiv != 0) { \
665 s4 firstVal, secondVal, result; \
666 firstVal = GET_REGISTER(vsrc1); \
667 secondVal = GET_REGISTER(vsrc2); \
668 if (secondVal == 0) { \
669 EXPORT_PC(); \
670 dvmThrowArithmeticException("divide by zero"); \
671 GOTO_exceptionThrown(); \
672 } \
673 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
674 if (_chkdiv == 1) \
675 result = firstVal; /* division */ \
676 else \
677 result = 0; /* remainder */ \
678 } else { \
679 result = firstVal _op secondVal; \
680 } \
681 SET_REGISTER(vdst, result); \
682 } else { \
683 /* non-div/rem case */ \
684 SET_REGISTER(vdst, \
685 (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
686 } \
687 } \
688 FINISH(2);
689
690#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
691 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
692 { \
693 u2 srcRegs; \
694 vdst = INST_AA(inst); \
695 srcRegs = FETCH(1); \
696 vsrc1 = srcRegs & 0xff; \
697 vsrc2 = srcRegs >> 8; \
698 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
699 SET_REGISTER(vdst, \
700 _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
701 } \
702 FINISH(2);
703
704#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
705 HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
706 vdst = INST_A(inst); \
707 vsrc1 = INST_B(inst); \
708 vsrc2 = FETCH(1); \
709 ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
710 (_opname), vdst, vsrc1, vsrc2); \
711 if (_chkdiv != 0) { \
712 s4 firstVal, result; \
713 firstVal = GET_REGISTER(vsrc1); \
714 if ((s2) vsrc2 == 0) { \
715 EXPORT_PC(); \
716 dvmThrowArithmeticException("divide by zero"); \
717 GOTO_exceptionThrown(); \
718 } \
719 if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
720 /* won't generate /lit16 instr for this; check anyway */ \
721 if (_chkdiv == 1) \
722 result = firstVal; /* division */ \
723 else \
724 result = 0; /* remainder */ \
725 } else { \
726 result = firstVal _op (s2) vsrc2; \
727 } \
728 SET_REGISTER(vdst, result); \
729 } else { \
730 /* non-div/rem case */ \
731 SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
732 } \
733 FINISH(2);
734
735#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
736 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
737 { \
738 u2 litInfo; \
739 vdst = INST_AA(inst); \
740 litInfo = FETCH(1); \
741 vsrc1 = litInfo & 0xff; \
742 vsrc2 = litInfo >> 8; /* constant */ \
743 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
744 (_opname), vdst, vsrc1, vsrc2); \
745 if (_chkdiv != 0) { \
746 s4 firstVal, result; \
747 firstVal = GET_REGISTER(vsrc1); \
748 if ((s1) vsrc2 == 0) { \
749 EXPORT_PC(); \
750 dvmThrowArithmeticException("divide by zero"); \
751 GOTO_exceptionThrown(); \
752 } \
753 if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
754 if (_chkdiv == 1) \
755 result = firstVal; /* division */ \
756 else \
757 result = 0; /* remainder */ \
758 } else { \
759 result = firstVal _op ((s1) vsrc2); \
760 } \
761 SET_REGISTER(vdst, result); \
762 } else { \
763 SET_REGISTER(vdst, \
764 (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
765 } \
766 } \
767 FINISH(2);
768
769#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
770 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
771 { \
772 u2 litInfo; \
773 vdst = INST_AA(inst); \
774 litInfo = FETCH(1); \
775 vsrc1 = litInfo & 0xff; \
776 vsrc2 = litInfo >> 8; /* constant */ \
777 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
778 (_opname), vdst, vsrc1, vsrc2); \
779 SET_REGISTER(vdst, \
780 _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
781 } \
782 FINISH(2);
783
784#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
785 HANDLE_OPCODE(_opcode /*vA, vB*/) \
786 vdst = INST_A(inst); \
787 vsrc1 = INST_B(inst); \
788 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
789 if (_chkdiv != 0) { \
790 s4 firstVal, secondVal, result; \
791 firstVal = GET_REGISTER(vdst); \
792 secondVal = GET_REGISTER(vsrc1); \
793 if (secondVal == 0) { \
794 EXPORT_PC(); \
795 dvmThrowArithmeticException("divide by zero"); \
796 GOTO_exceptionThrown(); \
797 } \
798 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
799 if (_chkdiv == 1) \
800 result = firstVal; /* division */ \
801 else \
802 result = 0; /* remainder */ \
803 } else { \
804 result = firstVal _op secondVal; \
805 } \
806 SET_REGISTER(vdst, result); \
807 } else { \
808 SET_REGISTER(vdst, \
809 (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
810 } \
811 FINISH(1);
812
813#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
814 HANDLE_OPCODE(_opcode /*vA, vB*/) \
815 vdst = INST_A(inst); \
816 vsrc1 = INST_B(inst); \
817 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
818 SET_REGISTER(vdst, \
819 _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
820 FINISH(1);
821
822#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
823 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
824 { \
825 u2 srcRegs; \
826 vdst = INST_AA(inst); \
827 srcRegs = FETCH(1); \
828 vsrc1 = srcRegs & 0xff; \
829 vsrc2 = srcRegs >> 8; \
830 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
831 if (_chkdiv != 0) { \
832 s8 firstVal, secondVal, result; \
833 firstVal = GET_REGISTER_WIDE(vsrc1); \
834 secondVal = GET_REGISTER_WIDE(vsrc2); \
835 if (secondVal == 0LL) { \
836 EXPORT_PC(); \
837 dvmThrowArithmeticException("divide by zero"); \
838 GOTO_exceptionThrown(); \
839 } \
840 if ((u8)firstVal == 0x8000000000000000ULL && \
841 secondVal == -1LL) \
842 { \
843 if (_chkdiv == 1) \
844 result = firstVal; /* division */ \
845 else \
846 result = 0; /* remainder */ \
847 } else { \
848 result = firstVal _op secondVal; \
849 } \
850 SET_REGISTER_WIDE(vdst, result); \
851 } else { \
852 SET_REGISTER_WIDE(vdst, \
853 (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
854 } \
855 } \
856 FINISH(2);
857
858#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
859 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
860 { \
861 u2 srcRegs; \
862 vdst = INST_AA(inst); \
863 srcRegs = FETCH(1); \
864 vsrc1 = srcRegs & 0xff; \
865 vsrc2 = srcRegs >> 8; \
866 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
867 SET_REGISTER_WIDE(vdst, \
868 _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
869 } \
870 FINISH(2);
871
872#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
873 HANDLE_OPCODE(_opcode /*vA, vB*/) \
874 vdst = INST_A(inst); \
875 vsrc1 = INST_B(inst); \
876 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
877 if (_chkdiv != 0) { \
878 s8 firstVal, secondVal, result; \
879 firstVal = GET_REGISTER_WIDE(vdst); \
880 secondVal = GET_REGISTER_WIDE(vsrc1); \
881 if (secondVal == 0LL) { \
882 EXPORT_PC(); \
883 dvmThrowArithmeticException("divide by zero"); \
884 GOTO_exceptionThrown(); \
885 } \
886 if ((u8)firstVal == 0x8000000000000000ULL && \
887 secondVal == -1LL) \
888 { \
889 if (_chkdiv == 1) \
890 result = firstVal; /* division */ \
891 else \
892 result = 0; /* remainder */ \
893 } else { \
894 result = firstVal _op secondVal; \
895 } \
896 SET_REGISTER_WIDE(vdst, result); \
897 } else { \
898 SET_REGISTER_WIDE(vdst, \
899 (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
900 } \
901 FINISH(1);
902
903#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
904 HANDLE_OPCODE(_opcode /*vA, vB*/) \
905 vdst = INST_A(inst); \
906 vsrc1 = INST_B(inst); \
907 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
908 SET_REGISTER_WIDE(vdst, \
909 _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
910 FINISH(1);
911
912#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
913 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
914 { \
915 u2 srcRegs; \
916 vdst = INST_AA(inst); \
917 srcRegs = FETCH(1); \
918 vsrc1 = srcRegs & 0xff; \
919 vsrc2 = srcRegs >> 8; \
920 ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
921 SET_REGISTER_FLOAT(vdst, \
922 GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
923 } \
924 FINISH(2);
925
926#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
927 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
928 { \
929 u2 srcRegs; \
930 vdst = INST_AA(inst); \
931 srcRegs = FETCH(1); \
932 vsrc1 = srcRegs & 0xff; \
933 vsrc2 = srcRegs >> 8; \
934 ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
935 SET_REGISTER_DOUBLE(vdst, \
936 GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
937 } \
938 FINISH(2);
939
940#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
941 HANDLE_OPCODE(_opcode /*vA, vB*/) \
942 vdst = INST_A(inst); \
943 vsrc1 = INST_B(inst); \
944 ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
945 SET_REGISTER_FLOAT(vdst, \
946 GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
947 FINISH(1);
948
949#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
950 HANDLE_OPCODE(_opcode /*vA, vB*/) \
951 vdst = INST_A(inst); \
952 vsrc1 = INST_B(inst); \
953 ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
954 SET_REGISTER_DOUBLE(vdst, \
955 GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
956 FINISH(1);
957
958#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
959 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
960 { \
961 ArrayObject* arrayObj; \
962 u2 arrayInfo; \
963 EXPORT_PC(); \
964 vdst = INST_AA(inst); \
965 arrayInfo = FETCH(1); \
966 vsrc1 = arrayInfo & 0xff; /* array ptr */ \
967 vsrc2 = arrayInfo >> 8; /* index */ \
968 ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
969 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
970 if (!checkForNull((Object*) arrayObj)) \
971 GOTO_exceptionThrown(); \
972 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
973 dvmThrowArrayIndexOutOfBoundsException( \
974 arrayObj->length, GET_REGISTER(vsrc2)); \
975 GOTO_exceptionThrown(); \
976 } \
977 SET_REGISTER##_regsize(vdst, \
978 ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \
979 ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
980 } \
981 FINISH(2);
982
983#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
984 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
985 { \
986 ArrayObject* arrayObj; \
987 u2 arrayInfo; \
988 EXPORT_PC(); \
989 vdst = INST_AA(inst); /* AA: source value */ \
990 arrayInfo = FETCH(1); \
991 vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
992 vsrc2 = arrayInfo >> 8; /* CC: index */ \
993 ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
994 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
995 if (!checkForNull((Object*) arrayObj)) \
996 GOTO_exceptionThrown(); \
997 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
998 dvmThrowArrayIndexOutOfBoundsException( \
999 arrayObj->length, GET_REGISTER(vsrc2)); \
1000 GOTO_exceptionThrown(); \
1001 } \
1002 ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
1003 ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \
1004 GET_REGISTER##_regsize(vdst); \
1005 } \
1006 FINISH(2);
1007
1008/*
1009 * It's possible to get a bad value out of a field with sub-32-bit stores
1010 * because the -quick versions always operate on 32 bits. Consider:
1011 * short foo = -1 (sets a 32-bit register to 0xffffffff)
1012 * iput-quick foo (writes all 32 bits to the field)
1013 * short bar = 1 (sets a 32-bit register to 0x00000001)
1014 * iput-short (writes the low 16 bits to the field)
1015 * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
1016 * This can only happen when optimized and non-optimized code has interleaved
1017 * access to the same field. This is unlikely but possible.
1018 *
1019 * The easiest way to fix this is to always read/write 32 bits at a time. On
1020 * a device with a 16-bit data bus this is sub-optimal. (The alternative
1021 * approach is to have sub-int versions of iget-quick, but now we're wasting
1022 * Dalvik instruction space and making it less likely that handler code will
1023 * already be in the CPU i-cache.)
1024 */
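
/*
 * Worked sketch of the hazard described above (hypothetical, little-endian
 * layout as on x86): a 16-bit field written once through the 32-bit -quick
 * path and once through the 16-bit path reads back with stale high bits.
 */
#if 0
    union { s4 wide; s2 narrow; } field;    /* 'narrow' aliases the low half */
    field.wide = -1;                        /* iput-quick: short foo = -1 writes 32 bits */
    field.narrow = 1;                       /* iput-short: writes only the low 16 bits */
    /* field.wide (iget-quick) now reads back 0xffff0001, not 0x00000001 */
#endif
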
1025#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
1026 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1027 { \
1028 InstField* ifield; \
1029 Object* obj; \
1030 EXPORT_PC(); \
1031 vdst = INST_A(inst); \
1032 vsrc1 = INST_B(inst); /* object ptr */ \
1033 ref = FETCH(1); /* field ref */ \
1034 ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
1035 obj = (Object*) GET_REGISTER(vsrc1); \
1036 if (!checkForNull(obj)) \
1037 GOTO_exceptionThrown(); \
1038 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
1039 if (ifield == NULL) { \
1040 ifield = dvmResolveInstField(curMethod->clazz, ref); \
1041 if (ifield == NULL) \
1042 GOTO_exceptionThrown(); \
1043 } \
1044 SET_REGISTER##_regsize(vdst, \
1045 dvmGetField##_ftype(obj, ifield->byteOffset)); \
1046 ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
1047 (u8) GET_REGISTER##_regsize(vdst)); \
1048 UPDATE_FIELD_GET(&ifield->field); \
1049 } \
1050 FINISH(2);
1051
1052#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
1053 HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/) \
1054 { \
1055 InstField* ifield; \
1056 Object* obj; \
1057 EXPORT_PC(); \
1058 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
1059 vdst = FETCH(3); \
1060 vsrc1 = FETCH(4); /* object ptr */ \
1061 ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x", \
1062 (_opname), vdst, vsrc1, ref); \
1063 obj = (Object*) GET_REGISTER(vsrc1); \
1064 if (!checkForNull(obj)) \
1065 GOTO_exceptionThrown(); \
1066 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
1067 if (ifield == NULL) { \
1068 ifield = dvmResolveInstField(curMethod->clazz, ref); \
1069 if (ifield == NULL) \
1070 GOTO_exceptionThrown(); \
1071 } \
1072 SET_REGISTER##_regsize(vdst, \
1073 dvmGetField##_ftype(obj, ifield->byteOffset)); \
1074 ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
1075 (u8) GET_REGISTER##_regsize(vdst)); \
1076 UPDATE_FIELD_GET(&ifield->field); \
1077 } \
1078 FINISH(5);
1079
1080#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
1081 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1082 { \
1083 Object* obj; \
1084 vdst = INST_A(inst); \
1085 vsrc1 = INST_B(inst); /* object ptr */ \
1086 ref = FETCH(1); /* field offset */ \
1087 ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
1088 (_opname), vdst, vsrc1, ref); \
1089 obj = (Object*) GET_REGISTER(vsrc1); \
1090 if (!checkForNullExportPC(obj, fp, pc)) \
1091 GOTO_exceptionThrown(); \
1092 SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
1093 ILOGV("+ IGETQ %d=0x%08llx", ref, \
1094 (u8) GET_REGISTER##_regsize(vdst)); \
1095 } \
1096 FINISH(2);
1097
1098#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
1099 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1100 { \
1101 InstField* ifield; \
1102 Object* obj; \
1103 EXPORT_PC(); \
1104 vdst = INST_A(inst); \
1105 vsrc1 = INST_B(inst); /* object ptr */ \
1106 ref = FETCH(1); /* field ref */ \
1107 ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
1108 obj = (Object*) GET_REGISTER(vsrc1); \
1109 if (!checkForNull(obj)) \
1110 GOTO_exceptionThrown(); \
1111 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
1112 if (ifield == NULL) { \
1113 ifield = dvmResolveInstField(curMethod->clazz, ref); \
1114 if (ifield == NULL) \
1115 GOTO_exceptionThrown(); \
1116 } \
1117 dvmSetField##_ftype(obj, ifield->byteOffset, \
1118 GET_REGISTER##_regsize(vdst)); \
1119 ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
1120 (u8) GET_REGISTER##_regsize(vdst)); \
1121 UPDATE_FIELD_PUT(&ifield->field); \
1122 } \
1123 FINISH(2);
1124
1125#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
1126 HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/) \
1127 { \
1128 InstField* ifield; \
1129 Object* obj; \
1130 EXPORT_PC(); \
1131 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
1132 vdst = FETCH(3); \
1133 vsrc1 = FETCH(4); /* object ptr */ \
1134 ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x", \
1135 (_opname), vdst, vsrc1, ref); \
1136 obj = (Object*) GET_REGISTER(vsrc1); \
1137 if (!checkForNull(obj)) \
1138 GOTO_exceptionThrown(); \
1139 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
1140 if (ifield == NULL) { \
1141 ifield = dvmResolveInstField(curMethod->clazz, ref); \
1142 if (ifield == NULL) \
1143 GOTO_exceptionThrown(); \
1144 } \
1145 dvmSetField##_ftype(obj, ifield->byteOffset, \
1146 GET_REGISTER##_regsize(vdst)); \
1147 ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
1148 (u8) GET_REGISTER##_regsize(vdst)); \
1149 UPDATE_FIELD_PUT(&ifield->field); \
1150 } \
1151 FINISH(5);
1152
1153#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
1154 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1155 { \
1156 Object* obj; \
1157 vdst = INST_A(inst); \
1158 vsrc1 = INST_B(inst); /* object ptr */ \
1159 ref = FETCH(1); /* field offset */ \
1160 ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
1161 (_opname), vdst, vsrc1, ref); \
1162 obj = (Object*) GET_REGISTER(vsrc1); \
1163 if (!checkForNullExportPC(obj, fp, pc)) \
1164 GOTO_exceptionThrown(); \
1165 dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
1166 ILOGV("+ IPUTQ %d=0x%08llx", ref, \
1167 (u8) GET_REGISTER##_regsize(vdst)); \
1168 } \
1169 FINISH(2);
1170
1171/*
1172 * The JIT needs dvmDexGetResolvedField() to return non-null.
1173 * Because the portable interpreter is not involved with the JIT
1174 * and trace building, we only need the extra check here when this
1175 * code is massaged into a stub called from an assembly interpreter.
1176 * This is controlled by the JIT_STUB_HACK macro.
1177 */
1178
1179#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
1180 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
1181 { \
1182 StaticField* sfield; \
1183 vdst = INST_AA(inst); \
1184 ref = FETCH(1); /* field ref */ \
1185 ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
1186 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
1187 if (sfield == NULL) { \
1188 EXPORT_PC(); \
1189 sfield = dvmResolveStaticField(curMethod->clazz, ref); \
1190 if (sfield == NULL) \
1191 GOTO_exceptionThrown(); \
1192 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
1193 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
1194 } \
1195 } \
1196 SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
1197 ILOGV("+ SGET '%s'=0x%08llx", \
1198 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
1199 UPDATE_FIELD_GET(&sfield->field); \
1200 } \
1201 FINISH(2);
1202
1203#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
1204 HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/) \
1205 { \
1206 StaticField* sfield; \
1207 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
1208 vdst = FETCH(3); \
1209 ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref); \
1210 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
1211 if (sfield == NULL) { \
1212 EXPORT_PC(); \
1213 sfield = dvmResolveStaticField(curMethod->clazz, ref); \
1214 if (sfield == NULL) \
1215 GOTO_exceptionThrown(); \
1216 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
1217 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
1218 } \
1219 } \
1220 SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
1221 ILOGV("+ SGET '%s'=0x%08llx", \
1222 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
1223 UPDATE_FIELD_GET(&sfield->field); \
1224 } \
1225 FINISH(4);
1226
1227#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
1228 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
1229 { \
1230 StaticField* sfield; \
1231 vdst = INST_AA(inst); \
1232 ref = FETCH(1); /* field ref */ \
1233 ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
1234 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
1235 if (sfield == NULL) { \
1236 EXPORT_PC(); \
1237 sfield = dvmResolveStaticField(curMethod->clazz, ref); \
1238 if (sfield == NULL) \
1239 GOTO_exceptionThrown(); \
1240 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
1241 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
1242 } \
1243 } \
1244 dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
1245 ILOGV("+ SPUT '%s'=0x%08llx", \
1246 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
1247 UPDATE_FIELD_PUT(&sfield->field); \
1248 } \
1249 FINISH(2);
1250
1251#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
1252 HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/) \
1253 { \
1254 StaticField* sfield; \
1255 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
1256 vdst = FETCH(3); \
1257 ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref); \
1258 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
1259 if (sfield == NULL) { \
1260 EXPORT_PC(); \
1261 sfield = dvmResolveStaticField(curMethod->clazz, ref); \
1262 if (sfield == NULL) \
1263 GOTO_exceptionThrown(); \
1264 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
1265 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
1266 } \
1267 } \
1268 dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
1269 ILOGV("+ SPUT '%s'=0x%08llx", \
1270 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
1271 UPDATE_FIELD_PUT(&sfield->field); \
1272 } \
1273 FINISH(4);
1274
1275/* File: c/OP_IGET_WIDE_VOLATILE.cpp */
1276HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1277OP_END
1278
1279/* File: c/OP_IPUT_WIDE_VOLATILE.cpp */
1280HANDLE_IPUT_X(OP_IPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1281OP_END
1282
1283/* File: c/OP_SGET_WIDE_VOLATILE.cpp */
1284HANDLE_SGET_X(OP_SGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1285OP_END
1286
1287/* File: c/OP_SPUT_WIDE_VOLATILE.cpp */
1288HANDLE_SPUT_X(OP_SPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1289OP_END
1290
1291/* File: c/OP_EXECUTE_INLINE_RANGE.cpp */
1292HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
1293 {
1294 u4 arg0, arg1, arg2, arg3;
1295 arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
1296
1297 EXPORT_PC();
1298
1299 vsrc1 = INST_AA(inst); /* #of args */
1300 ref = FETCH(1); /* inline call "ref" */
1301 vdst = FETCH(2); /* range base */
1302 ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
1303 vsrc1, ref, vdst, vdst+vsrc1-1);
1304
1305 assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
1306 assert(vsrc1 <= 4);
1307
1308 switch (vsrc1) {
1309 case 4:
1310 arg3 = GET_REGISTER(vdst+3);
1311 /* fall through */
1312 case 3:
1313 arg2 = GET_REGISTER(vdst+2);
1314 /* fall through */
1315 case 2:
1316 arg1 = GET_REGISTER(vdst+1);
1317 /* fall through */
1318 case 1:
1319 arg0 = GET_REGISTER(vdst+0);
1320 /* fall through */
1321 default: // case 0
1322 ;
1323 }
1324
1325 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
1326 if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
1327 GOTO_exceptionThrown();
1328 } else {
1329 if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
1330 GOTO_exceptionThrown();
1331 }
1332 }
1333 FINISH(3);
1334OP_END
1335
1336/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.cpp */
1337HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
1338 {
1339 Object* obj;
1340
1341 vsrc1 = FETCH(2); /* reg number of "this" pointer */
1342 obj = GET_REGISTER_AS_OBJECT(vsrc1);
1343
1344 if (!checkForNullExportPC(obj, fp, pc))
1345 GOTO_exceptionThrown();
1346
1347 /*
1348 * The object should be marked "finalizable" when Object.<init>
1349 * completes normally. We're going to assume it does complete
1350 * (by virtue of being nothing but a return-void) and set it now.
1351 */
1352 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
1353 EXPORT_PC();
1354 dvmSetFinalizable(obj);
1355 if (dvmGetException(self))
1356 GOTO_exceptionThrown();
1357 }
1358
1359 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
1360 /* behave like OP_INVOKE_DIRECT_RANGE */
1361 GOTO_invoke(invokeDirect, true, false);
1362 }
1363 FINISH(3);
1364 }
1365OP_END
1366
1367/* File: c/OP_RETURN_VOID_BARRIER.cpp */
1368HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
1369 ILOGV("|return-void");
1370#ifndef NDEBUG
1371 retval.j = 0xababababULL; /* placate valgrind */
1372#endif
1373 ANDROID_MEMBAR_STORE();
1374 GOTO_returnFromMethod();
1375OP_END
1376
1377/* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.cpp */
1378HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/)
1379 {
1380 Object* obj;
1381
1382 vsrc1 = FETCH(4); /* reg number of "this" pointer */
1383 obj = GET_REGISTER_AS_OBJECT(vsrc1);
1384
1385 if (!checkForNullExportPC(obj, fp, pc))
1386 GOTO_exceptionThrown();
1387
1388 /*
1389 * The object should be marked "finalizable" when Object.<init>
1390 * completes normally. We're going to assume it does complete
1391 * (by virtue of being nothing but a return-void) and set it now.
1392 */
1393 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
1394 EXPORT_PC();
1395 dvmSetFinalizable(obj);
1396 if (dvmGetException(self))
1397 GOTO_exceptionThrown();
1398 }
1399
1400 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
1401 /* behave like OP_INVOKE_DIRECT_RANGE */
1402 GOTO_invoke(invokeDirect, true, true);
1403 }
1404 FINISH(5);
1405 }
1406OP_END
1407
1408/* File: c/OP_IGET_VOLATILE_JUMBO.cpp */
1409HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
1410OP_END
1411
1412/* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.cpp */
1413HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
1414OP_END
1415
1416/* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.cpp */
1417HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
1418OP_END
1419
1420/* File: c/OP_IPUT_VOLATILE_JUMBO.cpp */
1421HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
1422OP_END
1423
1424/* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.cpp */
1425HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
1426OP_END
1427
1428/* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.cpp */
1429HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
1430OP_END
1431
1432/* File: c/OP_SGET_VOLATILE_JUMBO.cpp */
1433HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
1434OP_END
1435
1436/* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.cpp */
1437HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
1438OP_END
1439
1440/* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.cpp */
1441HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
1442OP_END
1443
1444/* File: c/OP_SPUT_VOLATILE_JUMBO.cpp */
1445HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, )
1446OP_END
1447
1448/* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.cpp */
1449HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE)
1450OP_END
1451
1452/* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.cpp */
1453HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT)
1454OP_END
1455
1456/* File: c/gotoTargets.cpp */
1457/*
1458 * C footer. This has some common code shared by the various targets.
1459 */
1460
1461/*
1462 * Everything from here on is a "goto target". In the basic interpreter
1463 * we jump into these targets and then jump directly to the handler for
1464 * the next instruction. Here, these are subroutines that return to the caller.
1465 */
1466
1467GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
1468 {
1469 ClassObject* arrayClass;
1470 ArrayObject* newArray;
1471 u4* contents;
1472 char typeCh;
1473 int i;
1474 u4 arg5;
1475
1476 EXPORT_PC();
1477
1478 if (jumboFormat) {
1479 ref = FETCH(1) | (u4)FETCH(2) << 16; /* class ref */
1480 vsrc1 = FETCH(3); /* #of elements */
1481 vdst = FETCH(4); /* range base */
1482 arg5 = -1; /* silence compiler warning */
1483 ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
1484 vsrc1, ref, vdst, vdst+vsrc1-1);
1485 } else {
1486 ref = FETCH(1); /* class ref */
1487 vdst = FETCH(2); /* first 4 regs -or- range base */
1488
1489 if (methodCallRange) {
1490 vsrc1 = INST_AA(inst); /* #of elements */
1491 arg5 = -1; /* silence compiler warning */
1492 ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
1493 vsrc1, ref, vdst, vdst+vsrc1-1);
1494 } else {
1495 arg5 = INST_A(inst);
1496 vsrc1 = INST_B(inst); /* #of elements */
1497 ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
1498 vsrc1, ref, vdst, arg5);
1499 }
1500 }
1501
1502 /*
1503 * Resolve the array class.
1504 */
1505 arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
1506 if (arrayClass == NULL) {
1507 arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
1508 if (arrayClass == NULL)
1509 GOTO_exceptionThrown();
1510 }
1511 /*
1512 if (!dvmIsArrayClass(arrayClass)) {
1513 dvmThrowRuntimeException(
1514 "filled-new-array needs array class");
1515 GOTO_exceptionThrown();
1516 }
1517 */
1518 /* verifier guarantees this is an array class */
1519 assert(dvmIsArrayClass(arrayClass));
1520 assert(dvmIsClassInitialized(arrayClass));
1521
1522 /*
1523 * Create an array of the specified type.
1524 */
1525 LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
1526 typeCh = arrayClass->descriptor[1];
1527 if (typeCh == 'D' || typeCh == 'J') {
1528 /* category 2 primitives not allowed */
1529 dvmThrowRuntimeException("bad filled array req");
1530 GOTO_exceptionThrown();
1531 } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
1532 /* TODO: requires multiple "fill in" loops with different widths */
1533 LOGE("non-int primitives not implemented\n");
1534 dvmThrowInternalError(
1535 "filled-new-array not implemented for anything but 'int'");
1536 GOTO_exceptionThrown();
1537 }
1538
1539 newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
1540 if (newArray == NULL)
1541 GOTO_exceptionThrown();
1542
1543 /*
1544 * Fill in the elements. It's legal for vsrc1 to be zero.
1545 */
1546 contents = (u4*)(void*)newArray->contents;
1547 if (methodCallRange) {
1548 for (i = 0; i < vsrc1; i++)
1549 contents[i] = GET_REGISTER(vdst+i);
1550 } else {
1551 assert(vsrc1 <= 5);
1552 if (vsrc1 == 5) {
1553 contents[4] = GET_REGISTER(arg5);
1554 vsrc1--;
1555 }
1556 for (i = 0; i < vsrc1; i++) {
1557 contents[i] = GET_REGISTER(vdst & 0x0f);
1558 vdst >>= 4;
1559 }
1560 }
1561 if (typeCh == 'L' || typeCh == '[') {
1562 dvmWriteBarrierArray(newArray, 0, newArray->length);
1563 }
1564
1565 retval.l = newArray;
1566 }
1567 if (jumboFormat) {
1568 FINISH(5);
1569 } else {
1570 FINISH(3);
1571 }
1572GOTO_TARGET_END
1573
1574
1575GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
1576 {
1577 Method* baseMethod;
1578 Object* thisPtr;
1579
1580 EXPORT_PC();
1581
1582 if (jumboFormat) {
1583 ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
1584 vsrc1 = FETCH(3); /* count */
1585 vdst = FETCH(4); /* first reg */
1586 ADJUST_PC(2); /* advance pc partially to make returns easier */
1587 ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
1588 vsrc1, ref, vdst, vdst+vsrc1-1);
1589 thisPtr = (Object*) GET_REGISTER(vdst);
1590 } else {
1591 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1592 ref = FETCH(1); /* method ref */
1593 vdst = FETCH(2); /* 4 regs -or- first reg */
1594
1595 /*
1596 * The object against which we are executing a method is always
1597 * in the first argument.
1598 */
1599 if (methodCallRange) {
1600 assert(vsrc1 > 0);
1601 ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
1602 vsrc1, ref, vdst, vdst+vsrc1-1);
1603 thisPtr = (Object*) GET_REGISTER(vdst);
1604 } else {
1605 assert((vsrc1>>4) > 0);
1606 ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
1607 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1608 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
1609 }
1610 }
1611
1612 if (!checkForNull(thisPtr))
1613 GOTO_exceptionThrown();
1614
1615 /*
1616 * Resolve the method. This is the correct method for the static
1617 * type of the object. We also verify access permissions here.
1618 */
1619 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
1620 if (baseMethod == NULL) {
1621 baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
1622 if (baseMethod == NULL) {
1623 ILOGV("+ unknown method or access denied\n");
1624 GOTO_exceptionThrown();
1625 }
1626 }
1627
1628 /*
1629 * Combine the object we found with the vtable offset in the
1630 * method.
1631 */
1632 assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
1633 methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
1634
1635#if defined(WITH_JIT) && defined(MTERP_STUB)
1636 self->methodToCall = methodToCall;
1637 self->callsiteClass = thisPtr->clazz;
1638#endif
1639
1640#if 0
1641 if (dvmIsAbstractMethod(methodToCall)) {
1642 /*
1643 * This can happen if you create two classes, Base and Sub, where
1644 * Sub is a sub-class of Base. Declare a protected abstract
1645 * method foo() in Base, and invoke foo() from a method in Base.
1646 * Base is an "abstract base class" and is never instantiated
1647 * directly. Now, override foo() in Sub, and use Sub. This
1648 * works fine unless Sub stops providing an implementation of
1649 * the method.
1650 */
1651 dvmThrowAbstractMethodError("abstract method not implemented");
1652 GOTO_exceptionThrown();
1653 }
1654#else
1655 assert(!dvmIsAbstractMethod(methodToCall) ||
1656 methodToCall->nativeFunc != NULL);
1657#endif
1658
1659 LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
1660 baseMethod->clazz->descriptor, baseMethod->name,
1661 (u4) baseMethod->methodIndex,
1662 methodToCall->clazz->descriptor, methodToCall->name);
1663 assert(methodToCall != NULL);
1664
1665#if 0
1666 if (vsrc1 != methodToCall->insSize) {
1667 LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
1668 baseMethod->clazz->descriptor, baseMethod->name,
1669 (u4) baseMethod->methodIndex,
1670 methodToCall->clazz->descriptor, methodToCall->name);
1671 //dvmDumpClass(baseMethod->clazz);
1672 //dvmDumpClass(methodToCall->clazz);
1673 dvmDumpAllClasses(0);
1674 }
1675#endif
1676
1677 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1678 }
1679GOTO_TARGET_END
1680
1681GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
1682 {
1683 Method* baseMethod;
1684 u2 thisReg;
1685
1686 EXPORT_PC();
1687
1688 if (jumboFormat) {
1689 ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
1690 vsrc1 = FETCH(3); /* count */
1691 vdst = FETCH(4); /* first reg */
1692 ADJUST_PC(2); /* advance pc partially to make returns easier */
1693 ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
1694 vsrc1, ref, vdst, vdst+vsrc1-1);
1695 thisReg = vdst;
1696 } else {
1697 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1698 ref = FETCH(1); /* method ref */
1699 vdst = FETCH(2); /* 4 regs -or- first reg */
1700
1701 if (methodCallRange) {
1702 ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
1703 vsrc1, ref, vdst, vdst+vsrc1-1);
1704 thisReg = vdst;
1705 } else {
1706 ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
1707 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1708 thisReg = vdst & 0x0f;
1709 }
1710 }
1711
1712 /* impossible in well-formed code, but we must check nevertheless */
1713 if (!checkForNull((Object*) GET_REGISTER(thisReg)))
1714 GOTO_exceptionThrown();
1715
1716 /*
1717 * Resolve the method. This is the correct method for the static
1718 * type of the object. We also verify access permissions here.
1719 * The first arg to dvmResolveMethod() is just the referring class
1720 * (used for class loaders and such), so we don't want to pass
1721 * the superclass into the resolution call.
1722 */
1723 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
1724 if (baseMethod == NULL) {
1725            baseMethod = dvmResolveMethod(curMethod->clazz, ref, METHOD_VIRTUAL);
1726 if (baseMethod == NULL) {
1727 ILOGV("+ unknown method or access denied\n");
1728 GOTO_exceptionThrown();
1729 }
1730 }
1731
1732 /*
1733 * Combine the object we found with the vtable offset in the
1734 * method's class.
1735 *
1736 * We're using the current method's class' superclass, not the
1737 * superclass of "this". This is because we might be executing
1738 * in a method inherited from a superclass, and we want to run
1739 * in that class' superclass.
1740 */
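        /*
         * Hypothetical example of the distinction: with C extends B extends
         * A, suppose B declares a method containing this invoke-super and C
         * inherits it without overriding. When that code runs on a C
         * instance, curMethod->clazz is still B, so we dispatch into A's
         * vtable (B's superclass) -- not into B, which is what the
         * superclass of "this" (C) would give us.
         */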
1741 if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
1742 /*
1743 * Method does not exist in the superclass. Could happen if
1744 * superclass gets updated.
1745 */
1746 dvmThrowNoSuchMethodError(baseMethod->name);
1747 GOTO_exceptionThrown();
1748 }
1749 methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
1750
1751#if 0
1752 if (dvmIsAbstractMethod(methodToCall)) {
1753 dvmThrowAbstractMethodError("abstract method not implemented");
1754 GOTO_exceptionThrown();
1755 }
1756#else
1757 assert(!dvmIsAbstractMethod(methodToCall) ||
1758 methodToCall->nativeFunc != NULL);
1759#endif
1760 LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
1761 baseMethod->clazz->descriptor, baseMethod->name,
1762 methodToCall->clazz->descriptor, methodToCall->name);
1763 assert(methodToCall != NULL);
1764
1765 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1766 }
1767GOTO_TARGET_END
1768
1769GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
1770 {
1771 Object* thisPtr;
1772 ClassObject* thisClass;
1773
1774 EXPORT_PC();
1775
1776 if (jumboFormat) {
1777 ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
1778 vsrc1 = FETCH(3); /* count */
1779 vdst = FETCH(4); /* first reg */
1780 ADJUST_PC(2); /* advance pc partially to make returns easier */
1781 ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
1782 vsrc1, ref, vdst, vdst+vsrc1-1);
1783 thisPtr = (Object*) GET_REGISTER(vdst);
1784 } else {
1785 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1786 ref = FETCH(1); /* method ref */
1787 vdst = FETCH(2); /* 4 regs -or- first reg */
1788
1789 /*
1790 * The object against which we are executing a method is always
1791 * in the first argument.
1792 */
1793 if (methodCallRange) {
1794 assert(vsrc1 > 0);
1795 ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
1796 vsrc1, ref, vdst, vdst+vsrc1-1);
1797 thisPtr = (Object*) GET_REGISTER(vdst);
1798 } else {
1799 assert((vsrc1>>4) > 0);
1800 ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
1801 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1802 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
1803 }
1804 }
1805
1806 if (!checkForNull(thisPtr))
1807 GOTO_exceptionThrown();
1808
1809 thisClass = thisPtr->clazz;
1810
1811
1812 /*
1813 * Given a class and a method index, find the Method* with the
1814 * actual code we want to execute.
1815 */
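        /*
         * Unlike invoke-virtual, an interface method has no fixed vtable
         * slot shared by every implementing class, so the helper below
         * resolves the method against the receiver's concrete class (and,
         * per its name, caches the result).
         */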
1816 methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
1817 methodClassDex);
1818#if defined(WITH_JIT) && defined(MTERP_STUB)
1819 self->callsiteClass = thisClass;
1820 self->methodToCall = methodToCall;
1821#endif
1822 if (methodToCall == NULL) {
1823 assert(dvmCheckException(self));
1824 GOTO_exceptionThrown();
1825 }
1826
1827 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1828 }
1829GOTO_TARGET_END
1830
1831GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
1832 {
1833 u2 thisReg;
1834
1835 EXPORT_PC();
1836
1837 if (jumboFormat) {
1838 ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
1839 vsrc1 = FETCH(3); /* count */
1840 vdst = FETCH(4); /* first reg */
1841 ADJUST_PC(2); /* advance pc partially to make returns easier */
1842 ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
1843 vsrc1, ref, vdst, vdst+vsrc1-1);
1844 thisReg = vdst;
1845 } else {
1846 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1847 ref = FETCH(1); /* method ref */
1848 vdst = FETCH(2); /* 4 regs -or- first reg */
1849
1850 if (methodCallRange) {
1851 ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
1852 vsrc1, ref, vdst, vdst+vsrc1-1);
1853 thisReg = vdst;
1854 } else {
1855 ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
1856 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1857 thisReg = vdst & 0x0f;
1858 }
1859 }
1860
1861 if (!checkForNull((Object*) GET_REGISTER(thisReg)))
1862 GOTO_exceptionThrown();
1863
1864 methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
1865 if (methodToCall == NULL) {
1866 methodToCall = dvmResolveMethod(curMethod->clazz, ref,
1867 METHOD_DIRECT);
1868 if (methodToCall == NULL) {
1869 ILOGV("+ unknown direct method\n"); // should be impossible
1870 GOTO_exceptionThrown();
1871 }
1872 }
1873 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1874 }
1875GOTO_TARGET_END
1876
1877GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
1878 EXPORT_PC();
1879
1880 if (jumboFormat) {
1881 ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
1882 vsrc1 = FETCH(3); /* count */
1883 vdst = FETCH(4); /* first reg */
1884 ADJUST_PC(2); /* advance pc partially to make returns easier */
1885 ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
1886 vsrc1, ref, vdst, vdst+vsrc1-1);
1887 } else {
1888 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1889 ref = FETCH(1); /* method ref */
1890 vdst = FETCH(2); /* 4 regs -or- first reg */
1891
1892 if (methodCallRange)
1893 ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
1894 vsrc1, ref, vdst, vdst+vsrc1-1);
1895 else
1896 ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
1897 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1898 }
1899
1900 methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
1901 if (methodToCall == NULL) {
1902 methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
1903 if (methodToCall == NULL) {
1904 ILOGV("+ unknown method\n");
1905 GOTO_exceptionThrown();
1906 }
1907
1908#if defined(WITH_JIT) && defined(MTERP_STUB)
1909 /*
1910 * The JIT needs dvmDexGetResolvedMethod() to return non-null.
1911 * Include the check if this code is being used as a stub
1912 * called from the assembly interpreter.
1913 */
1914 if ((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) &&
1915 (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL)) {
1916 /* Class initialization is still ongoing */
1917                dvmJitEndTraceSelect(self, pc);
1918 }
1919#endif
1920 }
1921 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1922GOTO_TARGET_END
1923
1924GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
1925 {
1926 Object* thisPtr;
1927
1928 EXPORT_PC();
1929
1930 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1931 ref = FETCH(1); /* vtable index */
1932 vdst = FETCH(2); /* 4 regs -or- first reg */
1933
1934 /*
1935 * The object against which we are executing a method is always
1936 * in the first argument.
1937 */
1938 if (methodCallRange) {
1939 assert(vsrc1 > 0);
1940 ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
1941 vsrc1, ref, vdst, vdst+vsrc1-1);
1942 thisPtr = (Object*) GET_REGISTER(vdst);
1943 } else {
1944 assert((vsrc1>>4) > 0);
1945 ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
1946 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1947 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
1948 }
1949
1950 if (!checkForNull(thisPtr))
1951 GOTO_exceptionThrown();
1952
1953
1954 /*
1955 * Combine the object we found with the vtable offset in the
1956 * method.
1957 */
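        /*
         * In the "quick" form, FETCH(1) above already gave us a vtable
         * index rather than a method reference, so there is no resolution
         * step here -- we index the receiver's vtable directly.
         */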
1958 assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
1959 methodToCall = thisPtr->clazz->vtable[ref];
1960#if defined(WITH_JIT) && defined(MTERP_STUB)
1961 self->callsiteClass = thisPtr->clazz;
1962 self->methodToCall = methodToCall;
1963#endif
1964
1965#if 0
1966 if (dvmIsAbstractMethod(methodToCall)) {
1967 dvmThrowAbstractMethodError("abstract method not implemented");
1968 GOTO_exceptionThrown();
1969 }
1970#else
1971 assert(!dvmIsAbstractMethod(methodToCall) ||
1972 methodToCall->nativeFunc != NULL);
1973#endif
1974
1975 LOGVV("+++ virtual[%d]=%s.%s\n",
1976 ref, methodToCall->clazz->descriptor, methodToCall->name);
1977 assert(methodToCall != NULL);
1978
1979 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1980 }
1981GOTO_TARGET_END
1982
1983GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
1984 {
1985 u2 thisReg;
1986
1987 EXPORT_PC();
1988
1989 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1990 ref = FETCH(1); /* vtable index */
1991 vdst = FETCH(2); /* 4 regs -or- first reg */
1992
1993 if (methodCallRange) {
1994 ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
1995 vsrc1, ref, vdst, vdst+vsrc1-1);
1996 thisReg = vdst;
1997 } else {
1998 ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
1999 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
2000 thisReg = vdst & 0x0f;
2001 }
2002 /* impossible in well-formed code, but we must check nevertheless */
2003 if (!checkForNull((Object*) GET_REGISTER(thisReg)))
2004 GOTO_exceptionThrown();
2005
2006#if 0 /* impossible in optimized + verified code */
2007 if (ref >= curMethod->clazz->super->vtableCount) {
2008 dvmThrowNoSuchMethodError(NULL);
2009 GOTO_exceptionThrown();
2010 }
2011#else
2012 assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
2013#endif
2014
2015 /*
2016 * Combine the object we found with the vtable offset in the
2017 * method's class.
2018 *
2019 * We're using the current method's class' superclass, not the
2020 * superclass of "this". This is because we might be executing
2021 * in a method inherited from a superclass, and we want to run
2022 * in the method's class' superclass.
2023 */
2024 methodToCall = curMethod->clazz->super->vtable[ref];
2025
2026#if 0
2027 if (dvmIsAbstractMethod(methodToCall)) {
2028 dvmThrowAbstractMethodError("abstract method not implemented");
2029 GOTO_exceptionThrown();
2030 }
2031#else
2032 assert(!dvmIsAbstractMethod(methodToCall) ||
2033 methodToCall->nativeFunc != NULL);
2034#endif
2035 LOGVV("+++ super-virtual[%d]=%s.%s\n",
2036 ref, methodToCall->clazz->descriptor, methodToCall->name);
2037 assert(methodToCall != NULL);
2038 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
2039 }
2040GOTO_TARGET_END
2041
2042
2043 /*
2044 * General handling for return-void, return, and return-wide. Put the
2045 * return value in "retval" before jumping here.
2046 */
2047GOTO_TARGET(returnFromMethod)
2048 {
2049 StackSaveArea* saveArea;
2050
2051 /*
2052 * We must do this BEFORE we pop the previous stack frame off, so
2053 * that the GC can see the return value (if any) in the local vars.
2054 *
2055 * Since this is now an interpreter switch point, we must do it before
2056 * we do anything at all.
2057 */
2058 PERIODIC_CHECKS(0);
2059
2060 ILOGV("> retval=0x%llx (leaving %s.%s %s)",
2061 retval.j, curMethod->clazz->descriptor, curMethod->name,
2062 curMethod->shorty);
2063 //DUMP_REGS(curMethod, fp);
2064
2065 saveArea = SAVEAREA_FROM_FP(fp);
2066
2067#ifdef EASY_GDB
2068 debugSaveArea = saveArea;
2069#endif
2070
2071 /* back up to previous frame and see if we hit a break */
2072 fp = (u4*)saveArea->prevFrame;
2073 assert(fp != NULL);
2074
2075 /* Handle any special subMode requirements */
2076 if (self->interpBreak.ctl.subMode != 0) {
2077 PC_FP_TO_SELF();
2078 dvmReportReturn(self);
2079 }
2080
2081 if (dvmIsBreakFrame(fp)) {
2082 /* bail without popping the method frame from stack */
2083 LOGVV("+++ returned into break frame\n");
2084 GOTO_bail();
2085 }
2086
2087 /* update thread FP, and reset local variables */
2088        self->interpSave.curFrame = fp;
2089        curMethod = SAVEAREA_FROM_FP(fp)->method;
2090 self->interpSave.method = curMethod;
2091 //methodClass = curMethod->clazz;
2092 methodClassDex = curMethod->clazz->pDvmDex;
2093 pc = saveArea->savedPc;
2094 ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
2095 curMethod->name, curMethod->shorty);
2096
2097 /* use FINISH on the caller's invoke instruction */
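        /*
         * Standard invokes are 3 code units wide, so FINISH(3) steps past
         * them; jumbo invokes are 5 units, but their handlers already did
         * ADJUST_PC(2), so advancing 3 more completes the width either way.
         */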
2098 //u2 invokeInstr = INST_INST(FETCH(0));
2099 if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
2100 invokeInstr <= OP_INVOKE_INTERFACE*/)
2101 {
2102 FINISH(3);
2103 } else {
2104 //LOGE("Unknown invoke instr %02x at %d\n",
2105 // invokeInstr, (int) (pc - curMethod->insns));
2106 assert(false);
2107 }
2108 }
2109GOTO_TARGET_END
2110
2111
2112 /*
2113 * Jump here when the code throws an exception.
2114 *
2115 * By the time we get here, the Throwable has been created and the stack
2116 * trace has been saved off.
2117 */
2118GOTO_TARGET(exceptionThrown)
2119 {
2120 Object* exception;
2121 int catchRelPc;
2122
2123 PERIODIC_CHECKS(0);
2124
2125 /*
2126 * We save off the exception and clear the exception status. While
2127 * processing the exception we might need to load some Throwable
2128 * classes, and we don't want class loader exceptions to get
2129 * confused with this one.
2130 */
2131 assert(dvmCheckException(self));
2132 exception = dvmGetException(self);
2133 dvmAddTrackedAlloc(exception, self);
2134 dvmClearException(self);
2135
2136 LOGV("Handling exception %s at %s:%d\n",
2137 exception->clazz->descriptor, curMethod->name,
2138 dvmLineNumFromPC(curMethod, pc - curMethod->insns));
2139
2140 /*
2141 * Report the exception throw to any "subMode" watchers.
2142 *
2143         * TODO: if the exception was thrown by interpreted code, control
2144         * fell through native code, and then back to us, we will report the
2145         * exception both at the point of the throw and again here. We can avoid
2146 * this by not reporting exceptions when we jump here directly from
2147 * the native call code above, but then we won't report exceptions
2148 * that were thrown *from* the JNI code (as opposed to *through* it).
2149 *
2150 * The correct solution is probably to ignore from-native exceptions
2151 * here, and have the JNI exception code do the reporting to the
2152 * debugger.
2153 */
2154 if (self->interpBreak.ctl.subMode != 0) {
2155 PC_FP_TO_SELF();
2156 dvmReportExceptionThrow(self, exception);
2157 }
2158
2159 /*
2160 * We need to unroll to the catch block or the nearest "break"
2161 * frame.
2162 *
2163 * A break frame could indicate that we have reached an intermediate
2164 * native call, or have gone off the top of the stack and the thread
2165 * needs to exit. Either way, we return from here, leaving the
2166 * exception raised.
2167 *
2168 * If we do find a catch block, we want to transfer execution to
2169 * that point.
2170 *
2171 * Note this can cause an exception while resolving classes in
2172 * the "catch" blocks.
2173 */
2174 catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
2175 exception, false, (void**)(void*)&fp);
2176
2177 /*
2178 * Restore the stack bounds after an overflow. This isn't going to
2179         * be correct in all circumstances, e.g. if JNI code devours the
2180         * exception, this won't happen until some other exception gets
2181 * thrown. If the code keeps pushing the stack bounds we'll end
2182 * up aborting the VM.
2183 *
2184 * Note we want to do this *after* the call to dvmFindCatchBlock,
2185 * because that may need extra stack space to resolve exception
2186 * classes (e.g. through a class loader).
2187 *
2188 * It's possible for the stack overflow handling to cause an
2189 * exception (specifically, class resolution in a "catch" block
2190 * during the call above), so we could see the thread's overflow
2191 * flag raised but actually be running in a "nested" interpreter
2192 * frame. We don't allow doubled-up StackOverflowErrors, so
2193 * we can check for this by just looking at the exception type
2194 * in the cleanup function. Also, we won't unroll past the SOE
2195 * point because the more-recent exception will hit a break frame
2196 * as it unrolls to here.
2197 */
2198 if (self->stackOverflowed)
2199 dvmCleanupStackOverflow(self, exception);
2200
2201 if (catchRelPc < 0) {
2202 /* falling through to JNI code or off the bottom of the stack */
2203#if DVM_SHOW_EXCEPTION >= 2
2204 LOGD("Exception %s from %s:%d not caught locally\n",
2205 exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
2206 dvmLineNumFromPC(curMethod, pc - curMethod->insns));
2207#endif
2208 dvmSetException(self, exception);
2209 dvmReleaseTrackedAlloc(exception, self);
2210 GOTO_bail();
2211 }
2212
2213#if DVM_SHOW_EXCEPTION >= 3
2214 {
2215 const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
2216 LOGD("Exception %s thrown from %s:%d to %s:%d\n",
2217 exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
2218 dvmLineNumFromPC(curMethod, pc - curMethod->insns),
2219 dvmGetMethodSourceFile(catchMethod),
2220 dvmLineNumFromPC(catchMethod, catchRelPc));
2221 }
2222#endif
2223
2224 /*
2225         * Adjust local variables to match self->interpSave.curFrame and the
2226         * updated PC.
2227 */
2228        //fp = (u4*) self->interpSave.curFrame;
2229        curMethod = SAVEAREA_FROM_FP(fp)->method;
2230 self->interpSave.method = curMethod;
2231 //methodClass = curMethod->clazz;
2232 methodClassDex = curMethod->clazz->pDvmDex;
2233 pc = curMethod->insns + catchRelPc;
2234 ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
2235 curMethod->name, curMethod->shorty);
2236 DUMP_REGS(curMethod, fp, false); // show all regs
2237
2238 /*
2239 * Restore the exception if the handler wants it.
2240 *
2241 * The Dalvik spec mandates that, if an exception handler wants to
2242 * do something with the exception, the first instruction executed
2243 * must be "move-exception". We can pass the exception along
2244 * through the thread struct, and let the move-exception instruction
2245 * clear it for us.
2246 *
2247 * If the handler doesn't call move-exception, we don't want to
2248 * finish here with an exception still pending.
2249 */
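        /*
         * For example, a handler that wants the exception typically starts
         * with something like:
         *     move-exception v0
         * in which case we stash the exception back in the thread here and
         * let that instruction fetch and clear it.
         */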
2250 if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
2251 dvmSetException(self, exception);
2252
2253 dvmReleaseTrackedAlloc(exception, self);
2254 FINISH(0);
2255 }
2256GOTO_TARGET_END
2257
2258
2259
2260 /*
2261 * General handling for invoke-{virtual,super,direct,static,interface},
2262 * including "quick" variants.
2263 *
2264     * Set "methodToCall" to the Method we're calling, and set "methodCallRange"
2265     * depending on whether this is a "/range" instruction.
2266 *
2267 * For a range call:
2268 * "vsrc1" holds the argument count (8 bits)
2269 * "vdst" holds the first argument in the range
2270 * For a non-range call:
2271 * "vsrc1" holds the argument count (4 bits) and the 5th argument index
2272 * "vdst" holds four 4-bit register indices
2273 *
2274 * The caller must EXPORT_PC before jumping here, because any method
2275 * call can throw a stack overflow exception.
2276 */
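    /*
     * Illustrative (made-up) encoding for the non-range case: for a call
     * like {v4, v5, v6}, the count 3 sits in the high nibble of "vsrc1"
     * (the low nibble would name a 5th register and is unused here), and
     * "vdst" packs the registers as nibbles from low to high, i.e. 0x0654;
     * the copy code below unpacks it with shifts and masks.
     */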
2277GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
2278 u2 count, u2 regs)
2279 {
2280 STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
2281
2282 //printf("range=%d call=%p count=%d regs=0x%04x\n",
2283 // methodCallRange, methodToCall, count, regs);
2284 //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
2285 // methodToCall->name, methodToCall->shorty);
2286
2287 u4* outs;
2288 int i;
2289
2290 /*
2291 * Copy args. This may corrupt vsrc1/vdst.
2292 */
2293 if (methodCallRange) {
2294 // could use memcpy or a "Duff's device"; most functions have
2295 // so few args it won't matter much
2296 assert(vsrc1 <= curMethod->outsSize);
2297 assert(vsrc1 == methodToCall->insSize);
2298 outs = OUTS_FROM_FP(fp, vsrc1);
2299 for (i = 0; i < vsrc1; i++)
2300 outs[i] = GET_REGISTER(vdst+i);
2301 } else {
2302 u4 count = vsrc1 >> 4;
2303
2304 assert(count <= curMethod->outsSize);
2305 assert(count == methodToCall->insSize);
2306 assert(count <= 5);
2307
2308 outs = OUTS_FROM_FP(fp, count);
2309#if 0
2310 if (count == 5) {
2311 outs[4] = GET_REGISTER(vsrc1 & 0x0f);
2312 count--;
2313 }
2314 for (i = 0; i < (int) count; i++) {
2315 outs[i] = GET_REGISTER(vdst & 0x0f);
2316 vdst >>= 4;
2317 }
2318#else
2319 // This version executes fewer instructions but is larger
2320 // overall. Seems to be a teensy bit faster.
2321 assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
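            // Note: the cases below intentionally fall through (no breaks),
            // so a count of N stores outs[N-1] down through outs[0].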
2322 switch (count) {
2323 case 5:
2324 outs[4] = GET_REGISTER(vsrc1 & 0x0f);
2325 case 4:
2326 outs[3] = GET_REGISTER(vdst >> 12);
2327 case 3:
2328 outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
2329 case 2:
2330 outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
2331 case 1:
2332 outs[0] = GET_REGISTER(vdst & 0x0f);
2333 default:
2334 ;
2335 }
2336#endif
2337 }
2338 }
2339
2340 /*
2341 * (This was originally a "goto" target; I've kept it separate from the
2342 * stuff above in case we want to refactor things again.)
2343 *
2344 * At this point, we have the arguments stored in the "outs" area of
2345 * the current method's stack frame, and the method to call in
2346 * "methodToCall". Push a new stack frame.
2347 */
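    /*
     * Rough frame-layout sketch implied by the arithmetic below (the
     * interpreter stack grows toward lower addresses):
     *
     *   fp[0..registersSize-1]    caller's registers
     *   SAVEAREA_FROM_FP(fp)      caller's StackSaveArea
     *   newFp[0..]                callee's registers; the "outs" written
     *                             above become the callee's ins
     *   newSaveArea               callee's StackSaveArea
     *   bottom                    room for the callee's own outs (checked
     *                             against interpStackEnd below)
     */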
2348 {
2349 StackSaveArea* newSaveArea;
2350 u4* newFp;
2351
2352 ILOGV("> %s%s.%s %s",
2353 dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
2354 methodToCall->clazz->descriptor, methodToCall->name,
2355 methodToCall->shorty);
2356
2357 newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
2358 newSaveArea = SAVEAREA_FROM_FP(newFp);
2359
2360 /* verify that we have enough space */
2361 if (true) {
2362 u1* bottom;
2363 bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
2364 if (bottom < self->interpStackEnd) {
2365 /* stack overflow */
2366 LOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')\n",
2367 self->interpStackStart, self->interpStackEnd, bottom,
2368 (u1*) fp - bottom, self->interpStackSize,
2369 methodToCall->name);
2370 dvmHandleStackOverflow(self, methodToCall);
2371 assert(dvmCheckException(self));
2372 GOTO_exceptionThrown();
2373 }
2374 //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
2375 // fp, newFp, newSaveArea, bottom);
2376 }
2377
2378#ifdef LOG_INSTR
2379 if (methodToCall->registersSize > methodToCall->insSize) {
2380 /*
2381 * This makes valgrind quiet when we print registers that
2382 * haven't been initialized. Turn it off when the debug
2383 * messages are disabled -- we want valgrind to report any
2384 * used-before-initialized issues.
2385 */
2386 memset(newFp, 0xcc,
2387 (methodToCall->registersSize - methodToCall->insSize) * 4);
2388 }
2389#endif
2390
2391#ifdef EASY_GDB
2392 newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
2393#endif
2394 newSaveArea->prevFrame = fp;
2395 newSaveArea->savedPc = pc;
2396#if defined(WITH_JIT) && defined(MTERP_STUB)
2397 newSaveArea->returnAddr = 0;
2398#endif
2399 newSaveArea->method = methodToCall;
2400
2401 if (self->interpBreak.ctl.subMode != 0) {
2402 /*
2403 * We mark ENTER here for both native and non-native
2404 * calls. For native calls, we'll mark EXIT on return.
2405 * For non-native calls, EXIT is marked in the RETURN op.
2406 */
2407            PC_TO_SELF();
2408            dvmReportInvoke(self, methodToCall);
2409 }
2410
2411 if (!dvmIsNativeMethod(methodToCall)) {
2412 /*
2413 * "Call" interpreted code. Reposition the PC, update the
2414 * frame pointer and other local state, and continue.
2415 */
2416 curMethod = methodToCall;
2417 self->interpSave.method = curMethod;
2418 methodClassDex = curMethod->clazz->pDvmDex;
2419 pc = methodToCall->insns;
2420            self->interpSave.curFrame = fp = newFp;
2421#ifdef EASY_GDB
2422 debugSaveArea = SAVEAREA_FROM_FP(newFp);
2423#endif
2424 self->debugIsMethodEntry = true; // profiling, debugging
2425 ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
2426 curMethod->name, curMethod->shorty);
2427 DUMP_REGS(curMethod, fp, true); // show input args
2428 FINISH(0); // jump to method start
2429 } else {
2430 /* set this up for JNI locals, even if not a JNI native */
2431 newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
2432
2433            self->interpSave.curFrame = newFp;
2434
2435 DUMP_REGS(methodToCall, newFp, true); // show input args
2436
2437 if (self->interpBreak.ctl.subMode != 0) {
2438                dvmReportPreNativeInvoke(methodToCall, self, fp);
2439            }
2440
2441 ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
2442 methodToCall->name, methodToCall->shorty);
2443
2444 /*
2445 * Jump through native call bridge. Because we leave no
2446 * space for locals on native calls, "newFp" points directly
2447 * to the method arguments.
2448 */
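            /*
             * The bridge convention visible in this call: the native stub
             * receives the argument words (newFp), a slot for the return
             * value (&retval), the Method* being invoked, and the current
             * Thread*.
             */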
2449 (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
2450
2451 if (self->interpBreak.ctl.subMode != 0) {
2452                dvmReportPostNativeInvoke(methodToCall, self, fp);
2453            }
2454
2455 /* pop frame off */
2456 dvmPopJniLocals(self, newSaveArea);
2457            self->interpSave.curFrame = fp;
2458
2459 /*
2460 * If the native code threw an exception, or interpreted code
2461 * invoked by the native call threw one and nobody has cleared
2462 * it, jump to our local exception handling.
2463 */
2464 if (dvmCheckException(self)) {
2465 LOGV("Exception thrown by/below native code\n");
2466 GOTO_exceptionThrown();
2467 }
2468
2469 ILOGD("> retval=0x%llx (leaving native)", retval.j);
2470 ILOGD("> (return from native %s.%s to %s.%s %s)",
2471 methodToCall->clazz->descriptor, methodToCall->name,
2472 curMethod->clazz->descriptor, curMethod->name,
2473 curMethod->shorty);
2474
2475 //u2 invokeInstr = INST_INST(FETCH(0));
2476 if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
2477 invokeInstr <= OP_INVOKE_INTERFACE*/)
2478 {
2479 FINISH(3);
2480 } else {
2481 //LOGE("Unknown invoke instr %02x at %d\n",
2482 // invokeInstr, (int) (pc - curMethod->insns));
2483 assert(false);
2484 }
2485 }
2486 }
2487 assert(false); // should not get here
2488GOTO_TARGET_END
2489
2490/* File: cstubs/enddefs.cpp */
2491
2492/* undefine "magic" name remapping */
2493#undef retval
2494#undef pc
2495#undef fp
2496#undef curMethod
2497#undef methodClassDex
2498#undef self
2499#undef debugTrackedRefStart
2500