auto import from //depot/cupcake/@135843
diff --git a/vm/mterp/out/InterpAsm-allstubs.S b/vm/mterp/out/InterpAsm-allstubs.S
new file mode 100644
index 0000000..a6973ae
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-allstubs.S
@@ -0,0 +1,35 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'allstubs'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+ .balign 64
+.L_OP_NOP: /* dummy */
+
+ .balign 64
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
diff --git a/vm/mterp/out/InterpAsm-armv4.S b/vm/mterp/out/InterpAsm-armv4.S
new file mode 100644
index 0000000..3de7aea
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-armv4.S
@@ -0,0 +1,10012 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv4'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5te/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
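+
+/*
+ * Illustrative sketch (editorial note, not part of the generated file):
+ * a call to "int f(int a, int b, int c, int d, int e)" under these rules
+ * marshals as
+ *
+ *     mov     r0, #1              @ a
+ *     mov     r1, #2              @ b
+ *     mov     r2, #3              @ c
+ *     mov     r3, #4              @ d
+ *     mov     ip, #5
+ *     str     ip, [sp]            @ e, the 5th arg (stack space reserved
+ *                                 @ by the caller, sp kept 8-byte aligned)
+ *     bl      f                   @ result comes back in r0
+ */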
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rGLUE MterpGlue pointer
+ r7 rIBASE interpreted instruction base pointer, used for computed goto
+ r8 rINST first 16-bit code unit of current instruction
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rGLUE r6
+#define rIBASE r7
+#define rINST r8
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc]
+#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc]
+#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp]
+#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp]
+#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP}
+#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP}
+
+/*
+ * "export" the PC to the stack frame, f/b/o future exception objects. Must
+ * be done *before* something calls dvmThrowException.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(StackSaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) - 1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() ldrh rINST, [rPC]
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]!
+
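+/*
+ * Expansion example (editorial note): FETCH_ADVANCE_INST(2) becomes
+ *     ldrh    rINST, [rPC, #4]!
+ * i.e. rPC advances by 4 bytes (2 code units) and rINST is loaded from
+ * the updated rPC in one pre-indexed, write-back load.
+ */
+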
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)]
+#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)]
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)]
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(_reg) and _reg, rINST, #255
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6
+
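+/*
+ * Worked example (editorial note): every handler below is aligned with
+ * .balign 64, so for opcode 0x0f (return) GOTO_OPCODE computes
+ *     pc = rIBASE + (0x0f << 6) = rIBASE + 0x3c0
+ * jumping straight into that opcode's 64-byte slot.
+ */
+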
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2]
+#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
+
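+/*
+ * Addressing sketch (editorial note): Dalvik register vN lives at
+ * rFP + N*4, so with r1 = 5, GET_VREG(r0, r1) expands to
+ *     ldr     r0, [rFP, r1, lsl #2]   @ r0<- fp[5]
+ */
+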
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
+
+/* File: armv5te/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines
+ * ===========================================================================
+ */
+
+/*
+ * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5. Essentially a
+ * one-way branch.
+ *
+ * May modify IP. Does not modify LR.
+ */
+.macro LDR_PC source
+ ldr pc, \source
+.endm
+
+/*
+ * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
+ * Jump to subroutine.
+ *
+ * May modify IP and LR.
+ */
+.macro LDR_PC_LR source
+ mov lr, pc
+ ldr pc, \source
+.endm
+
+/*
+ * Macro for "LDMFD SP!, {...regs...,PC}".
+ *
+ * May modify IP and LR.
+ */
+.macro LDMFD_PC regs
+ ldmfd sp!, {\regs,pc}
+.endm
+
+
+/* File: armv5te/entry.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+/*
+ * We don't have formal stack frames, so gdb scans upward in the code
+ * to find the start of the function (a label with the %function type),
+ * and then looks at the next few instructions to figure out what
+ * got pushed onto the stack. From this it figures out how to restore
+ * the registers, including PC, for the previous stack frame. If gdb
+ * sees a non-function label, it stops scanning, so either we need to
+ * have nothing but assembler-local labels between the entry point and
+ * the break, or we need to fake it out.
+ *
+ * When this is defined, we add some stuff to make gdb less confused.
+ */
+#define ASSIST_DEBUGGER 1
+
+ .text
+ .align 2
+ .global dvmMterpStdRun
+ .type dvmMterpStdRun, %function
+
+/*
+ * On entry:
+ * r0 MterpGlue* glue
+ *
+ * This function returns a boolean "changeInterp" value. The return comes
+ * via a call to dvmMterpStdBail().
+ */
+dvmMterpStdRun:
+#define MTERP_ENTRY1 \
+ .save {r4-r10,fp,lr}; \
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+#define MTERP_ENTRY2 \
+ .pad #4; \
+    sub     sp, sp, #4                  @ align sp to 64 bits
+
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+
+ /* save stack pointer, add magic word for debuggerd */
+ str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return
+
+ /* set up "named" registers, figure out entry point */
+ mov rGLUE, r0 @ set rGLUE
+ ldrb r1, [r0, #offGlue_entryPoint] @ InterpEntry enum is char
+ LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue"
+ adr rIBASE, dvmAsmInstructionStart @ set rIBASE
+ cmp r1, #kInterpEntryInstr @ usual case?
+ bne .Lnot_instr @ no, handle it
+
+ /* start executing the instruction at rPC */
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.Lnot_instr:
+ cmp r1, #kInterpEntryReturn @ were we returning from a method?
+ beq common_returnFromMethod
+
+.Lnot_return:
+ cmp r1, #kInterpEntryThrow @ were we throwing an exception?
+ beq common_exceptionThrown
+
+.Lbad_arg:
+ ldr r0, strBadEntryPoint
+ @ r1 holds value of entryPoint
+ bl printf
+ bl dvmAbort
+ .fnend
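+
+/*
+ * Usage sketch (editorial note, hypothetical call site): from C this
+ * entry point is invoked roughly as
+ *     bool changeInterp = dvmMterpStdRun(glue);
+ * with the return value actually delivered by dvmMterpStdBail() below.
+ */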
+
+
+ .global dvmMterpStdBail
+ .type dvmMterpStdBail, %function
+
+/*
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper. The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We pushed some registers on the stack in dvmMterpStdRun, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * r0 MterpGlue* glue
+ * r1 bool changeInterp
+ */
+dvmMterpStdBail:
+ ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP
+ mov r0, r1 @ return the changeInterp value
+    add     sp, sp, #4                  @ undo the 64-bit alignment pad
+ LDMFD_PC "r4-r10,fp" @ restore 9 regs and return
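+
+/*
+ * C analogy (editorial sketch): dvmMterpStdBail(glue, change) behaves
+ * like
+ *     longjmp(glue->bailPtr, change);
+ * control reappears in dvmMterpStdRun's caller with "change" in r0 as
+ * the boolean return value.
+ */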
+
+
+/*
+ * String references.
+ */
+strBadEntryPoint:
+ .word .LstrBadEntryPoint
+
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOP: /* 0x00 */
+/* File: armv5te/OP_NOP.S */
+ FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ .type dalvik_inst, %function
+dalvik_inst:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+ .fnend
+#endif
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE: /* 0x01 */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
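+/*
+ * Decode example (editorial note): for "move v1, v2" the code unit is
+ * 0x2101 (B=2 in bits 15:12, A=1 in bits 11:8, opcode 0x01), so
+ * r1 = 0x2101 >> 12 = 2 and r0 = (0x2101 >> 8) & 15 = 1.
+ */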
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/OP_MOVE_WIDE.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r2, r2, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 1) @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/OP_MOVE_WIDE_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 2) @ r3<- BBBB
+ FETCH(r2, 1) @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/OP_MOVE_OBJECT.S */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/OP_MOVE_OBJECT_16.S */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/OP_MOVE_EXCEPTION.S */
+ /* move-exception vAA */
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ SET_VREG(r3, r2) @ fp[AA]<- exception obj
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offThread_exception] @ dvmClearException bypass
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/OP_RETURN_VOID.S */
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN: /* 0x0f */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/OP_RETURN_WIDE.S */
+ /*
+ * Return a 64-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ stmia r3, {r0-r1} @ retval<- r0/r1
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/OP_RETURN_OBJECT.S */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_4: /* 0x12 */
+/* File: armv5te/OP_CONST_4.S */
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ mov r0, rINST, lsr #8 @ r0<- A+
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r1, r0) @ fp[A]<- r1
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_16: /* 0x13 */
+/* File: armv5te/OP_CONST_16.S */
+ /* const/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST: /* 0x14 */
+/* File: armv5te/OP_CONST.S */
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/OP_CONST_HIGH16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/OP_CONST_WIDE_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/OP_CONST_WIDE_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH(r0, 1) @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S(r2, 2) @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/OP_CONST_WIDE.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (low middle)
+ FETCH(r2, 3) @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH(r3, 4) @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/OP_CONST_STRING.S */
+ /* const/string vAA, String@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_STRING_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/OP_CONST_STRING_JUMBO.S */
+ /* const/string vAA, String@BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBBbbbb]
+ cmp r0, #0
+ beq .LOP_CONST_STRING_JUMBO_resolve
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/OP_CONST_CLASS.S */
+ /* const/class vAA, Class@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_CLASS_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/OP_MONITOR_ENTER.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ cmp r1, #0 @ null object?
+#ifdef WITH_MONITOR_TRACKING
+ EXPORT_PC() @ export PC so we can grab stack trace
+#endif
+ beq common_errNullObject @ null object, throw an exception
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl dvmLockObject @ call(self, obj)
+#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ ldr r1, [r0, #offThread_exception] @ check for exception
+ cmp r1, #0
+ bne common_exceptionThrown @ exception raised, bail out
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/OP_MONITOR_EXIT.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ EXPORT_PC() @ before fetch: export the PC
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ bl dvmUnlockObject @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, exception is pending
+ FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/OP_CHECK_CAST.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r2, 1) @ r2<- BBBB
+ GET_VREG(r9, r3) @ r9<- object
+ ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex
+ cmp r9, #0 @ is object null?
+ ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses
+ beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds
+ ldr r1, [r0, r2, lsl #2] @ r1<- resolved class
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_CHECK_CAST_resolve @ not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+ cmp r0, r1 @ same class (trivial success)?
+ bne .LOP_CHECK_CAST_fullcheck @ no, do full check
+.LOP_CHECK_CAST_okay:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/OP_INSTANCE_OF.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ and r9, r9, #15 @ r9<- A
+ cmp r0, #0 @ is object null?
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex
+ beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0
+ FETCH(r3, 1) @ r3<- CCCC
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses
+ ldr r1, [r2, r3, lsl #2] @ r1<- resolved class
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now
+.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
+ cmp r0, r1 @ same class (trivial success)?
+ beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish
+ b .LOP_INSTANCE_OF_fullcheck @ no, do full check
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/OP_ARRAY_LENGTH.S */
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ mov r2, rINST, lsr #8 @ r2<- A+
+ GET_VREG(r0, r1) @ r0<- vB (object ref)
+ and r2, r2, #15 @ r2<- A
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r3, [r0, #offArrayObject_length] @ r3<- array length
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r3, r2) @ vB<- length
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/OP_NEW_INSTANCE.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ EXPORT_PC() @ req'd for init, resolve, alloc
+ cmp r0, #0 @ already resolved?
+ beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now
+.LOP_NEW_INSTANCE_resolved: @ r0=class
+ ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
+ cmp r1, #CLASS_INITIALIZED @ has class been initialized?
+ bne .LOP_NEW_INSTANCE_needinit @ no, init class now
+.LOP_NEW_INSTANCE_initialized: @ r0=class
+ ldr r3, [r0, #offClassObject_accessFlags] @ r3<- clazz->accessFlags
+ tst r3, #(ACC_INTERFACE|ACC_ABSTRACT) @ abstract or interface?
+ mov r1, #ALLOC_DONT_TRACK @ flags for alloc call
+ beq .LOP_NEW_INSTANCE_finish @ concrete class, continue
+ b .LOP_NEW_INSTANCE_abstract @ fail
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/OP_NEW_ARRAY.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ FETCH(r2, 1) @ r2<- CCCC
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ GET_VREG(r1, r0) @ r1<- vB (array length)
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ cmp r1, #0 @ check length
+ ldr r0, [r3, r2, lsl #2] @ r0<- resolved class
+ bmi common_errNegativeArraySize @ negative length, bail
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ req'd for resolve, alloc
+ bne .LOP_NEW_ARRAY_finish @ resolved, continue
+ b .LOP_NEW_ARRAY_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
+8: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
+8: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/OP_FILL_ARRAY_DATA.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG(r0, r3) @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ EXPORT_PC();
+    bl      dvmInterpHandleFillArrayData @ fill the array with predefined data
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq common_exceptionThrown @ has exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW: /* 0x27 */
+/* File: armv5te/OP_THROW.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (exception object)
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ @ bypass dvmSetException, just store it
+ str r1, [r0, #offThread_exception] @ thread->exception<- obj
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO: /* 0x28 */
+/* File: armv5te/OP_GOTO.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended)
+ mov r9, r9, lsl #1 @ r9<- byte offset
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
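+/*
+ * Worked example (editorial note): "goto -2" encodes AA = 0xfe; the
+ * movs/asr leaves r9 = -2 code units (and sets N), the lsl #1 doubles
+ * it to a -4 byte offset, so bmi takes the common_backwardBranch path.
+ */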
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/OP_GOTO_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended)
+ movs r9, r0, asl #1 @ r9<- byte offset, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/OP_GOTO_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". The ORRS
+ * instruction doesn't affect the V flag, so we need to clear it
+ * explicitly.
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(r0, 1) @ r0<- aaaa (lo)
+ FETCH(r1, 2) @ r1<- AAAA (hi)
+ cmp ip, ip @ (clear V flag during stall)
+ orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign
+ mov r9, r0, asl #1 @ r9<- byte offset
+ ble common_backwardBranch @ backward branch, do periodic checks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
+ movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ beq common_backwardBranch @ (want to use BLE but V is unknown)
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/OP_SPARSE_SWITCH.S */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
+ movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ beq common_backwardBranch @ (want to use BLE but V is unknown)
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ copy to arg registers
+ mov r1, r10
+ bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
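+/*
+ * NaN-bias note (editorial): per the Dalvik spec, cmpl-float stores -1
+ * when either operand is NaN and cmpg-float stores +1, so e.g.
+ *     cmpl(NaN, 0.0f) == -1,   cmpg(NaN, 0.0f) == +1
+ * letting the compiler pick whichever bias makes the following if-lt or
+ * if-gt test fail on NaN, as Java requires.
+ */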
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/OP_CMPG_FLOAT.S */
+/* File: armv5te/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ copy to arg registers
+ mov r1, r10
+ bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r9, r0, #255 @ r9<- BB
+ mov r10, r0, lsr #8 @ r10<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
+ add r10, rFP, r10, lsl #2 @ r10<- &fp[CC]
+ ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1
+ bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_DOUBLE_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/OP_CMPG_DOUBLE.S */
+/* File: armv5te/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r9, r0, #255 @ r9<- BB
+ mov r10, r0, lsr #8 @ r10<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
+ add r10, rFP, r10, lsl #2 @ r10<- &fp[CC]
+ ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1
+ bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_DOUBLE_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/OP_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .LOP_CMP_LONG_less @ signed compare on high part
+ bgt .LOP_CMP_LONG_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .LOP_CMP_LONG_greater @ unsigned compare on low part
+ bne .LOP_CMP_LONG_less
+ b .LOP_CMP_LONG_finish @ equal; r1 already holds 0
+
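+/*
+ * Worked example (editorial note): comparing -1L with 0L, the high words
+ * are 0xffffffff vs 0x00000000, so the signed compare above already
+ * branches to "less"; the unsigned low-word compare is only reached when
+ * the high words match exactly.
+ */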
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/OP_IF_EQ.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ bne 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
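+/*
+ * Offset example (editorial note): a taken "if-eq vA, vB, -5" fetches
+ * CCCC = 0xfffb (-5 code units) and asl #1 yields r9 = -10 bytes,
+ * triggering the backward-branch checks; a not-taken branch keeps
+ * r9 = 4, i.e. falls through past this 2-unit instruction.
+ */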
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NE: /* 0x33 */
+/* File: armv5te/OP_IF_NE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ beq 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LT: /* 0x34 */
+/* File: armv5te/OP_IF_LT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ bge 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GE: /* 0x35 */
+/* File: armv5te/OP_IF_GE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ blt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GT: /* 0x36 */
+/* File: armv5te/OP_IF_GT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ ble 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LE: /* 0x37 */
+/* File: armv5te/OP_IF_LE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ bgt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/OP_IF_EQZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vAA, 0)
+ bne 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/OP_IF_NEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
+    cmp     r2, #0                      @ compare (vAA, 0)
+ beq 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/OP_IF_LTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ bge 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/OP_IF_GEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ blt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/OP_IF_GTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ ble 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/OP_IF_LEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ bgt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/OP_UNUSED_3E.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/OP_UNUSED_3F.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/OP_UNUSED_40.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/OP_UNUSED_41.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/OP_UNUSED_42.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/OP_UNUSED_43.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET: /* 0x44 */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
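+ /*
+ * Roughly, in C (a sketch, not generated output; field names follow the
+ * offArrayObject_* offsets used above):
+ *
+ *     ArrayObject* a = (ArrayObject*) vBB;
+ *     u4 i = (u4) vCC;
+ *     if (a == NULL)      goto common_errNullObject;
+ *     if (i >= a->length) goto common_errArrayIndex;   // unsigned cmp
+ *     vAA = ((s4*) a->contents)[i];                    // ldr, width 4
+ */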
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: armv4/OP_AGET_WIDE.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcc .LOP_AGET_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
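+ /*
+ * Sketch of the fast path in C; the 64-bit element copy itself lives in
+ * the .LOP_AGET_WIDE_finish stub (not shown here):
+ *
+ *     ArrayObject* a = (ArrayObject*) vBB;
+ *     u4 i = (u4) vCC;
+ *     if (a == NULL)      goto common_errNullObject;
+ *     if (i >= a->length) goto common_errArrayIndex;
+ *     // finish: copy the 8-byte element at a->contents + i*8
+ *     // into vAA/vAA+1
+ */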
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/OP_AGET_OBJECT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/OP_AGET_BOOLEAN.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/OP_AGET_BYTE.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/OP_AGET_CHAR.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/OP_AGET_SHORT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT: /* 0x4b */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
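+ /*
+ * The mirror image of OP_AGET, sketched in C (store width and
+ * instruction vary per aput flavor):
+ *
+ *     ArrayObject* a = (ArrayObject*) vBB;
+ *     u4 i = (u4) vCC;
+ *     if (a == NULL)      goto common_errNullObject;
+ *     if (i >= a->length) goto common_errArrayIndex;
+ *     ((s4*) a->contents)[i] = vAA;                    // str, width 4
+ */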
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: armv4/OP_APUT_WIDE.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcc .LOP_APUT_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/OP_APUT_OBJECT.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ *
+ * Note: unlike the other array ops, this handler decodes BB/CC with a
+ * single FETCH plus and/shift rather than a pair of FETCH_Bs.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r1, r2) @ r1<- vBB (array object)
+ GET_VREG(r0, r3) @ r0<- vCC (requested index)
+ cmp r1, #0 @ null array object?
+ GET_VREG(r9, r9) @ r9<- vAA
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length
+ add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width
+ cmp r0, r3 @ compare unsigned index, length
+ bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on
+ b common_errArrayIndex @ index >= length, bail
+
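+ /*
+ * Sketch in C: only the null and bounds checks happen inline here; the
+ * reference store, with whatever type check it requires, is left to the
+ * .LOP_APUT_OBJECT_finish stub (not shown here):
+ *
+ *     ArrayObject* a = (ArrayObject*) vBB;
+ *     u4 i = (u4) vCC;
+ *     if (a == NULL)      goto common_errNullObject;
+ *     if (i >= a->length) goto common_errArrayIndex;
+ *     // finish: ((Object**) a->contents)[i] = (Object*) vAA;
+ */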
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/OP_APUT_BOOLEAN.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/OP_APUT_BYTE.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/OP_APUT_CHAR.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/OP_APUT_SHORT.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET: /* 0x52 */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_finish
+ b common_exceptionThrown
+
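+ /*
+ * Sketch in C: field lookups are memoized in pDvmDex->pResFields, so
+ * after the first execution the fast path is a single table load:
+ *
+ *     InstField* f = pDvmDex->pResFields[CCCC];
+ *     if (f == NULL) {                     // not yet resolved
+ *         f = dvmResolveInstField(curMethod->clazz, CCCC);
+ *         if (f == NULL) goto common_exceptionThrown;
+ *     }
+ *     // .LOP_IGET_finish (not shown): null-check the object in vB,
+ *     // then load the field value into vA
+ */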
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: armv4/OP_IGET_WIDE.S */
+ /*
+ * Wide (64-bit) instance field get.
+ */
+ /* iget-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_WIDE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_WIDE_finish
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/OP_IGET_OBJECT.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_OBJECT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_OBJECT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/OP_IGET_BOOLEAN.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BOOLEAN_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/OP_IGET_BYTE.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BYTE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BYTE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/OP_IGET_CHAR.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_CHAR_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_CHAR_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/OP_IGET_SHORT.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_SHORT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_SHORT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT: /* 0x59 */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv4/OP_IPUT_WIDE.S */
+ /* iput-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_WIDE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_WIDE_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/OP_IPUT_OBJECT.S */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_OBJECT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_OBJECT_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/OP_IPUT_BOOLEAN.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/OP_IPUT_BYTE.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BYTE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BYTE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/OP_IPUT_CHAR.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_CHAR_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_CHAR_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/OP_IPUT_SHORT.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_SHORT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_SHORT_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET: /* 0x60 */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_resolve @ yes, do resolve
+.LOP_SGET_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
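+ /*
+ * Sketch in C: the same memoized-resolution pattern as the iget family,
+ * but the value lives in the StaticField itself, so no object is needed:
+ *
+ *     StaticField* f = pDvmDex->pResFields[BBBB];
+ *     if (f == NULL) goto resolve;         // .LOP_SGET_resolve, then back
+ *     vAA = f->value;                      // offStaticField_value
+ */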
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: armv4/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ /* sget-wide vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_WIDE_resolve @ yes, do resolve
+.LOP_SGET_WIDE_finish:
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r0, r0, #offStaticField_value
+ ldmia r0, {r2-r3} @ r2/r3<- field value (aligned)
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ stmia r1, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/OP_SGET_OBJECT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_OBJECT_resolve @ yes, do resolve
+.LOP_SGET_OBJECT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/OP_SGET_BOOLEAN.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve
+.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/OP_SGET_BYTE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BYTE_resolve @ yes, do resolve
+.LOP_SGET_BYTE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/OP_SGET_CHAR.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_CHAR_resolve @ yes, do resolve
+.LOP_SGET_CHAR_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/OP_SGET_SHORT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_SHORT_resolve @ yes, do resolve
+.LOP_SGET_SHORT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT: /* 0x67 */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_resolve @ yes, do resolve
+.LOP_SPUT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv4/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ /* sput-wide vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_WIDE_resolve @ yes, do resolve
+.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ add r0, r0, #offStaticField_value
+ stmia r0, {r2-r3} @ field<- vAA/vAA+1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
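+ /*
+ * Sketch in C: the 64-bit static put moves both 32-bit halves of the
+ * register pair with ldmia/stmia:
+ *
+ *     StaticField* f = pDvmDex->pResFields[BBBB];
+ *     if (f == NULL) goto resolve;         // .LOP_SPUT_WIDE_resolve
+ *     memcpy(&f->value, &fp[AA], 8);       // vAA/vAA+1 -> field value
+ */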
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/OP_SPUT_OBJECT.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve
+.LOP_SPUT_OBJECT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/OP_SPUT_BOOLEAN.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve
+.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/OP_SPUT_BYTE.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BYTE_resolve @ yes, do resolve
+.LOP_SPUT_BYTE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/OP_SPUT_CHAR.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_CHAR_resolve @ yes, do resolve
+.LOP_SPUT_CHAR_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/OP_SPUT_SHORT.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_SHORT_resolve @ yes, do resolve
+.LOP_SPUT_SHORT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
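+ /*
+ * Sketch in C: resolve (and memoize) the declared method, then the
+ * .LOP_INVOKE_VIRTUAL_continue stub (not shown here) selects the actual
+ * target through the receiver's vtable:
+ *
+ *     Method* base = pDvmDex->pResMethods[BBBB];
+ *     if (base == NULL) {
+ *         base = dvmResolveMethod(curMethod->clazz, BBBB, METHOD_VIRTUAL);
+ *         if (base == NULL) goto common_exceptionThrown;
+ *     }
+ *     // continue: null-check "this", pick this->clazz->vtable entry,
+ *     // jump to common_invokeMethod{NoRange,Range}
+ */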
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r2, #0 @ null "this"?
+ ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+ cmp r2, #0 @ null "this" ref?
+ bne common_invokeMethodNoRange @ no, continue on
+ b common_errNullObject @ yes, throw exception
+
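+ /*
+ * Sketch in C: direct calls resolve like static ones but must also
+ * null-check the receiver, per the header comment above:
+ *
+ *     Method* m = pDvmDex->pResMethods[BBBB];
+ *     if (m == NULL) goto resolve;         // .LOP_INVOKE_DIRECT_resolve
+ *     if (GET_VREG(D) == NULL) goto common_errNullObject;  // "this"
+ *     goto common_invokeMethodNoRange;
+ */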
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodNoRange @ yes, continue on
+0: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne common_invokeMethodNoRange @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r0, r2) @ r0<- first arg ("this")
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex
+ cmp r0, #0 @ null obj?
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodNoRange @ jump to common handler
+
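+ /*
+ * Sketch in C: an interface target depends on the receiver's class, so
+ * instead of the pResMethods memoization the handler goes through the
+ * per-class cache helper on every execution:
+ *
+ *     Object* thisObj = (Object*) GET_VREG(C);
+ *     if (thisObj == NULL) goto common_errNullObject;
+ *     Method* m = dvmFindInterfaceMethodInCache(thisObj->clazz, BBBB,
+ *                                               curMethod, pDvmDex);
+ *     if (m == NULL) goto common_exceptionThrown;
+ *     goto common_invokeMethodNoRange;     // or ...Range for /range
+ */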
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/OP_UNUSED_73.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r2, #0 @ null "this"?
+ ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+ cmp r2, #0 @ null "this" ref?
+ bne common_invokeMethodRange @ no, continue on
+ b common_errNullObject @ yes, throw exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodRange @ yes, continue on
+0: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne common_invokeMethodRange @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r0, r2) @ r0<- first arg ("this")
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex
+ cmp r0, #0 @ null obj?
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodRange @ jump to common handler
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/OP_UNUSED_79.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/OP_UNUSED_7A.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/OP_NEG_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsb r0, r0, #0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
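+/*
+ * Decode example (illustrative values): for "neg-int v2, v5" the code
+ * unit is 0x527b -- opcode 0x7b in bits 7:0, A=2 in bits 11:8, B=5 in
+ * bits 15:12.  "lsr #12" extracts B directly, while A needs the
+ * "lsr #8" plus "and #15" pair to strip B back off.
+ */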
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/OP_NOT_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/OP_NEG_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsbs r0, r0, #0 @ optional op; may set condition codes
+ rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
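+/*
+ * (The rsbs/rsc pair computes 0 - vB as a 64-bit subtraction: rsbs
+ * negates the low word and leaves the borrow in the carry flag, and
+ * rsc folds that borrow into the negated high word.)
+ */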
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/OP_NOT_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ optional op; may set condition codes
+ mvn r1, r1 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/OP_NEG_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
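+/*
+ * (Adding 0x80000000 flips only bit 31 -- any carry out of the top bit
+ * is discarded -- so it toggles the IEEE-754 sign bit without touching
+ * the exponent or mantissa, the same effect as an eor with that
+ * constant.)
+ */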
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/OP_NEG_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/OP_INT_TO_LONG.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
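+/*
+ * ("asr #31" replicates the sign bit of the low word across the high
+ * word: 0x00000000 for non-negative values, 0xffffffff for negative
+ * ones -- exactly the int-to-long widening rule.)
+ */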
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/OP_INT_TO_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_i2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/OP_INT_TO_DOUBLE.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_i2d @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/OP_LONG_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/OP_LONG_TO_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/OP_FLOAT_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_f2iz @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+#if 0
+@include "armv5te/unop.S" {"instr":"bl f2i_doconv"}
+@break
+/*
+ * Convert the float in r0 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2i_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x4f000000 @ (float)maxint
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxint?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0x80000000 @ return maxint (7fffffff)
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xcf000000 @ (float)minint
+ bl __aeabi_fcmple @ is arg <= minint?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0x80000000 @ return minint (80000000)
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ ldmeqfd sp!, {r4, pc} @ return zero for NaN
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2iz @ convert float to int
+ ldmfd sp!, {r4, pc}
+#endif
+
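+/*
+ * For reference, the disabled f2i_doconv above implements the Java
+ * narrowing rule, roughly (illustrative C sketch, not generated code):
+ *
+ *     if (f >= (float)INT_MAX) return INT_MAX;   // 0x4f000000 encodes 2^31
+ *     if (f <= (float)INT_MIN) return INT_MIN;   // 0xcf000000 encodes -2^31
+ *     if (f != f)              return 0;         // NaN maps to zero
+ *     return (int) f;                            // ordinary truncation
+ *
+ * ((float)INT_MAX rounds up to 2^31, which is why 0x4f000000 works as
+ * the upper bound.)
+ */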
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/OP_FLOAT_TO_LONG.S */
+@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl f2l_doconv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_f2d @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/OP_DOUBLE_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_d2iz @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+#if 0
+@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"}
+@break
+/*
+ * Convert the double in r0/r1 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+d2i_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ ldr r2, .LOP_DOUBLE_TO_INT_maxlo @ (double)maxint, lo
+ ldr r3, .LOP_DOUBLE_TO_INT_maxhi @ (double)maxint, hi
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxint?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0x80000000 @ return maxint (7fffffff)
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ ldr r3, .LOP_DOUBLE_TO_INT_min @ (double)minint, hi
+ mov r2, #0 @ (double)minint, lo
+ bl __aeabi_dcmple @ is arg <= minint?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0x80000000 @ return minint (80000000)
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ beq 1f @ return zero for NaN
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2iz @ convert double to int
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+.LOP_DOUBLE_TO_INT_maxlo:
+ .word 0xffc00000 @ maxint, as a double (low word)
+.LOP_DOUBLE_TO_INT_maxhi:
+ .word 0x41dfffff @ maxint, as a double (high word)
+.LOP_DOUBLE_TO_INT_min:
+ .word 0xc1e00000 @ minint, as a double (high word)
+#endif
+
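+/*
+ * (The split constant 0x41dfffff:0xffc00000 above is exactly
+ * 2147483647.0 -- maxint is representable as a double, unlike as a
+ * float -- and 0xc1e00000:0x00000000 is -2147483648.0; apart from the
+ * wider compares, the clamping logic mirrors f2i_doconv.)
+ */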
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/OP_DOUBLE_TO_LONG.S */
+@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl d2l_doconv @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_d2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/OP_INT_TO_BYTE.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #24 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, asr #24 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
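+/*
+ * (Shift example: for vB = 0x000000ff, "asl #24" gives 0xff000000 and
+ * the arithmetic "asr #24" gives 0xffffffff, i.e. (byte)0xff == -1.
+ * int-to-char below substitutes lsr for asr to zero-extend instead.)
+ */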
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/OP_INT_TO_CHAR.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #16 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/OP_INT_TO_SHORT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #16 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, asr #16 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/OP_ADD_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
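+/*
+ * Decode example (illustrative values): for "add-int v1, v2, v3" the
+ * second code unit fetched into r0 is 0x0302 -- CC=3 in bits 15:8 and
+ * BB=2 in bits 7:0 -- while AA=1 comes from bits 15:8 of rINST itself.
+ */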
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/OP_SUB_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/OP_MUL_INT.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/OP_DIV_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT: /* 0x94 */
+/* File: armv5te/OP_REM_INT.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT: /* 0x95 */
+/* File: armv5te/OP_AND_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT: /* 0x96 */
+/* File: armv5te/OP_OR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/OP_XOR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/OP_SHL_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/OP_SHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/OP_USHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/OP_ADD_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
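+/*
+ * (adds/adc is the standard two-instruction 64-bit add: adds sums the
+ * low words and records the carry-out in the flags, and adc folds that
+ * carry into the high-word sum.  OP_SUB_LONG below uses the matching
+ * subs/sbc borrow chain.)
+ */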
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/OP_SUB_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/OP_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_MUL_LONG_finish
+
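+/*
+ * (Equivalently: with vBB = W:X and vCC = Y:Z as high:low words, the
+ * product mod 2^64 is ZX + 2^32 * (ZW + YX).  umull supplies the full
+ * 64-bit ZX; the two 32-bit multiplies supply the cross terms, whose
+ * own high halves fall outside the 64-bit result.)
+ */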
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/OP_DIV_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/OP_REM_LONG.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/OP_AND_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/OP_OR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/OP_XOR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/OP_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance down to its low 6 bits.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHL_LONG_finish
+
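+/*
+ * (Shift composition: for a distance n < 32 the high word is
+ * (hi << n) | (lo >> (32-n)); "subs ip, r2, #32" goes non-negative
+ * when n >= 32, so movpl replaces that with lo << (n-32).  SHR and
+ * USHR below mirror the same trick on the low word.)
+ */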
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/OP_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance down to its low 6 bits.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/OP_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance down to its low 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_USHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/OP_ADD_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fadd @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/OP_SUB_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fsub @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/OP_MUL_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fmul @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/OP_DIV_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fdiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/OP_REM_FLOAT.S */
+/* EABI doesn't define a float remainder function, but libm does */
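+/*
+ * Illustrative note (sketch): rem-float therefore calls libm's fmodf (and
+ * rem-double calls fmod); the result keeps the sign of the dividend, e.g.
+ * fmodf(-5.5f, 3.0f) == -2.5f, matching the Java/Dalvik % semantics.
+ */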
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/OP_ADD_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
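+/*
+ * Illustrative sketch (not generator output): the wide form in rough C.
+ * Register pairs vN/vN+1 hold one 64-bit value, moved two words at a time
+ * by the ldmia/stmia below ("vregs" is a hypothetical uint32_t frame).
+ *
+ *     uint64_t vbb, vcc, vaa;
+ *     memcpy(&vbb, &vregs[bb], 8);          // ldmia r2, {r0-r1}
+ *     memcpy(&vcc, &vregs[cc], 8);          // ldmia r3, {r2-r3}
+ *     vaa = op64(vbb, vcc);                 // __aeabi_dadd for this handler
+ *     memcpy(&vregs[aa], &vaa, 8);          // stmia r9, {r0,r1}
+ */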
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dadd @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/OP_SUB_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dsub @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/OP_MUL_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dmul @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/OP_DIV_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ddiv @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/OP_REM_DOUBLE.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/OP_ADD_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
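+/*
+ * Illustrative sketch (not generator output): the /2addr decode in rough C.
+ * A and B are 4-bit register numbers packed into the high byte of the
+ * instruction word, and vA is both source and destination.
+ *
+ *     uint32_t a = (inst >> 8) & 0x0f;
+ *     uint32_t b = inst >> 12;
+ *     vregs[a] = vregs[a] + vregs[b];       // "add r0, r0, r1" here
+ *     pc += 1;                              // single code unit
+ */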
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/OP_SUB_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/OP_MUL_INT_2ADDR.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/OP_DIV_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/OP_REM_INT_2ADDR.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/OP_AND_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/OP_OR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/OP_XOR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/OP_SHL_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/OP_SHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/OP_USHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/OP_ADD_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
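+/*
+ * Illustrative note (sketch): for add-long the "instr" pair below is
+ * adds/adc, i.e. a 64-bit add built from 32-bit halves. In C the carry out
+ * of the low word can be reconstructed with an unsigned compare:
+ *
+ *     uint32_t lo = a_lo + b_lo;
+ *     uint32_t hi = a_hi + b_hi + (lo < a_lo);   // carry from the low word
+ */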
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/OP_SUB_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/OP_MUL_LONG_2ADDR.S */
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See OP_MUL_LONG for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
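+/*
+ * Illustrative sketch (not generator output): the partial-product scheme
+ * used below, with vA = W:X and vB = Y:Z (high:low 32-bit halves):
+ *
+ *     uint64_t zx = (uint64_t)z * x;        // umull: full 64-bit product
+ *     uint32_t cross = z * w + y * x;       // mul + mla: low 32 bits only
+ *     uint64_t result = zx + ((uint64_t)cross << 32);
+ *
+ * The Y*W term is dropped because it only affects bits 64 and up.
+ */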
+ /* mul-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/OP_DIV_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/OP_REM_LONG_2ADDR.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
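+/*
+ * Illustrative note (sketch): conceptually __aeabi_ldivmod(num, den) returns
+ * a { quotient, remainder } pair; rem-long keeps the remainder, which is why
+ * the stmia below stores r2/r3 where div-long/2addr stores r0/r1.
+ */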
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/OP_AND_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/OP_OR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/OP_XOR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/OP_SHL_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
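+/*
+ * Illustrative sketch (not generator output): the composition below in rough
+ * C. Plain C leaves a 32-bit shift by 32 undefined, but ARM register-
+ * specified shifts yield 0 for amounts of 32 or more, which the s == 0 case
+ * (where 32 - s == 32) depends on; the subs/movpl pair patches up s >= 32.
+ *
+ *     s &= 63;
+ *     hi = (hi << s) | (lo >> (32 - s));    // cross bits from the low word
+ *     if (s >= 32) hi = lo << (s - 32);
+ *     lo = lo << s;
+ */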
+ /* shl-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ b .LOP_SHL_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/OP_SHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ b .LOP_SHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/OP_USHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ b .LOP_USHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fadd @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fsub @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fmul @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fdiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dadd @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dsub @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dmul @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ddiv @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/OP_ADD_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
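+/*
+ * Illustrative sketch (not generator output): the lit16 decode in rough C;
+ * the second operand is the sign-extended literal from the next code unit.
+ *
+ *     int32_t lit = (int16_t)pc[1];         // ssssCCCC
+ *     uint32_t b = inst >> 12;
+ *     uint32_t a = (inst >> 8) & 0x0f;
+ *     vregs[a] = vregs[b] + lit;            // "add r0, r0, r1" here
+ *     pc += 2;
+ */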
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
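+/* note the reversed operands: the result is #+CCCC - vB, hence the "rsb" below */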
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/OP_MUL_INT_LIT16.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
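+/* (on CPUs prior to ARMv6, MUL with Rd == Rm is unpredictable, so the operands are swapped) */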
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/OP_DIV_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/OP_REM_INT_LIT16.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/OP_AND_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/OP_OR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/OP_XOR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/OP_ADD_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/OP_RSUB_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/OP_MUL_INT_LIT8.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/OP_DIV_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
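+ @ (no explicit cmp is needed: the "movs" above already set Z if CC was zero)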
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/OP_REM_INT_LIT8.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/OP_AND_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/OP_OR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/OP_XOR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/OP_SHL_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
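+ @ (Dalvik shifts use only the low five bits of the shift count, hence the mask)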
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/OP_SHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/OP_USHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E3: /* 0xe3 */
+/* File: armv5te/OP_UNUSED_E3.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E4: /* 0xe4 */
+/* File: armv5te/OP_UNUSED_E4.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E5: /* 0xe5 */
+/* File: armv5te/OP_UNUSED_E5.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E6: /* 0xe6 */
+/* File: armv5te/OP_UNUSED_E6.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E7: /* 0xe7 */
+/* File: armv5te/OP_UNUSED_E7.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E8: /* 0xe8 */
+/* File: armv5te/OP_UNUSED_E8.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E9: /* 0xe9 */
+/* File: armv5te/OP_UNUSED_E9.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EA: /* 0xea */
+/* File: armv5te/OP_UNUSED_EA.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EB: /* 0xeb */
+/* File: armv5te/OP_UNUSED_EB.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EC: /* 0xec */
+/* File: armv5te/OP_UNUSED_EC.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_ED: /* 0xed */
+/* File: armv5te/OP_UNUSED_ED.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/OP_EXECUTE_INLINE.S */
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We need to call:
+ * dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref)
+ *
+ * The first four args are in r0-r3, but the last two must be pushed
+ * onto the stack.
+ */
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ FETCH(r10, 1) @ r10<- BBBB
+ add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval
+ EXPORT_PC() @ can throw
+ sub sp, sp, #8 @ make room for arg(s)
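+ @ (reserving 8 bytes rather than 4 also keeps sp 64-bit aligned, per the EABI)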
+ mov r0, rINST, lsr #12 @ r0<- B
+ str r1, [sp] @ push &glue->retval
+ bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ add sp, sp, #8 @ pop stack
+ cmp r0, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EF: /* 0xef */
+/* File: armv5te/OP_UNUSED_EF.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
+ /*
+ * invoke-direct-empty is a no-op in a "standard" interpreter.
+ */
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_F1: /* 0xf1 */
+/* File: armv5te/OP_UNUSED_F1.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv4/OP_IGET_WIDE_QUICK.S */
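+/* armv4 variant: ldrd is only available from ARMv5TE on, so the 64-bit field is loaded with add + ldmia */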
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ add r9, r3, r1 @ r9<- object + offset
+ ldmia r9, {r0-r1} @ r0/r1<- obj.field (64 bits, aligned)
+ and r2, r2, #15 @ r2<- A
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv4/OP_IPUT_WIDE_QUICK.S */
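+/* armv4 variant: strd is only available from ARMv5TE on, so the 64-bit field is stored with add + stmia */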
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A(+)
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r2, r1) @ r2<- fp[B], the object pointer
+ add r3, rFP, r0, lsl #2 @ r3<- &fp[A]
+ cmp r2, #0 @ check object for null
+ ldmia r3, {r0-r1} @ r0/r1<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH(r3, 1) @ r3<- field byte offset
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r2, r2, r3 @ r2<- object + byte offset
+ stmia r2, {r0-r1} @ obj.field (64 bits, aligned)<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
+/* File: armv5te/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r2, r3) @ r2<- vC ("this" ptr)
+ cmp r2, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodNoRange @ continue on
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r2, r3) @ r2<- vC ("this" ptr)
+ cmp r2, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodRange @ continue on
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r3, r10) @ r3<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r3, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodNoRange @ continue on
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r3, r10) @ r3<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r3, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodRange @ continue on
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FC: /* 0xfc */
+/* File: armv5te/OP_UNUSED_FC.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FD: /* 0xfd */
+/* File: armv5te/OP_UNUSED_FD.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FE: /* 0xfe */
+/* File: armv5te/OP_UNUSED_FE.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FF: /* 0xff */
+/* File: armv5te/OP_UNUSED_FF.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+
+ .balign 64
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CONST_STRING */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_resolve:
+ EXPORT_PC()
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_CONST_STRING_JUMBO */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBBBBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_JUMBO_resolve:
+ EXPORT_PC()
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_CONST_CLASS */
+
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * r1: BBBB (Class ref)
+ * r9: target register
+ */
+.LOP_CONST_CLASS_resolve:
+ EXPORT_PC()
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- Class reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_CHECK_CAST */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds class resolved from BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_fullcheck:
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ cmp r0, #0 @ failed?
+ bne .LOP_CHECK_CAST_okay @ no, success
+
+ @ A cast has failed. We need to throw a ClassCastException with the
+ @ class of the object that failed to be cast.
+ EXPORT_PC() @ about to throw
+ ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz
+ ldr r0, .LstrClassCastExceptionPtr
+ ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
+ bl dvmThrowExceptionWithClassMessage
+ b common_exceptionThrown
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r2 holds BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r1, r2 @ r1<- BBBB
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_CHECK_CAST_resolved @ pick up where we left off
+
+.LstrClassCastExceptionPtr:
+ .word .LstrClassCastException
+
+
+/* continuation for OP_INSTANCE_OF */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds class resolved from BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_fullcheck:
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ @ fall through to OP_INSTANCE_OF_store
+
+ /*
+ * r0 holds boolean result
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_store:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Trivial test succeeded, save and bail.
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_trivial:
+ mov r0, #1 @ indicate success
+ @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r3 holds BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ mov r1, r3 @ r1<- BBBB
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_INSTANCE_OF_resolved @ pick up where we left off
+
+
+/* continuation for OP_NEW_INSTANCE */
+
+ .balign 32 @ minimize cache lines
+.LOP_NEW_INSTANCE_finish: @ r0=class
+ bl dvmAllocObject @ r0<- new object
+ mov r3, rINST, lsr #8 @ r3<- AA
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Class initialization required.
+ *
+ * r0 holds class object
+ */
+.LOP_NEW_INSTANCE_needinit:
+ mov r9, r0 @ save r0
+ bl dvmInitClass @ initialize class
+ cmp r0, #0 @ check boolean result
+ mov r0, r9 @ restore r0
+ bne .LOP_NEW_INSTANCE_initialized @ success, continue
+ b common_exceptionThrown @ failed, deal with init exception
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r1 holds BBBB
+ */
+.LOP_NEW_INSTANCE_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ bne .LOP_NEW_INSTANCE_resolved @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * We can't instantiate an abstract class or interface, so throw an
+ * InstantiationError with the class descriptor as the message.
+ *
+ * r0 holds class object
+ */
+.LOP_NEW_INSTANCE_abstract:
+ ldr r1, [r0, #offClassObject_descriptor]
+ ldr r0, .LstrInstantiationErrorPtr
+ bl dvmThrowExceptionWithClassMessage
+ b common_exceptionThrown
+
+.LstrInstantiationErrorPtr:
+ .word .LstrInstantiationError
+
+
+/* continuation for OP_NEW_ARRAY */
+
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * r1 holds array length
+ * r2 holds class ref CCCC
+ */
+.LOP_NEW_ARRAY_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r9, r1 @ r9<- length (save)
+ mov r1, r2 @ r1<- CCCC
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ mov r1, r9 @ r1<- length (restore)
+ beq common_exceptionThrown @ yes, handle exception
+ @ fall through to OP_NEW_ARRAY_finish
+
+ /*
+ * Finish allocation.
+ *
+ * r0 holds class
+ * r1 holds array length
+ */
+.LOP_NEW_ARRAY_finish:
+ mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table
+ bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags)
+ cmp r0, #0 @ failed?
+ mov r2, rINST, lsr #8 @ r2<- A+
+ beq common_exceptionThrown @ yes, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb r3, [r3, #1] @ r3<- descriptor[1]
+ .if 0
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp r3, #'I' @ array of ints?
+ cmpne r3, #'L' @ array of objects?
+ cmpne r3, #'[' @ array of arrays?
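+ @ (the cmp/cmpne chain leaves EQ set only if r3 matched one of 'I'/'L'/'[')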
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 0
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+ ldr r0, .L_strInternalError
+ ldr r1, .L_strFilledNewArrayNotImpl
+ bl dvmThrowException
+ b common_exceptionThrown
+
+ .if (!0) @ define in one or the other, not both
+.L_strFilledNewArrayNotImpl:
+ .word .LstrFilledNewArrayNotImpl
+.L_strInternalError:
+ .word .LstrInternalError
+ .endif
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb r3, [r3, #1] @ r3<- descriptor[1]
+ .if 1
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp r3, #'I' @ array of ints?
+ cmpne r3, #'L' @ array of objects?
+ cmpne r3, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 1
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+ ldr r0, .L_strInternalError
+ ldr r1, .L_strFilledNewArrayNotImpl
+ bl dvmThrowException
+ b common_exceptionThrown
+
+ .if (!1) @ define in one or the other, not both
+.L_strFilledNewArrayNotImpl:
+ .word .LstrFilledNewArrayNotImpl
+.L_strInternalError:
+ .word .LstrInternalError
+ .endif
+
+
+/* continuation for OP_CMPL_FLOAT */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
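+ @ (The operands are reversed below: "lt" in the reversed compare means the
+ @ original vBB > vCC; anything else, since "le" already failed, must be NaN.)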
+.LOP_CMPL_FLOAT_gt_or_nan:
+ mov r1, r9 @ reverse order
+ mov r0, r10
+ bl __aeabi_cfcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPL_FLOAT_finish
+ mvn r1, #0 @ r1<- -1 for NaN (cmpl bias)
+ b .LOP_CMPL_FLOAT_finish
+
+
+#if 0 /* "classic" form */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpeq @ r0<- (vBB == vCC)
+ cmp r0, #0 @ equal?
+ movne r1, #0 @ yes, result is 0
+ bne OP_CMPL_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmplt @ r0<- (vBB < vCC)
+ cmp r0, #0 @ less than?
+ b OP_CMPL_FLOAT_continue
+@%break
+
+OP_CMPL_FLOAT_continue:
+ mvnne r1, #0 @ yes, result is -1
+ bne OP_CMPL_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpgt @ r0<- (vBB > vCC)
+ cmp r0, #0 @ greater than?
+ beq OP_CMPL_FLOAT_nan @ no, must be NaN
+ mov r1, #1 @ yes, result is 1
+ @ fall through to _finish
+
+OP_CMPL_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * This is expected to be uncommon, so we double-branch (once to here,
+ * again back to _finish).
+ */
+OP_CMPL_FLOAT_nan:
+ mvn r1, #0 @ r1<- 1 or -1 for NaN
+ b OP_CMPL_FLOAT_finish
+
+#endif
+
+
+/* continuation for OP_CMPG_FLOAT */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPG_FLOAT_gt_or_nan:
+ mov r1, r9 @ reverse order
+ mov r0, r10
+ bl __aeabi_cfcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPG_FLOAT_finish
+ mov r1, #1 @ r1<- 1 for NaN (cmpg bias)
+ b .LOP_CMPG_FLOAT_finish
+
+
+#if 0 /* "classic" form */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpeq @ r0<- (vBB == vCC)
+ cmp r0, #0 @ equal?
+ movne r1, #0 @ yes, result is 0
+ bne OP_CMPG_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmplt @ r0<- (vBB < vCC)
+ cmp r0, #0 @ less than?
+ b OP_CMPG_FLOAT_continue
+@%break
+
+OP_CMPG_FLOAT_continue:
+ mvnne r1, #0 @ yes, result is -1
+ bne OP_CMPG_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpgt @ r0<- (vBB > vCC)
+ cmp r0, #0 @ greater than?
+ beq OP_CMPG_FLOAT_nan @ no, must be NaN
+ mov r1, #1 @ yes, result is 1
+ @ fall through to _finish
+
+OP_CMPG_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * This is expected to be uncommon, so we double-branch (once to here,
+ * again back to _finish).
+ */
+OP_CMPG_FLOAT_nan:
+ mov r1, #1 @ r1<- 1 or -1 for NaN
+ b OP_CMPG_FLOAT_finish
+
+#endif
+
+
+/* continuation for OP_CMPL_DOUBLE */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPL_DOUBLE_gt_or_nan:
+ ldmia r10, {r0-r1} @ reverse order
+ ldmia r9, {r2-r3}
+ bl __aeabi_cdcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPL_DOUBLE_finish
+ mvn r1, #0 @ r1<- -1 for NaN (cmpl bias)
+ b .LOP_CMPL_DOUBLE_finish
+
+
+/* continuation for OP_CMPG_DOUBLE */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPG_DOUBLE_gt_or_nan:
+ ldmia r10, {r0-r1} @ reverse order
+ ldmia r9, {r2-r3}
+ bl __aeabi_cdcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPG_DOUBLE_finish
+ mov r1, #1 @ r1<- 1 for NaN (cmpg bias)
+ b .LOP_CMPG_DOUBLE_finish
+
+
+/* continuation for OP_CMP_LONG */
+
+.LOP_CMP_LONG_less:
+ mvn r1, #0 @ r1<- -1
+ @ We'd like to predicate the next mov so we can avoid the branch, but we
+ @ don't see a clean way to do it; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LOP_CMP_LONG_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.LOP_CMP_LONG_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_AGET_WIDE */
+
+.LOP_AGET_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r0, r0, #offArrayObject_contents
+ ldmia r0, {r2-r3} @ r2/r3 <- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_APUT_WIDE */
+
+.LOP_APUT_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ add r0, r0, #offArrayObject_contents
+ stmia r0, {r2-r3} @ vBB[vCC] <- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_APUT_OBJECT */
+ /*
+ * On entry:
+ * r1 = vBB (arrayObj)
+ * r9 = vAA (obj)
+ * r10 = offset into array (vBB + vCC * width)
+ */
+.LOP_APUT_OBJECT_finish:
+ cmp r9, #0 @ storing null reference?
+ beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz
+ bl dvmCanPutArrayElement @ test object type vs. array type
+ cmp r0, #0 @ okay?
+ beq common_errArrayStore @ no
+.LOP_APUT_OBJECT_skip_check:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_WIDE_finish:
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ mov r2, rINST, lsr #8 @ r2<- A+
+ add r9, r9, r3 @ r9<- obj + field offset
+ ldmia r9, {r0-r1} @ r0/r1<- obj.field (64-bit align ok)
+ and r2, r2, #15 @ r2<- A
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_OBJECT_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BOOLEAN_finish:
+ @bl common_squeak1
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BYTE_finish:
+ @bl common_squeak2
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_CHAR_finish:
+ @bl common_squeak3
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_SHORT_finish:
+ @bl common_squeak4
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_WIDE_finish:
+ mov r2, rINST, lsr #8 @ r2<- A+
+ cmp r9, #0 @ check object for null
+ and r2, r2, #15 @ r2<- A
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r2, {r0-r1} @ r0/r1<- fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ add r9, r9, r3 @ r9<- object + byte offset
+ stmia r9, {r0-r1} @ obj.field (64 bits, aligned)<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_OBJECT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BOOLEAN_finish:
+ @bl common_squeak1
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BYTE_finish:
+ @bl common_squeak2
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_CHAR_finish:
+ @bl common_squeak3
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_SHORT_finish:
+ @bl common_squeak4
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SGET */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
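+/*
+ * Resolution sketch (illustrative): all of the SGET/SPUT "_resolve"
+ * stubs below follow the same lazy pattern, roughly:
+ *
+ *   sfield = dvmResolveStaticField(method->clazz, ref);
+ *   if (sfield == NULL) goto exceptionThrown;    // resolve failed
+ *   goto finish;                                 // retry with sfield
+ */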
+
+/* continuation for OP_SGET_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_WIDE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_WIDE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_OBJECT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_OBJECT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_OBJECT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_BOOLEAN_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_BOOLEAN_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_BYTE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_BYTE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_CHAR_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_CHAR_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_SHORT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_SHORT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r9: &fp[AA]
+ */
+.LOP_SPUT_WIDE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_WIDE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_OBJECT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_OBJECT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_OBJECT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_BOOLEAN_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_BOOLEAN_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_BYTE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_BYTE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_CHAR_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_CHAR_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_SHORT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_SHORT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_continue:
+ GET_VREG(r1, r10) @ r1<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r1, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r1, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ continue on
+
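+/*
+ * Dispatch sketch (illustrative): a virtual invoke indexes the
+ * receiver's vtable with the base method's methodIndex, roughly:
+ *
+ *   if (this == NULL) goto throwNullPointer;
+ *   methodToCall = this->clazz->vtable[baseMethod->methodIndex];
+ */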
+
+/* continuation for OP_INVOKE_SUPER */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r9 = method->clazz
+ */
+.LOP_INVOKE_SUPER_continue:
+ ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ continue on
+
+.LOP_INVOKE_SUPER_resolve:
+ mov r0, r9 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_DIRECT */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ GET_VREG(r2, r10) @ r2<- "this" ptr (reload)
+ bne .LOP_INVOKE_DIRECT_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+ GET_VREG(r1, r10) @ r1<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r1, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r1, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ continue on
+
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r9 = method->clazz
+ */
+.LOP_INVOKE_SUPER_RANGE_continue:
+ ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ continue on
+
+.LOP_INVOKE_SUPER_RANGE_resolve:
+ mov r0, r9 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_RANGE_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ GET_VREG(r2, r10) @ r2<- "this" ptr (reload)
+ bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* continuation for OP_FLOAT_TO_LONG */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
+
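+/*
+ * Clamping sketch (illustrative, using <limits.h> names): the sequence
+ * above amounts to
+ *
+ *   long long f2l(float f) {
+ *       if (f >= (float)LLONG_MAX) return LLONG_MAX;
+ *       if (f <= (float)LLONG_MIN) return LLONG_MIN;
+ *       if (f != f) return 0;               // NaN
+ *       return (long long)f;                // __aeabi_f2lz
+ *   }
+ *
+ * 0x5f000000 is the float bit pattern for 2^63, i.e. (float)maxlong,
+ * and 0xdf000000 is its negation.
+ */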
+
+/* continuation for OP_DOUBLE_TO_LONG */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ ldr r3, .LOP_DOUBLE_TO_LONG_max @ (double)maxlong, hi
+ sub sp, sp, #4 @ align for EABI
+ mov r2, #0 @ (double)maxlong, lo
+ mov r4, r0 @ save r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ ldr r3, .LOP_DOUBLE_TO_LONG_min @ (double)minlong, hi
+ mov r2, #0 @ (double)minlong, lo
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+.LOP_DOUBLE_TO_LONG_max:
+ .word 0x43e00000 @ maxlong, as a double (high word)
+.LOP_DOUBLE_TO_LONG_min:
+ .word 0xc3e00000 @ minlong, as a double (high word)
+
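+/*
+ * Note (illustrative): 0x43e0000000000000 is the IEEE-754 double bit
+ * pattern for 2^63 (biased exponent 0x43e = 1086, 1086 - 1023 = 63),
+ * i.e. (double)maxlong; 0xc3e... is its negation. Only the high words
+ * are stored above because the low words are zero.
+ */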
+
+/* continuation for OP_MUL_LONG */
+
+.LOP_MUL_LONG_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHL_LONG */
+
+.LOP_SHL_LONG_finish:
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHR_LONG */
+
+.LOP_SHR_LONG_finish:
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_USHR_LONG */
+
+.LOP_USHR_LONG_finish:
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHL_LONG_2ADDR */
+
+.LOP_SHL_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHR_LONG_2ADDR */
+
+.LOP_SHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_USHR_LONG_2ADDR */
+
+.LOP_USHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_EXECUTE_INLINE */
+
+ /*
+ * Extract args, call function.
+ * r0 = #of args (0-4)
+ * r10 = call index
+ * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
+ *
+ * Other ideas:
+ * - Use a jump table from the main piece to jump directly into the
+ * AND/LDR pairs. Costs a data load, saves a branch.
+ * - Have five separate pieces that do the loading, so we can
+ * interleave the loads a little better. Increases code size.
+ */
+.LOP_EXECUTE_INLINE_continue:
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, r9, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, r9, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, r9, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, r9, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.LOP_EXECUTE_INLINE_table:
+ .word gDvmInlineOpsTable
+
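+/*
+ * Pattern note (illustrative): "add pc, pc, r0, lsl #3" above is a
+ * computed goto; reading pc on ARM yields the address of the current
+ * instruction + 8, so the "bl common_abort" slot is skipped and each
+ * numbered case is exactly 2 instructions (8 bytes). The fall-through
+ * then loads only as many args as needed, like a switch without breaks:
+ *
+ *   switch (argCount) {       // falls through on purpose
+ *   case 4: r3 = fp[F];
+ *   case 3: r2 = fp[E];
+ *   case 2: r1 = fp[D];
+ *   case 1: r0 = fp[C];
+ *   case 0: break;
+ *   }
+ */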
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+/* File: armv5te/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * Common code when a backward branch is taken.
+ *
+ * On entry:
+ * r9 is PC adjustment *in bytes*
+ */
+common_backwardBranch:
+ mov r0, #kInterpEntryInstr
+ bl common_periodicChecks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/*
+ * Need to see if the thread needs to be suspended or debugger/profiler
+ * activity has begun.
+ *
+ * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
+ * have to do the second ldr.
+ *
+ * TODO: reduce this so we're just checking a single location.
+ *
+ * On entry:
+ * r0 is reentry type, e.g. kInterpEntryInstr
+ * r9 is trampoline PC adjustment *in bytes*
+ */
+common_periodicChecks:
+ ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
+
+#if defined(WITH_DEBUGGER)
+ ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
+#endif
+#if defined(WITH_PROFILER)
+ ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
+#endif
+
+ ldr r3, [r3] @ r3<- suspendCount (int)
+
+#if defined(WITH_DEBUGGER)
+ ldrb r1, [r1] @ r1<- debuggerActive (boolean)
+#endif
+#if defined (WITH_PROFILER)
+ ldr r2, [r2] @ r2<- activeProfilers (int)
+#endif
+
+ cmp r3, #0 @ suspend pending?
+ bne 2f @ yes, check suspend
+
+#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
+# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
+ orrs r1, r1, r2 @ r1<- r1 | r2
+ cmp r1, #0 @ debugger attached or profiler started?
+# elif defined(WITH_DEBUGGER)
+ cmp r1, #0 @ debugger attached?
+# elif defined(WITH_PROFILER)
+ cmp r2, #0 @ profiler started?
+# endif
+ bne 3f @ debugger/profiler, switch interp
+#endif
+
+ bx lr @ nothing to do, return
+
+2: @ check suspend
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ b dvmCheckSuspendPending @ suspend if necessary, then return
+
+3: @ debugger/profiler enabled, bail out
+ add rPC, rPC, r9 @ update rPC
+ str r0, [rGLUE, #offGlue_entryPoint]
+ mov r1, #1 @ "want switch" = true
+ b common_gotoBail
+
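+/*
+ * In C terms (illustrative), common_periodicChecks boils down to:
+ *
+ *   if (*pSuspendCount != 0)
+ *       return dvmCheckSuspendPending(self);
+ *   if (debuggerActive || activeProfilers)
+ *       bail out with "want switch" = true;   // switch interpreters
+ *   return;
+ */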
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ *
+ * State registers will be saved to the "glue" area before bailing.
+ *
+ * On entry:
+ * r1 is "bool changeInterp", indicating if we want to switch to the
+ * other interpreter or just bail all the way out
+ */
+common_gotoBail:
+ SAVE_PC_FP_TO_GLUE() @ export state to "glue"
+ mov r0, rGLUE @ r0<- glue ptr
+ b dvmMterpStdBail @ call(glue, changeInterp)
+
+ @add r1, r1, #1 @ using (boolean+1)
+ @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf
+ @bl _longjmp @ does not return
+ @bl common_abort
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ beq .LinvokeArgsDone @ if no args, skip the rest
+ FETCH(r1, 2) @ r1<- CCCC
+
+ @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
+ @ (very few methods have > 10 args; could unroll for common cases)
+ add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC]
+ sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args
+1: ldr r1, [r3], #4 @ val = *fp++
+ subs r2, r2, #1 @ count--
+ str r1, [r10], #4 @ *outs++ = val
+ bne 1b @ ...while count != 0
+ b .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ beq .LinvokeArgsDone @ if no args, skip the rest
+ FETCH(r1, 2) @ r1<- GFED
+
+ @ r0=methodToCall, r1=GFED, r2=count, r10=outs
+.LinvokeNonRange:
+ rsb r2, r2, #5 @ r2<- 5-r2
+ add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+5: and ip, rINST, #0x0f00 @ isolate A
+ ldr r3, [rFP, ip, lsr #6] @ r3<- vA (shift right 8, left 2)
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vA
+4: and ip, r1, #0xf000 @ isolate G
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vG (shift right 12, left 2)
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vG
+3: and ip, r1, #0x0f00 @ isolate F
+ ldr r3, [rFP, ip, lsr #6] @ r3<- vF
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vF
+2: and ip, r1, #0x00f0 @ isolate E
+ ldr r3, [rFP, ip, lsr #2] @ r3<- vE
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vE
+1: and ip, r1, #0x000f @ isolate D
+ ldr r3, [rFP, ip, lsl #2] @ r3<- vD
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vD
+0: @ fall through to .LinvokeArgsDone
+
+.LinvokeArgsDone: @ r0=methodToCall
+ @ find space for the new stack frame, check for overflow
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ ldrh r2, [r0, #offMethod_registersSize] @ r2<- methodToCall->regsSize
+ ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize
+ sub r1, r1, r2, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea
+@ bl common_dumpRegs
+ ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd
+ sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize)
+ cmp r3, r9 @ bottom < interpStackEnd?
+ blt .LstackOverflow @ yes, this frame will overflow stack
+
+ @ set up newSaveArea
+#ifdef EASY_GDB
+ SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area
+ str ip, [r10, #offStackSaveArea_prevSave]
+#endif
+ str rFP, [r10, #offStackSaveArea_prevFrame]
+ str rPC, [r10, #offStackSaveArea_savedPc]
+ str r0, [r10, #offStackSaveArea_method]
+
+ ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
+ tst r3, #ACC_NATIVE
+ bne .LinvokeNative
+
+ /*
+ stmfd sp!, {r0-r3}
+ bl common_printNewline
+ mov r0, rFP
+ mov r1, #0
+ bl dvmDumpFp
+ ldmfd sp!, {r0-r3}
+ stmfd sp!, {r0-r3}
+ mov r0, r1
+ mov r1, r10
+ bl dvmDumpFp
+ bl common_printNewline
+ ldmfd sp!, {r0-r3}
+ */
+
+ @ Update "glue" values for the new method
+ @ r0=methodToCall, r1=newFp
+ ldr r3, [r0, #offMethod_clazz] @ r3<- method->clazz
+ str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall
+ ldr r3, [r3, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- method->insns
+ str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+ ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self
+ FETCH_INST() @ load rINST from rPC
+ mov rFP, r1 @ fp = newFp
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LinvokeNative:
+ @ Prep for the native call
+ @ r0=methodToCall, r1=newFp, r10=newSaveArea
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ ldr r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
+ str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
+ str r9, [r10, #offStackSaveArea_localRefTop] @ newFp->localRefTop = refNext
+ mov r9, r3 @ r9<- glue->self (preserve)
+
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFp (points to args)
+ add r1, rGLUE, #offGlue_retval @ r1<- &retval
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ b .Lskip
+ .type dalvik_mterp, %function
+dalvik_mterp:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+.Lskip:
+#endif
+
+ @mov lr, pc @ set return addr
+ @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+
+ @ native return; r9=self, r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r0, [r10, #offStackSaveArea_localRefTop] @ r0<- newSave->localRefTop
+ ldr r1, [r9, #offThread_exception] @ check for exception
+ str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [r9, #offThread_jniLocal_nextEntry] @ self->refNext<- r0
+ bne common_exceptionThrown @ no, handle exception
+
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LstackOverflow:
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- self
+ bl dvmHandleStackOverflow
+ b common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+ .fnend
+#endif
+
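+/*
+ * Frame-layout sketch (illustrative): .LinvokeArgsDone above sizes the
+ * new frame like so, with the stack growing downward:
+ *
+ *   newFp       = (u4*)SAVEAREA_FROM_FP(fp) - method->registersSize;
+ *   newSaveArea = SAVEAREA_FROM_FP(newFp);
+ *   bottom      = (u4*)newSaveArea - method->outsSize;
+ *   if (bottom < glue->interpStackEnd) goto stackOverflow;
+ */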
+
+ /*
+ * Common code for method invocation, calling through "glue code".
+ *
+ * TODO: now that we have range and non-range invoke handlers, this
+ * needs to be split into two. Maybe just create entry points
+ * that set r9 and jump here?
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ * r9 is "bool methodCallRange", indicating if this is a /range variant
+ */
+ .if 0
+.LinvokeOld:
+ sub sp, sp, #8 @ space for args + pad
+ FETCH(ip, 2) @ ip<- FEDC or CCCC
+ mov r2, r0 @ A2<- methodToCall
+ mov r0, rGLUE @ A0<- glue
+ SAVE_PC_FP_TO_GLUE() @ export state to "glue"
+ mov r1, r9 @ A1<- methodCallRange
+ mov r3, rINST, lsr #8 @ A3<- AA
+ str ip, [sp, #0] @ A4<- ip
+ bl dvmMterp_invokeMethod @ call the C invokeMethod
+ add sp, sp, #8 @ remove arg area
+ b common_resumeAfterGlueCall @ continue to next instruction
+ .endif
+
+
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+ mov r0, #kInterpEntryReturn
+ mov r9, #0
+ bl common_periodicChecks
+
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
+ ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ is this a break frame?
+ mov r1, #0 @ "want switch" = false
+ beq common_gotoBail @ break frame, bail out completely
+
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ pc = saveArea->savedPc
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ str r2, [rGLUE, #offGlue_method] @ glue->method = newSave->method
+ str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+ ldr r1, [r2, #offMethod_clazz] @ r1<- method->clazz
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ ldr r1, [r1, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [rGLUE, #offGlue_methodClassDex]
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Return handling, calls through "glue code".
+ */
+ .if 0
+.LreturnOld:
+ SAVE_PC_FP_TO_GLUE() @ export state
+ mov r0, rGLUE @ arg to function
+ bl dvmMterp_returnFromMethod
+ b common_resumeAfterGlueCall
+ .endif
+
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+common_exceptionThrown:
+.LexceptionNew:
+ mov r0, #kInterpEntryThrow
+ mov r9, #0
+ bl common_periodicChecks
+
+ ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self
+ ldr r9, [r10, #offThread_exception] @ r9<- self->exception
+ mov r1, r10 @ r1<- self
+ mov r0, r9 @ r0<- exception
+ bl dvmAddTrackedAlloc @ don't let the exception be GCed
+ mov r3, #0 @ r3<- NULL
+ str r3, [r10, #offThread_exception] @ self->exception = NULL
+
+ /* set up args and a local for "&fp" */
+ /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
+ str rFP, [sp, #-4]! @ *--sp = fp
+ mov ip, sp @ ip<- &fp
+ mov r3, #0 @ r3<- false
+ str ip, [sp, #-4]! @ *--sp = &fp
+ ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method
+ mov r0, r10 @ r0<- self
+ ldr r1, [r1, #offMethod_insns] @ r1<- method->insns
+ mov r2, r9 @ r2<- exception
+ sub r1, rPC, r1 @ r1<- pc - method->insns
+ mov r1, r1, asr #1 @ r1<- offset in code units
+
+ /* call, r0 gets catchRelPc (a code-unit offset) */
+ bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp)
+
+ /* fix earlier stack overflow if necessary; may trash rFP */
+ ldrb r1, [r10, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ beq 1f @ no, skip ahead
+ mov rFP, r0 @ save relPc result in rFP
+ mov r0, r10 @ r0<- self
+ bl dvmCleanupStackOverflow @ call(self)
+ mov r0, rFP @ restore result
+1:
+
+ /* update frame pointer and check result from dvmFindCatchBlock */
+ ldr rFP, [sp, #4] @ retrieve the updated rFP
+ cmp r0, #0 @ is catchRelPc < 0?
+ add sp, sp, #8 @ restore stack
+ bmi .LnotCaughtLocally
+
+ /* adjust locals to match self->curFrame and updated PC */
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area
+ ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method
+ str r1, [rGLUE, #offGlue_method] @ glue->method = new method
+ ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz
+ ldr r3, [r1, #offMethod_insns] @ r3<- method->insns
+ ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
+ add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc
+ str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+
+ /* release the tracked alloc on the exception */
+ mov r0, r9 @ r0<- exception
+ mov r1, r10 @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+
+ /* restore the exception if the handler wants it */
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"?
+ streq r9, [r10, #offThread_exception] @ yes, restore the exception
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LnotCaughtLocally: @ r9=exception, r10=self
+ /* fix stack overflow if necessary */
+ ldrb r1, [r10, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ movne r0, r10 @ if yes: r0<- self
+ blne dvmCleanupStackOverflow @ if yes: call(self)
+
+ @ may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+ /* call __android_log_print(prio, tag, format, ...) */
+ /* "Exception %s from %s:%d not caught locally" */
+ @ dvmLineNumFromPC(method, pc - method->insns)
+ ldr r0, [rGLUE, #offGlue_method]
+ ldr r1, [r0, #offMethod_insns]
+ sub r1, rPC, r1
+ asr r1, r1, #1
+ bl dvmLineNumFromPC
+ str r0, [sp, #-4]!
+ @ dvmGetMethodSourceFile(method)
+ ldr r0, [rGLUE, #offGlue_method]
+ bl dvmGetMethodSourceFile
+ str r0, [sp, #-4]!
+ @ exception->clazz->descriptor
+ ldr r3, [r9, #offObject_clazz]
+ ldr r3, [r3, #offClassObject_descriptor]
+ @
+ ldr r2, strExceptionNotCaughtLocally
+ ldr r1, strLogTag
+ mov r0, #3 @ LOG_DEBUG
+ bl __android_log_print
+#endif
+ str r9, [r10, #offThread_exception] @ restore exception
+ mov r0, r9 @ r0<- exception
+ mov r1, r10 @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+ mov r1, #0 @ "want switch" = false
+ b common_gotoBail @ bail out
+
+
+ /*
+ * Exception handling, calls through "glue code".
+ */
+ .if 0
+.LexceptionOld:
+ SAVE_PC_FP_TO_GLUE() @ export state
+ mov r0, rGLUE @ arg to function
+ bl dvmMterp_exceptionThrown
+ b common_resumeAfterGlueCall
+ .endif
+
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/*
+ * Invalid array index.
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ ldr r0, strArrayIndexException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Invalid array value.
+ */
+common_errArrayStore:
+ EXPORT_PC()
+ ldr r0, strArrayStoreException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ ldr r0, strArithmeticException
+ ldr r1, strDivideByZero
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ ldr r0, strNegativeArraySizeException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ ldr r0, strNoSuchMethodError
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one. We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ ldr r0, strNullPointerException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault. The source address will
+ * be in lr (use a bl instruction to jump here).
+ */
+common_abort:
+ ldr pc, .LdeadFood
+.LdeadFood:
+ .word 0xdeadf00d
+
+/*
+ * Spit out a "we were here", preserving all registers. (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+ .macro SQUEAK num
+common_squeak\num:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strSqueak
+ mov r1, #\num
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endm
+
+ SQUEAK 0
+ SQUEAK 1
+ SQUEAK 2
+ SQUEAK 3
+ SQUEAK 4
+ SQUEAK 5
+
+/*
+ * Spit out the number in r0, preserving registers.
+ */
+common_printNum:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strSqueak
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strNewline
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+ /*
+ * Print the 32-bit quantity in r0 as a hex value, preserving registers.
+ */
+common_printHex:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strPrintHex
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print the 64-bit quantity in r0-r1, preserving registers.
+ */
+common_printLong:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r3, r1
+ mov r2, r0
+ ldr r0, strPrintLong
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print full method info. Pass the Method* in r0. Preserves regs.
+ */
+common_printMethod:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpPrintMethod
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info. Requires the C function to be compiled in.
+ */
+ .if 0
+common_dumpRegs:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpDumpArmRegs
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endif
+
+
+/*
+ * String references, must be close to the code that uses them.
+ */
+ .align 2
+strArithmeticException:
+ .word .LstrArithmeticException
+strArrayIndexException:
+ .word .LstrArrayIndexException
+strArrayStoreException:
+ .word .LstrArrayStoreException
+strDivideByZero:
+ .word .LstrDivideByZero
+strNegativeArraySizeException:
+ .word .LstrNegativeArraySizeException
+strNoSuchMethodError:
+ .word .LstrNoSuchMethodError
+strNullPointerException:
+ .word .LstrNullPointerException
+
+strLogTag:
+ .word .LstrLogTag
+strExceptionNotCaughtLocally:
+ .word .LstrExceptionNotCaughtLocally
+
+strNewline:
+ .word .LstrNewline
+strSqueak:
+ .word .LstrSqueak
+strPrintHex:
+ .word .LstrPrintHex
+strPrintLong:
+ .word .LstrPrintLong
+
+/*
+ * Zero-terminated ASCII string data.
+ *
+ * On ARM we have two choices: do like gcc does, and LDR from a .word
+ * with the address, or use an ADR pseudo-op to get the address
+ * directly. ADR saves 4 bytes and an indirection, but it's using a
+ * PC-relative addressing mode and hence has a limited range, which
+ * makes it not work well with mergeable string sections.
+ */
+ .section .rodata.str1.4,"aMS",%progbits,1
+
+.LstrBadEntryPoint:
+ .asciz "Bad entry point %d\n"
+.LstrArithmeticException:
+ .asciz "Ljava/lang/ArithmeticException;"
+.LstrArrayIndexException:
+ .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;"
+.LstrArrayStoreException:
+ .asciz "Ljava/lang/ArrayStoreException;"
+.LstrClassCastException:
+ .asciz "Ljava/lang/ClassCastException;"
+.LstrDivideByZero:
+ .asciz "divide by zero"
+.LstrFilledNewArrayNotImpl:
+ .asciz "filled-new-array only implemented for objects and 'int'"
+.LstrInternalError:
+ .asciz "Ljava/lang/InternalError;"
+.LstrInstantiationError:
+ .asciz "Ljava/lang/InstantiationError;"
+.LstrNegativeArraySizeException:
+ .asciz "Ljava/lang/NegativeArraySizeException;"
+.LstrNoSuchMethodError:
+ .asciz "Ljava/lang/NoSuchMethodError;"
+.LstrNullPointerException:
+ .asciz "Ljava/lang/NullPointerException;"
+
+.LstrLogTag:
+ .asciz "mterp"
+.LstrExceptionNotCaughtLocally:
+ .asciz "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+ .asciz "\n"
+.LstrSqueak:
+ .asciz "<%d>"
+.LstrPrintHex:
+ .asciz "<0x%x>"
+.LstrPrintLong:
+ .asciz "<%lld>"
+
+
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
new file mode 100644
index 0000000..9987ff5
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -0,0 +1,10006 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv5te'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5te/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rGLUE MterpGlue pointer
+ r7 rIBASE interpreted instruction base pointer, used for computed goto
+ r8 rINST first 16-bit code unit of current instruction
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rGLUE r6
+#define rIBASE r7
+#define rINST r8
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc]
+#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc]
+#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp]
+#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp]
+#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP}
+#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP}
+
+/*
+ * "export" the PC to the stack frame, f/b/o future exception objects. Must
+ * be done *before* something calls dvmThrowException.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() ldrh rINST, [rPC]
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]!
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)]
+#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)]
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)]
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(_reg) and _reg, rINST, #255
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2]
+#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
+
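+/*
+ * Decode sketch (illustrative): in C terms the fetch/decode macros
+ * above amount to
+ *
+ *   u2 inst   = *pc;              // FETCH_INST
+ *   u1 opcode = inst & 0xff;      // GET_INST_OPCODE
+ *   u4 val    = fp[vreg];         // GET_VREG
+ *   fp[vreg]  = val;              // SET_VREG
+ *
+ * and GOTO_OPCODE jumps to (rIBASE + opcode * 64), relying on the
+ * 64-byte alignment of each handler.
+ */
+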
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
+
+/* File: armv5te/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines
+ * ===========================================================================
+ */
+
+/*
+ * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5. Essentially a
+ * one-way branch.
+ *
+ * May modify IP. Does not modify LR.
+ */
+.macro LDR_PC source
+ ldr pc, \source
+.endm
+
+/*
+ * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
+ * Jump to subroutine.
+ *
+ * May modify IP and LR.
+ */
+.macro LDR_PC_LR source
+ mov lr, pc
+ ldr pc, \source
+.endm
+
+/*
+ * Macro for "LDMFD SP!, {...regs...,PC}".
+ *
+ * May modify IP and LR.
+ */
+.macro LDMFD_PC regs
+ ldmfd sp!, {\regs,pc}
+.endm
+
+
+/* File: armv5te/entry.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+/*
+ * We don't have formal stack frames, so gdb scans upward in the code
+ * to find the start of the function (a label with the %function type),
+ * and then looks at the next few instructions to figure out what
+ * got pushed onto the stack. From this it figures out how to restore
+ * the registers, including PC, for the previous stack frame. If gdb
+ * sees a non-function label, it stops scanning, so either we need to
+ * have nothing but assembler-local labels between the entry point and
+ * the break, or we need to fake it out.
+ *
+ * When this is defined, we add some stuff to make gdb less confused.
+ */
+#define ASSIST_DEBUGGER 1
+
+ .text
+ .align 2
+ .global dvmMterpStdRun
+ .type dvmMterpStdRun, %function
+
+/*
+ * On entry:
+ * r0 MterpGlue* glue
+ *
+ * This function returns a boolean "changeInterp" value. The return comes
+ * via a call to dvmMterpStdBail().
+ */
+dvmMterpStdRun:
+#define MTERP_ENTRY1 \
+ .save {r4-r10,fp,lr}; \
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+#define MTERP_ENTRY2 \
+ .pad #4; \
+ sub sp, sp, #4 @ align 64
+
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+
+ /* save stack pointer, add magic word for debuggerd */
+ str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return
+
+ /* set up "named" registers, figure out entry point */
+ mov rGLUE, r0 @ set rGLUE
+ ldrb r1, [r0, #offGlue_entryPoint] @ InterpEntry enum is char
+ LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue"
+ adr rIBASE, dvmAsmInstructionStart @ set rIBASE
+ cmp r1, #kInterpEntryInstr @ usual case?
+ bne .Lnot_instr @ no, handle it
+
+ /* start executing the instruction at rPC */
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.Lnot_instr:
+ cmp r1, #kInterpEntryReturn @ were we returning from a method?
+ beq common_returnFromMethod
+
+.Lnot_return:
+ cmp r1, #kInterpEntryThrow @ were we throwing an exception?
+ beq common_exceptionThrown
+
+.Lbad_arg:
+ ldr r0, strBadEntryPoint
+ @ r1 holds value of entryPoint
+ bl printf
+ bl dvmAbort
+ .fnend
+
+
+ .global dvmMterpStdBail
+ .type dvmMterpStdBail, %function
+
+/*
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper. The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We pushed some registers on the stack in dvmMterpStdRun, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * r0 MterpGlue* glue
+ * r1 bool changeInterp
+ */
+dvmMterpStdBail:
+ ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP
+ mov r0, r1 @ return the changeInterp value
+ add sp, sp, #4 @ un-align 64
+ LDMFD_PC "r4-r10,fp" @ restore 9 regs and return
+
+
+/*
+ * String references.
+ */
+strBadEntryPoint:
+ .word .LstrBadEntryPoint
+
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOP: /* 0x00 */
+/* File: armv5te/OP_NOP.S */
+ FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ .type dalvik_inst, %function
+dalvik_inst:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+ .fnend
+#endif
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE: /* 0x01 */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15 @ r0<- A
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
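+
+/*
+ * Decode example (illustrative): rINST = 0x3201 is opcode 0x01 in the
+ * low byte, A = 2 in bits 11:8, and B = 3 in bits 15:12, i.e.
+ * "move v2, v3": v2 receives a copy of v3.
+ */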
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/OP_MOVE_WIDE.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r2, r2, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 1) @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/OP_MOVE_WIDE_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 2) @ r3<- BBBB
+ FETCH(r2, 1) @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/OP_MOVE_OBJECT.S */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15 @ r0<- A
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/OP_MOVE_OBJECT_16.S */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/OP_MOVE_EXCEPTION.S */
+ /* move-exception vAA */
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ SET_VREG(r3, r2) @ fp[AA]<- exception obj
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offThread_exception] @ dvmClearException bypass
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/OP_RETURN_VOID.S */
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN: /* 0x0f */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/OP_RETURN_WIDE.S */
+ /*
+ * Return a 64-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ stmia r3, {r0-r1} @ retval<- r0/r1
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/OP_RETURN_OBJECT.S */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_4: /* 0x12 */
+/* File: armv5te/OP_CONST_4.S */
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ mov r0, rINST, lsr #8 @ r0<- A+
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ and r0, r0, #15 @ r0<- A
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r1, r0) @ fp[A]<- r1
+ GOTO_OPCODE(ip) @ execute next instruction
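+
+/*
+ * Sign-extension example (illustrative): for "const/4 v0, #-1" the B
+ * nibble is 0xF; the LSL #16 parks it in bits 31:28 and the ASR #28
+ * drags the sign bit down, yielding 0xFFFFFFFF (-1).
+ */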
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_16: /* 0x13 */
+/* File: armv5te/OP_CONST_16.S */
+ /* const/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST: /* 0x14 */
+/* File: armv5te/OP_CONST.S */
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/OP_CONST_HIGH16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/OP_CONST_WIDE_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/OP_CONST_WIDE_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH(r0, 1) @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S(r2, 2) @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/OP_CONST_WIDE.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (low middle)
+ FETCH(r2, 3) @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH(r3, 4) @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
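+
+/*
+ * Example (illustrative): "const-wide/high16 vAA, #+0x4000" stores
+ * 0x4000000000000000, the IEEE-754 bit pattern for the double 2.0;
+ * only the top 16 bits are encoded and everything else is zeroed.
+ */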
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/OP_CONST_STRING.S */
+ /* const/string vAA, String@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_STRING_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/OP_CONST_STRING_JUMBO.S */
+ /* const/string vAA, String@BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBBbbbb]
+ cmp r0, #0
+ beq .LOP_CONST_STRING_JUMBO_resolve
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/OP_CONST_CLASS.S */
+ /* const/class vAA, Class@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_CLASS_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/OP_MONITOR_ENTER.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ cmp r1, #0 @ null object?
+#ifdef WITH_MONITOR_TRACKING
+ EXPORT_PC() @ export PC so we can grab stack trace
+#endif
+ beq common_errNullObject @ null object, throw an exception
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl dvmLockObject @ call(self, obj)
+#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ ldr r1, [r0, #offThread_exception] @ check for exception
+ cmp r1, #0
+ bne common_exceptionThrown @ exception raised, bail out
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/OP_MONITOR_EXIT.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ EXPORT_PC() @ before fetch: export the PC
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ bl dvmUnlockObject @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, exception is pending
+ FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/OP_CHECK_CAST.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r2, 1) @ r2<- BBBB
+ GET_VREG(r9, r3) @ r9<- object
+ ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex
+ cmp r9, #0 @ is object null?
+ ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses
+ beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds
+ ldr r1, [r0, r2, lsl #2] @ r1<- resolved class
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_CHECK_CAST_resolve @ not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+ cmp r0, r1 @ same class (trivial success)?
+ bne .LOP_CHECK_CAST_fullcheck @ no, do full check
+.LOP_CHECK_CAST_okay:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/OP_INSTANCE_OF.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ and r9, r9, #15 @ r9<- A
+ cmp r0, #0 @ is object null?
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex
+ beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0
+ FETCH(r3, 1) @ r3<- CCCC
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses
+ ldr r1, [r2, r3, lsl #2] @ r1<- resolved class
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now
+.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
+ cmp r0, r1 @ same class (trivial success)?
+ beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish
+ b .LOP_INSTANCE_OF_fullcheck @ no, do full check
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/OP_ARRAY_LENGTH.S */
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ mov r2, rINST, lsr #8 @ r2<- A+
+ GET_VREG(r0, r1) @ r0<- vB (object ref)
+ and r2, r2, #15 @ r2<- A
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r3, [r0, #offArrayObject_length] @ r3<- array length
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r3, r2) @ vB<- length
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/OP_NEW_INSTANCE.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ EXPORT_PC() @ req'd for init, resolve, alloc
+ cmp r0, #0 @ already resolved?
+ beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now
+.LOP_NEW_INSTANCE_resolved: @ r0=class
+ ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
+ cmp r1, #CLASS_INITIALIZED @ has class been initialized?
+ bne .LOP_NEW_INSTANCE_needinit @ no, init class now
+.LOP_NEW_INSTANCE_initialized: @ r0=class
+ ldr r3, [r0, #offClassObject_accessFlags] @ r3<- clazz->accessFlags
+ tst r3, #(ACC_INTERFACE|ACC_ABSTRACT) @ abstract or interface?
+ mov r1, #ALLOC_DONT_TRACK @ flags for alloc call
+ beq .LOP_NEW_INSTANCE_finish @ concrete class, continue
+ b .LOP_NEW_INSTANCE_abstract @ fail
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/OP_NEW_ARRAY.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ FETCH(r2, 1) @ r2<- CCCC
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ GET_VREG(r1, r0) @ r1<- vB (array length)
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ cmp r1, #0 @ check length
+ ldr r0, [r3, r2, lsl #2] @ r0<- resolved class
+ bmi common_errNegativeArraySize @ negative length, bail
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ req'd for resolve, alloc
+ bne .LOP_NEW_ARRAY_finish @ resolved, continue
+ b .LOP_NEW_ARRAY_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
+8: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
+8: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/OP_FILL_ARRAY_DATA.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG(r0, r3) @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ EXPORT_PC()
+ bl dvmInterpHandleFillArrayData @ fill the array with predefined data
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq common_exceptionThrown @ has exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW: /* 0x27 */
+/* File: armv5te/OP_THROW.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (exception object)
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ @ bypass dvmSetException, just store it
+ str r1, [r0, #offThread_exception] @ thread->exception<- obj
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO: /* 0x28 */
+/* File: armv5te/OP_GOTO.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended)
+ mov r9, r9, lsl #1 @ r9<- byte offset
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
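+
+/*
+ * Worked example (illustrative): AA = 0xFE sign-extends to -2 code
+ * units and doubles to a byte offset of -4; the MOVS leaves N set, so
+ * the BMI routes this backward branch through the periodic checks.
+ */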
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/OP_GOTO_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended)
+ movs r9, r0, asl #1 @ r9<- byte offset, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/OP_GOTO_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". The ORRS
+ * instruction doesn't affect the V flag, so we need to clear it
+ * explicitly.
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(r0, 1) @ r0<- aaaa (lo)
+ FETCH(r1, 2) @ r1<- AAAA (hi)
+ cmp ip, ip @ (clear V flag during stall)
+ orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign
+ mov r9, r0, asl #1 @ r9<- byte offset
+ ble common_backwardBranch @ backward branch, do periodic checks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
+ movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ beq common_backwardBranch @ (want to use BLE but V is unknown)
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
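+
+/*
+ * Sketch of the lookup the helper performs, in C (payload layout per
+ * the Dalvik bytecode spec; field names here are illustrative):
+ *
+ *   // payload: ushort ident; ushort size; int firstKey; int targets[size];
+ *   int index = testVal - firstKey;
+ *   if ((unsigned)index < size)
+ *       return targets[index];  // branch offset, in code units
+ *   return 3;                   // no match: fall through the 3-unit insn
+ */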
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/OP_SPARSE_SWITCH.S */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
+ movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+ beq common_backwardBranch @ (want to use BLE but V is unknown)
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ copy to arg registers
+ mov r1, r10
+ bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
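+
+/*
+ * Net effect in C (sketch): cmpl-float biases the unordered (NaN) case
+ * toward -1, where cmpg-float would return +1:
+ *
+ *   int cmpl_float(float x, float y) {
+ *       if (x == y) return 0;
+ *       if (x < y)  return -1;
+ *       if (x > y)  return 1;
+ *       return -1;              // one or both operands was NaN
+ *   }
+ */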
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/OP_CMPG_FLOAT.S */
+/* File: armv5te/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ copy to arg registers
+ mov r1, r10
+ bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r9, r0, #255 @ r9<- BB
+ mov r10, r0, lsr #8 @ r10<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
+ add r10, rFP, r10, lsl #2 @ r10<- &fp[CC]
+ ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1
+ bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_DOUBLE_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/OP_CMPG_DOUBLE.S */
+/* File: armv5te/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r9, r0, #255 @ r9<- BB
+ mov r10, r0, lsr #8 @ r10<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
+ add r10, rFP, r10, lsl #2 @ r10<- &fp[CC]
+ ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1
+ bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_DOUBLE_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/OP_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .LOP_CMP_LONG_less @ signed compare on high part
+ bgt .LOP_CMP_LONG_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .LOP_CMP_LONG_greater @ unsigned compare on low part
+ bne .LOP_CMP_LONG_less
+ b .LOP_CMP_LONG_finish @ equal; r1 already holds 0
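+
+/*
+ * Equivalent C (sketch):
+ *
+ *   int cmp_long(int64_t x, int64_t y) {
+ *       return (x == y) ? 0 : (x < y) ? -1 : 1;
+ *   }
+ *
+ * The handler reaches the same answer with one signed compare on the
+ * high words and, when those match, one unsigned compare on the low
+ * words.
+ */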
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/OP_IF_EQ.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ bne 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
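+
+/*
+ * Taken/not-taken example (illustrative): this is if-eq, so the reverse
+ * comparison is "ne". BNE skips to local label 1 with the default
+ * 4-byte not-taken distance (the instruction is 2 code units); only a
+ * true vA == vB fetches and applies the signed CCCC offset.
+ */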
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NE: /* 0x33 */
+/* File: armv5te/OP_IF_NE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ beq 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LT: /* 0x34 */
+/* File: armv5te/OP_IF_LT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ bge 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GE: /* 0x35 */
+/* File: armv5te/OP_IF_GE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ blt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GT: /* 0x36 */
+/* File: armv5te/OP_IF_GT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ ble 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LE: /* 0x37 */
+/* File: armv5te/OP_IF_LE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, r3 @ compare (vA, vB)
+ bgt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ yes, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/OP_IF_EQZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ bne 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/OP_IF_NEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ beq 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/OP_IF_LTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ bge 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/OP_IF_GEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ blt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/OP_IF_GTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ ble 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/OP_IF_LEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ mov r9, #4 @ r9<- BYTE branch dist for not-taken
+ cmp r2, #0 @ compare (vAA, 0)
+ bgt 1f @ branch to 1 if comparison failed
+ FETCH_S(r9, 1) @ r9<- branch offset, in code units
+ movs r9, r9, asl #1 @ convert to bytes, check sign
+ bmi common_backwardBranch @ backward branch, do periodic checks
+1: FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/OP_UNUSED_3E.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/OP_UNUSED_3F.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/OP_UNUSED_40.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/OP_UNUSED_41.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/OP_UNUSED_42.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/OP_UNUSED_43.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET: /* 0x44 */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: the usual FETCH/and/shift decode would take exactly 17
+ * instructions; we use a pair of FETCH_Bs instead to save one.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
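+
+/*
+ * The single unsigned compare covers both bounds checks, as in this C
+ * sketch:
+ *
+ *   if ((uint32_t)index >= (uint32_t)length)
+ *       goto arrayIndexException;   // catches index < 0 as well
+ *
+ * A negative index reinterprets as a huge unsigned value, so one
+ * CMP/BCS pair replaces two signed tests.
+ */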
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/OP_AGET_WIDE.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcc .LOP_AGET_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/OP_AGET_OBJECT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: the usual FETCH/and/shift decode would take exactly 17
+ * instructions; we use a pair of FETCH_Bs instead to save one.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/OP_AGET_BOOLEAN.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: the usual FETCH/and/shift decode would take exactly 17
+ * instructions; we use a pair of FETCH_Bs instead to save one.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/OP_AGET_BYTE.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: the usual FETCH/and/shift decode would take exactly 17
+ * instructions; we use a pair of FETCH_Bs instead to save one.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/OP_AGET_CHAR.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: the usual FETCH/and/shift decode would take exactly 17
+ * instructions; we use a pair of FETCH_Bs instead to save one.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/OP_AGET_SHORT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: the usual FETCH/and/shift decode would take exactly 17
+ * instructions; we use a pair of FETCH_Bs instead to save one.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT: /* 0x4b */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/OP_APUT_WIDE.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcc .LOP_APUT_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/OP_APUT_OBJECT.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r1, r2) @ r1<- vBB (array object)
+ GET_VREG(r0, r3) @ r0<- vCC (requested index)
+ cmp r1, #0 @ null array object?
+ GET_VREG(r9, r9) @ r9<- vAA
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length
+ add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width
+ cmp r0, r3 @ compare unsigned index, length
+ bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on
+ b common_errArrayIndex @ index >= length, bail
+
+
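+/*
+ * Illustrative C sketch of the aput-object path, including the element type
+ * check performed by the out-of-line .LOP_APUT_OBJECT_finish stub (defined
+ * later in this file; dvmCanPutArrayElement as the checking helper is an
+ * assumption based on the VM's C sources):
+ *
+ *   ArrayObject* arrayObj = (ArrayObject*) fp[BB];
+ *   Object* obj = (Object*) fp[AA];
+ *   u4 idx = fp[CC];
+ *   if (arrayObj == NULL) goto throwNullPointerException;
+ *   if (idx >= arrayObj->length) goto throwArrayIndexException;
+ *   if (obj != NULL &&
+ *       !dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz))  // assumed
+ *       goto throwArrayStoreException;
+ *   ((Object**) arrayObj->contents)[idx] = obj;
+ */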
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/OP_APUT_BOOLEAN.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/OP_APUT_BYTE.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/OP_APUT_CHAR.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/OP_APUT_SHORT.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET: /* 0x52 */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_finish
+ b common_exceptionThrown
+
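+/*
+ * Illustrative C sketch of the iget fast path above plus the .LOP_IGET_finish
+ * load (the finish stub lives later in this file; field->byteOffset mirrors
+ * the InstField layout in the VM's C headers; A = (rINST >> 8) & 0xf,
+ * B = rINST >> 12, CCCC = second code unit):
+ *
+ *   InstField* field = methodClassDex->pResFields[CCCC];
+ *   if (field == NULL) {                      // not yet resolved
+ *       EXPORT_PC();                          // dvmResolveInstField may throw
+ *       field = dvmResolveInstField(curMethod->clazz, CCCC);
+ *       if (field == NULL) goto exceptionThrown;
+ *   }
+ *   Object* obj = (Object*) fp[B];
+ *   if (obj == NULL) goto throwNullPointerException;   // in finish stub
+ *   fp[A] = *(u4*) ((u1*) obj + field->byteOffset);
+ */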
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/OP_IGET_WIDE.S */
+ /*
+ * 64-bit instance field get.
+ */
+ /* iget-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_WIDE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_WIDE_finish
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/OP_IGET_OBJECT.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_OBJECT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_OBJECT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/OP_IGET_BOOLEAN.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BOOLEAN_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/OP_IGET_BYTE.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BYTE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BYTE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/OP_IGET_CHAR.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_CHAR_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_CHAR_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/OP_IGET_SHORT.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_SHORT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_SHORT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT: /* 0x59 */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/OP_IPUT_WIDE.S */
+ /* iput-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_WIDE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_WIDE_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/OP_IPUT_OBJECT.S */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_OBJECT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_OBJECT_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/OP_IPUT_BOOLEAN.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/OP_IPUT_BYTE.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BYTE_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BYTE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/OP_IPUT_CHAR.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_CHAR_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_CHAR_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/OP_IPUT_SHORT.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_SHORT_finish @ no, already resolved
+8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_SHORT_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET: /* 0x60 */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_resolve @ yes, do resolve
+.LOP_SGET_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
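+/*
+ * Illustrative C sketch of the sget handler (the .LOP_SGET_resolve stub is
+ * out of line later in this file; dvmResolveStaticField as the resolver
+ * entry is an assumption based on the VM's C sources):
+ *
+ *   StaticField* field = methodClassDex->pResFields[BBBB];
+ *   if (field == NULL) {
+ *       EXPORT_PC();
+ *       field = dvmResolveStaticField(curMethod->clazz, BBBB);
+ *       if (field == NULL) goto exceptionThrown;
+ *   }
+ *   fp[AA] = field->value.i;                  // offStaticField_value
+ */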
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ /* sget-wide vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_WIDE_resolve @ yes, do resolve
+.LOP_SGET_WIDE_finish:
+ mov r1, rINST, lsr #8 @ r1<- AA
+ ldrd r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ stmia r1, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/OP_SGET_OBJECT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_OBJECT_resolve @ yes, do resolve
+.LOP_SGET_OBJECT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/OP_SGET_BOOLEAN.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve
+.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/OP_SGET_BYTE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BYTE_resolve @ yes, do resolve
+.LOP_SGET_BYTE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/OP_SGET_CHAR.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_CHAR_resolve @ yes, do resolve
+.LOP_SGET_CHAR_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/OP_SGET_SHORT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_SHORT_resolve @ yes, do resolve
+.LOP_SGET_SHORT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT: /* 0x67 */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_resolve @ yes, do resolve
+.LOP_SPUT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ /* sput-wide vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_WIDE_resolve @ yes, do resolve
+.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strd r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/OP_SPUT_OBJECT.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve
+.LOP_SPUT_OBJECT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/OP_SPUT_BOOLEAN.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve
+.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/OP_SPUT_BYTE.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BYTE_resolve @ yes, do resolve
+.LOP_SPUT_BYTE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/OP_SPUT_CHAR.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_CHAR_resolve @ yes, do resolve
+.LOP_SPUT_CHAR_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/OP_SPUT_SHORT.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_SHORT_resolve @ yes, do resolve
+.LOP_SPUT_SHORT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
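+/*
+ * Illustrative C sketch of invoke-virtual: the block above resolves the base
+ * method; the out-of-line .LOP_INVOKE_VIRTUAL_continue stub (later in this
+ * file) does the vtable dispatch, sketched here as an assumption based on
+ * the VM's portable interpreter:
+ *
+ *   Method* baseMethod = methodClassDex->pResMethods[BBBB];
+ *   if (baseMethod == NULL) {
+ *       baseMethod = dvmResolveMethod(curMethod->clazz, BBBB, METHOD_VIRTUAL);
+ *       if (baseMethod == NULL) goto exceptionThrown;
+ *   }
+ *   Object* thisPtr = (Object*) fp[D];        // first argument register
+ *   if (thisPtr == NULL) goto throwNullPointerException;   // in continue stub
+ *   Method* methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+ *   // fall into common_invokeMethodNoRange with methodToCall in r0
+ */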
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r2, #0 @ null "this"?
+ ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out, we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+ cmp r2, #0 @ null "this" ref?
+ bne common_invokeMethodNoRange @ no, continue on
+ b common_errNullObject @ yes, throw exception
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodNoRange @ yes, continue on
+0: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne common_invokeMethodNoRange @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r0, r2) @ r0<- first arg ("this")
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex
+ cmp r0, #0 @ null obj?
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodNoRange @ jump to common handler
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/OP_UNUSED_73.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r2, #0 @ null "this"?
+ ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out, we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r2, r10) @ r2<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+ cmp r2, #0 @ null "this" ref?
+ bne common_invokeMethodRange @ no, continue on
+ b common_errNullObject @ yes, throw exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodRange @ yes, continue on
+0: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne common_invokeMethodRange @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r0, r2) @ r0<- first arg ("this")
+ ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex
+ cmp r0, #0 @ null obj?
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodRange @ jump to common handler
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/OP_UNUSED_79.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/OP_UNUSED_7A.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/OP_NEG_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsb r0, r0, #0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
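+/*
+ * Illustrative C sketch of the shared unop template that neg-int
+ * instantiates above; every variant differs only in the one "instr" line
+ * (here rsb, i.e. two's-complement negation):
+ *
+ *   u4 A = (rINST >> 8) & 0x0f;    // destination register nibble
+ *   u4 B = rINST >> 12;            // source register nibble
+ *   fp[A] = (u4) (-(s4) fp[B]);    // "instr": rsb r0, r0, #0
+ */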
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/OP_NOT_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/OP_NEG_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsbs r0, r0, #0 @ optional op; may set condition codes
+ rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/OP_NOT_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ optional op; may set condition codes
+ mvn r1, r1 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/OP_NEG_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/OP_NEG_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/OP_INT_TO_LONG.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
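+/*
+ * The "mov r1, r0, asr #31" above is the entire int-to-long conversion; a
+ * rough C equivalent (illustrative only):
+ *
+ *   s4 lo = (s4) fp[B];
+ *   fp[A]     = (u4) lo;
+ *   fp[A + 1] = (u4) (lo >> 31);   // arithmetic shift replicates the sign bit
+ */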
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/OP_INT_TO_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_i2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/OP_INT_TO_DOUBLE.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_i2d @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/OP_LONG_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/OP_LONG_TO_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/OP_FLOAT_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_f2iz @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+#if 0
+@include "armv5te/unop.S" {"instr":"bl f2i_doconv"}
+@break
+/*
+ * Convert the float in r0 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2i_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x4f000000 @ (float)maxint
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxint?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0x80000000 @ return maxint (7fffffff)
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xcf000000 @ (float)minint
+ bl __aeabi_fcmple @ is arg <= minint?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0x80000000 @ return minint (80000000)
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ ldmeqfd sp!, {r4, pc} @ return zero for NaN
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2iz @ convert float to int
+ ldmfd sp!, {r4, pc}
+#endif
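+
+/*
+ * For reference, the clamping that the disabled f2i_doconv path implements
+ * (and that the __aeabi_f2iz call above is assumed to provide on this
+ * platform) looks like this as a C sketch:
+ *
+ *     #include <stdint.h>
+ *     int32_t f2i_java(float f) {
+ *         if (f >= 2147483648.0f)  return INT32_MAX;  // clip at 2^31
+ *         if (f <= -2147483648.0f) return INT32_MIN;  // clip at -2^31
+ *         if (f != f)              return 0;          // NaN converts to zero
+ *         return (int32_t)f;                          // ordinary truncation
+ *     }
+ */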
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/OP_FLOAT_TO_LONG.S */
+@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl f2l_doconv @ r0/r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_f2d @ r0/r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/OP_DOUBLE_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_d2iz @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+#if 0
+@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"}
+@break
+/*
+ * Convert the double in r0/r1 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+d2i_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ ldr r2, .LOP_DOUBLE_TO_INT_maxlo @ (double)maxint, lo
+ ldr r3, .LOP_DOUBLE_TO_INT_maxhi @ (double)maxint, hi
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxint?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0x80000000 @ return maxint (7fffffff)
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ ldr r3, .LOP_DOUBLE_TO_INT_min @ (double)minint, hi
+ mov r2, #0 @ (double)minint, lo
+ bl __aeabi_dcmple @ is arg <= minint?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0x80000000 @ return minint (80000000)
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ beq 1f @ return zero for NaN
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2iz @ convert double to int
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+.LOP_DOUBLE_TO_INT_maxlo:
+ .word 0xffc00000 @ maxint, as a double (low word)
+.LOP_DOUBLE_TO_INT_maxhi:
+ .word 0x41dfffff @ maxint, as a double (high word)
+.LOP_DOUBLE_TO_INT_min:
+ .word 0xc1e00000 @ minint, as a double (high word)
+#endif
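+
+/*
+ * Note on the constants above: 0x41dfffff/0xffc00000 is exactly
+ * 2147483647.0 (maxint fits in a double's 53-bit mantissa) and
+ * 0xc1e00000/0x00000000 is exactly -2147483648.0, so the compares
+ * clip precisely at the int range.
+ */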
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/OP_DOUBLE_TO_LONG.S */
+@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl d2l_doconv @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_d2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/OP_INT_TO_BYTE.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #24 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, asr #24 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/OP_INT_TO_CHAR.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #16 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/OP_INT_TO_SHORT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #16 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, asr #16 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
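+
+/*
+ * The three narrowing handlers above share one idiom: shift the value up
+ * to discard the unwanted high bits, then shift back down arithmetically
+ * (byte, short) or logically (char). The shift pairs stand in for the
+ * sxtb/sxth/uxth instructions that only appear in ARMv6. Equivalent C
+ * (illustrative):
+ *
+ *     #include <stdint.h>
+ *     int32_t to_byte(int32_t x)  { return (int8_t)x;   }  // asl #24 then asr #24
+ *     int32_t to_char(int32_t x)  { return (uint16_t)x; }  // asl #16 then lsr #16
+ *     int32_t to_short(int32_t x) { return (int16_t)x;  }  // asl #16 then asr #16
+ */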
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/OP_ADD_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
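+
+/*
+ * Operand decode shared by the format-23x handlers below, as a C sketch
+ * (names illustrative): the code unit after the opcode packs CC into its
+ * high byte and BB into its low byte.
+ *
+ *     uint16_t inst;              // first code unit (AA|op)
+ *     uint16_t unit1;             // second code unit (CC|BB)
+ *     uint32_t AA = inst >> 8;    // destination vAA
+ *     uint32_t BB = unit1 & 0xff; // first source vBB
+ *     uint32_t CC = unit1 >> 8;   // second source vCC
+ */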
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/OP_SUB_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/OP_MUL_INT.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/OP_DIV_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT: /* 0x94 */
+/* File: armv5te/OP_REM_INT.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
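+
+/*
+ * Dalvik's rem-int uses C remainder semantics (truncating division, the
+ * result takes the sign of the dividend), which is what __aeabi_idivmod
+ * computes, so the value in r1 can be stored as-is. Illustrative C:
+ *
+ *     int32_t rem_int(int32_t num, int32_t den) { return num % den; }  // -7 % 2 == -1
+ */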
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT: /* 0x95 */
+/* File: armv5te/OP_AND_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT: /* 0x96 */
+/* File: armv5te/OP_OR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/OP_XOR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/OP_SHL_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/OP_SHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/OP_USHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/OP_ADD_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
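+
+/*
+ * The adds/adc pair above is the standard 64-bit add on a 32-bit core:
+ * add the low words, then fold the carry into the high-word add. Over
+ * explicit halves in C (illustrative):
+ *
+ *     uint32_t lo = blo + clo;
+ *     uint32_t hi = bhi + chi + (lo < blo);  // (lo < blo) is the carry out
+ */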
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/OP_SUB_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/OP_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_MUL_LONG_finish
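+
+/*
+ * C sketch of the mul/umull/mla sequence above, with X/W the low/high
+ * words of vBB and Z/Y the low/high words of vCC (the result store
+ * happens in .LOP_MUL_LONG_finish):
+ *
+ *     uint64_t zx      = (uint64_t)Z * X;  // umull: full 32x32 product
+ *     uint32_t hiterms = Z * W + Y * X;    // mul + mla: cross terms
+ *     uint32_t lo      = (uint32_t)zx;
+ *     uint32_t hi      = (uint32_t)(zx >> 32) + hiterms;
+ */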
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/OP_DIV_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/OP_REM_LONG.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/OP_AND_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/OP_OR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/OP_XOR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/OP_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance to its low 6 bits.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHL_LONG_finish
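+
+/*
+ * C sketch of the composition above, for a shift count s in 0..63 (the
+ * low word and the store are handled in .LOP_SHL_LONG_finish):
+ *
+ *     hi = (hi << s) | (lo >> (32 - s));  // bits crossing the word boundary
+ *     if (s >= 32) hi = lo << (s - 32);   // the movpl case overwrites hi
+ *
+ * (In C the first line would need an s != 0 guard; ARM register-specified
+ * shifts by 32 simply produce 0, so the asm needs no special case.)
+ */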
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/OP_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance to its low 6 bits.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/OP_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+ * distance to its low 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_USHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/OP_ADD_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fadd @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/OP_SUB_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fsub @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/OP_MUL_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fmul @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/OP_DIV_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fdiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/OP_REM_FLOAT.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
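+
+/*
+ * Java's float remainder is defined like C fmod (truncating division,
+ * result takes the sign of the dividend), so the libm call returns the
+ * right value directly, e.g. fmodf(-5.5f, 3.0f) == -2.5f.
+ */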
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/OP_ADD_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dadd @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/OP_SUB_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dsub @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/OP_MUL_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dmul @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/OP_DIV_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ddiv @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/OP_REM_DOUBLE.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/OP_ADD_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
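+/*
+ * Illustrative decode sketch for the /2addr format above: both vreg
+ * numbers share one byte of the code unit, and vA is also the result:
+ *
+ *     uint16_t inst = ...;                  // current code unit (rINST)
+ *     int A = (inst >> 8) & 0x0f;           // low nibble: dest + 1st operand
+ *     int B = inst >> 12;                   // high nibble: 2nd operand
+ *     fp[A] = (int32_t)fp[A] + (int32_t)fp[B];   // add-int/2addr vA, vB
+ */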
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/OP_SUB_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/OP_MUL_INT_2ADDR.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/OP_DIV_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
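+/*
+ * Sketch of the chkzero path taken above (dvmThrowDivideByZero is a
+ * hypothetical stand-in for the shared common_errDivideByZero handler):
+ *
+ *     int32_t div_int_2addr(int32_t vA, int32_t vB) {
+ *         if (vB == 0) {
+ *             dvmThrowDivideByZero();       // never returns to this op
+ *             return 0;
+ *         }
+ *         return vA / vB;                   // the __aeabi_idiv call above
+ *     }
+ */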
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/OP_REM_INT_2ADDR.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
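+/*
+ * Sketch: __aeabi_idivmod returns the quotient in r0 and the remainder
+ * in r1, so this handler stores r1 (note the SET_VREG(r1, r9) above):
+ *
+ *     int32_t rem_int(int32_t num, int32_t den) {
+ *         return num % den;    // compiles to __aeabi_idivmod; the
+ *     }                        // remainder half is the result
+ */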
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/OP_AND_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/OP_OR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/OP_XOR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/OP_SHL_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/OP_SHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/OP_USHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/OP_ADD_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
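+/*
+ * Sketch of the adds/adc pair above: a 64-bit add split across two
+ * 32-bit halves with an explicit carry, equivalent to:
+ *
+ *     uint32_t lo = a_lo + b_lo;
+ *     uint32_t hi = a_hi + b_hi + (lo < a_lo);   // carry out of low word
+ *
+ * i.e. one uint64_t addition spread over the r0/r1 and r2/r3 pairs.
+ */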
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/OP_SUB_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/OP_MUL_LONG_2ADDR.S */
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See OP_MUL_LONG for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
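+/*
+ * Sketch of the partial-product scheme above, with vA = W:X (r1:r0)
+ * and vB = Y:Z (r3:r2) as in the comments:
+ *
+ *     uint64_t mul_long(uint32_t x, uint32_t w, uint32_t z, uint32_t y) {
+ *         uint64_t zx = (uint64_t)z * x;        // umull: full 64-bit ZxX
+ *         uint32_t cross = z * w + y * x;       // mul + mla: ZxW + YxX
+ *         return zx + ((uint64_t)cross << 32);  // folds into the high word
+ *     }
+ *
+ * The cross terms only ever land in the top 32 bits of the product,
+ * so computing them modulo 2^32 loses nothing.
+ */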
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/OP_DIV_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/OP_REM_LONG_2ADDR.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
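+/*
+ * Sketch: __aeabi_ldivmod returns the 64-bit quotient in r0/r1 and the
+ * remainder in r2/r3, which is why rem-long stores {r2,r3} while
+ * div-long above stores {r0,r1}:
+ *
+ *     int64_t rem_long(int64_t num, int64_t den) {
+ *         return num % den;    // emits __aeabi_ldivmod, keeps r2/r3
+ *     }
+ */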
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/OP_AND_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/OP_OR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/OP_XOR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/OP_SHL_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ b .LOP_SHL_LONG_2ADDR_finish
+
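+/*
+ * Sketch of the variable 64-bit left shift above, for n = vB & 63
+ * (the n == 0 guard is implicit in ARM register-specified shifts,
+ * where a shift amount of 32 produces 0):
+ *
+ *     if (n < 32) {
+ *         hi = (hi << n) | (n ? lo >> (32 - n) : 0);
+ *         lo <<= n;
+ *     } else {
+ *         hi = lo << (n - 32);     // the movpl case: (n - 32) >= 0
+ *         lo = 0;
+ *     }
+ *
+ * shr/ushr below are mirror images using asr/lsr for the high word.
+ */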
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/OP_SHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ b .LOP_SHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/OP_USHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ b .LOP_USHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fadd @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fsub @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fmul @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fdiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r1).  Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r9) @ r0<- vA
+ GET_VREG(r1, r3) @ r1<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dadd @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dsub @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dmul @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ddiv @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vB (r2-r3).  Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/OP_ADD_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
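+/*
+ * Illustrative decode sketch for the lit16 format: the second code
+ * unit is a signed 16-bit literal (FETCH_S sign-extends it):
+ *
+ *     const uint16_t *code = ...;             // insns array
+ *     uint16_t inst = code[pc];               // rINST
+ *     int A = (inst >> 8) & 0x0f;             // destination
+ *     int B = inst >> 12;                     // source register
+ *     int32_t lit = (int16_t)code[pc + 1];    // ssssCCCC
+ *     fp[A] = (int32_t)fp[B] + lit;           // add-int/lit16
+ */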
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/OP_MUL_INT_LIT16.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/OP_DIV_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/OP_REM_INT_LIT16.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/OP_AND_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/OP_OR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/OP_XOR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CCCC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/OP_ADD_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
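+/*
+ * Illustrative decode sketch for the lit8 format: one extra code unit
+ * carries both the source register (BB) and a signed 8-bit literal
+ * (CC).  Note that the movs above doubles as the zero test: it sets
+ * the flags that beq consumes when chkzero is 1, which is why the cmp
+ * stays commented out.
+ *
+ *     uint16_t inst = code[pc];                // rINST
+ *     int AA = inst >> 8;                      // destination
+ *     int32_t ccbb = (int16_t)code[pc + 1];    // ssssCCBB (FETCH_S)
+ *     int BB = ccbb & 0xff;                    // source register
+ *     int32_t lit = ccbb >> 8;                 // ssssssCC (asr #8)
+ *     fp[AA] = (int32_t)fp[BB] + lit;          // add-int/lit8
+ */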
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/OP_RSUB_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * the sign-extended literal CC (r1).  Useful for integer division
+     * and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/OP_MUL_INT_LIT8.S */
+/* must be "mul r0, r1, r0" -- "mul r0, r0, r1" (Rd == Rm) is unpredictable on pre-v6 ARM */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/OP_DIV_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
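+
+    /*
+     * Note on the zero check above: "cmp r1, #0" is commented out
+     * because the earlier "movs r1, r3, asr #8" already set the
+     * condition codes from the literal, so the beq alone traps a zero
+     * divisor.  The rem-int/lit8 handler below relies on the same trick.
+     */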
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/OP_REM_INT_LIT8.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/OP_AND_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/OP_OR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/OP_XOR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/OP_SHL_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/OP_SHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/OP_USHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E3: /* 0xe3 */
+/* File: armv5te/OP_UNUSED_E3.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E4: /* 0xe4 */
+/* File: armv5te/OP_UNUSED_E4.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E5: /* 0xe5 */
+/* File: armv5te/OP_UNUSED_E5.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E6: /* 0xe6 */
+/* File: armv5te/OP_UNUSED_E6.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E7: /* 0xe7 */
+/* File: armv5te/OP_UNUSED_E7.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E8: /* 0xe8 */
+/* File: armv5te/OP_UNUSED_E8.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E9: /* 0xe9 */
+/* File: armv5te/OP_UNUSED_E9.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EA: /* 0xea */
+/* File: armv5te/OP_UNUSED_EA.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EB: /* 0xeb */
+/* File: armv5te/OP_UNUSED_EB.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EC: /* 0xec */
+/* File: armv5te/OP_UNUSED_EC.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_ED: /* 0xed */
+/* File: armv5te/OP_UNUSED_ED.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/OP_EXECUTE_INLINE.S */
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We need to call:
+ * dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref)
+ *
+ * The first four args are in r0-r3, but the last two must be pushed
+ * onto the stack.
+ */
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ FETCH(r10, 1) @ r10<- BBBB
+ add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval
+ EXPORT_PC() @ can throw
+ sub sp, sp, #8 @ make room for arg(s)
+ mov r0, rINST, lsr #12 @ r0<- B
+ str r1, [sp] @ push &glue->retval
+ bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ add sp, sp, #8 @ pop stack
+ cmp r0, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
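+
+    /*
+     * Note: only one word (&glue->retval) is stored here, but 8 bytes
+     * are reserved; presumably this keeps sp 8-byte aligned at the
+     * call, as the EABI requires at function entry.
+     */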
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EF: /* 0xef */
+/* File: armv5te/OP_UNUSED_EF.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
+ /*
+ * invoke-direct-empty is a no-op in a "standard" interpreter.
+ */
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_F1: /* 0xf1 */
+/* File: armv5te/OP_UNUSED_F1.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
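+
+    /*
+     * In rough C terms the quick form above is:
+     *     vA = *(u4 *)((u1 *)vB + CCCC)
+     * CCCC is a byte offset precomputed at optimization time, so no
+     * field resolution happens on this path.
+     */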
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/OP_IGET_WIDE_QUICK.S */
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, r1] @ r0/r1<- obj.field (64 bits, aligned)
+ and r2, r2, #15 @ r2<- A
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15 @ r2<- A
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/OP_IPUT_WIDE_QUICK.S */
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A(+)
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15 @ r0<- A
+ GET_VREG(r2, r1) @ r2<- fp[B], the object pointer
+ add r3, rFP, r0, lsl #2 @ r3<- &fp[A]
+ cmp r2, #0 @ check object for null
+ ldmia r3, {r0-r1} @ r0/r1<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH(r3, 1) @ r3<- field byte offset
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
+/* File: armv5te/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15 @ r2<- A
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r2, r3) @ r2<- vC ("this" ptr)
+ cmp r2, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodNoRange @ continue on
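+
+    /*
+     * The dispatch above amounts to:
+     *     method = this->clazz->vtable[BBBB]
+     * BBBB is a precomputed vtable index, so no method resolution is
+     * needed on this path.
+     */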
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r2, r3) @ r2<- vC ("this" ptr)
+ cmp r2, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodRange @ continue on
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r3, r10) @ r3<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r3, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodNoRange @ continue on
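+
+    /*
+     * The dispatch above amounts to:
+     *     method = method->clazz->super->vtable[BBBB]
+     * i.e. the superclass vtable is indexed directly with the
+     * precomputed BBBB.
+     */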
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r3, r10) @ r3<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r3, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodRange @ continue on
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FC: /* 0xfc */
+/* File: armv5te/OP_UNUSED_FC.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FD: /* 0xfd */
+/* File: armv5te/OP_UNUSED_FD.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FE: /* 0xfe */
+/* File: armv5te/OP_UNUSED_FE.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FF: /* 0xff */
+/* File: armv5te/OP_UNUSED_FF.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+
+
+ .balign 64
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CONST_STRING */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_resolve:
+ EXPORT_PC()
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_CONST_STRING_JUMBO */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBBBBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_JUMBO_resolve:
+ EXPORT_PC()
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_CONST_CLASS */
+
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * r1: BBBB (Class ref)
+ * r9: target register
+ */
+.LOP_CONST_CLASS_resolve:
+ EXPORT_PC()
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- Class reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_CHECK_CAST */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds class resolved from BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_fullcheck:
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ cmp r0, #0 @ failed?
+ bne .LOP_CHECK_CAST_okay @ no, success
+
+ @ A cast has failed. We need to throw a ClassCastException with the
+ @ class of the object that failed to be cast.
+ EXPORT_PC() @ about to throw
+ ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz
+ ldr r0, .LstrClassCastExceptionPtr
+ ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
+ bl dvmThrowExceptionWithClassMessage
+ b common_exceptionThrown
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r2 holds BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r1, r2 @ r1<- BBBB
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_CHECK_CAST_resolved @ pick up where we left off
+
+.LstrClassCastExceptionPtr:
+ .word .LstrClassCastException
+
+
+/* continuation for OP_INSTANCE_OF */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds class resolved from BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_fullcheck:
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ @ fall through to OP_INSTANCE_OF_store
+
+ /*
+ * r0 holds boolean result
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_store:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Trivial test succeeded, save and bail.
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_trivial:
+ mov r0, #1 @ indicate success
+ @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r3 holds BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
+ mov r1, r3 @ r1<- BBBB
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_INSTANCE_OF_resolved @ pick up where we left off
+
+
+/* continuation for OP_NEW_INSTANCE */
+
+ .balign 32 @ minimize cache lines
+.LOP_NEW_INSTANCE_finish: @ r0=class
+ bl dvmAllocObject @ r0<- new object
+ mov r3, rINST, lsr #8 @ r3<- AA
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Class initialization required.
+ *
+ * r0 holds class object
+ */
+.LOP_NEW_INSTANCE_needinit:
+ mov r9, r0 @ save r0
+ bl dvmInitClass @ initialize class
+ cmp r0, #0 @ check boolean result
+ mov r0, r9 @ restore r0
+ bne .LOP_NEW_INSTANCE_initialized @ success, continue
+ b common_exceptionThrown @ failed, deal with init exception
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r1 holds BBBB
+ */
+.LOP_NEW_INSTANCE_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ bne .LOP_NEW_INSTANCE_resolved @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * We can't instantiate an abstract class or interface, so throw an
+ * InstantiationError with the class descriptor as the message.
+ *
+ * r0 holds class object
+ */
+.LOP_NEW_INSTANCE_abstract:
+ ldr r1, [r0, #offClassObject_descriptor]
+ ldr r0, .LstrInstantiationErrorPtr
+ bl dvmThrowExceptionWithClassMessage
+ b common_exceptionThrown
+
+.LstrInstantiationErrorPtr:
+ .word .LstrInstantiationError
+
+
+/* continuation for OP_NEW_ARRAY */
+
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * r1 holds array length
+ * r2 holds class ref CCCC
+ */
+.LOP_NEW_ARRAY_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ mov r9, r1 @ r9<- length (save)
+ mov r1, r2 @ r1<- CCCC
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ mov r1, r9 @ r1<- length (restore)
+ beq common_exceptionThrown @ yes, handle exception
+ @ fall through to OP_NEW_ARRAY_finish
+
+ /*
+ * Finish allocation.
+ *
+ * r0 holds class
+ * r1 holds array length
+ */
+.LOP_NEW_ARRAY_finish:
+ mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table
+ bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags)
+ cmp r0, #0 @ failed?
+ mov r2, rINST, lsr #8 @ r2<- A+
+ beq common_exceptionThrown @ yes, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb r3, [r3, #1] @ r3<- descriptor[1]
+ .if 0
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp r3, #'I' @ array of ints?
+ cmpne r3, #'L' @ array of objects?
+ cmpne r3, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 0
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
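+
+    /*
+     * The non-range copy loop above walks the packed register nibbles:
+     * each pass masks the low 4 bits of r1 (C, then D, E, F) to pick a
+     * vreg and shifts r1 right by 4 to expose the next one; vA, the
+     * fifth argument when the length is 5, was stored separately before
+     * the loop.
+     */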
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+ ldr r0, .L_strInternalError
+ ldr r1, .L_strFilledNewArrayNotImpl
+ bl dvmThrowException
+ b common_exceptionThrown
+
+ .if (!0) @ define in one or the other, not both
+.L_strFilledNewArrayNotImpl:
+ .word .LstrFilledNewArrayNotImpl
+.L_strInternalError:
+ .word .LstrInternalError
+ .endif
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb r3, [r3, #1] @ r3<- descriptor[1]
+ .if 1
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp r3, #'I' @ array of ints?
+ cmpne r3, #'L' @ array of objects?
+ cmpne r3, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 1
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+ ldr r0, .L_strInternalError
+ ldr r1, .L_strFilledNewArrayNotImpl
+ bl dvmThrowException
+ b common_exceptionThrown
+
+ .if (!1) @ define in one or the other, not both
+.L_strFilledNewArrayNotImpl:
+ .word .LstrFilledNewArrayNotImpl
+.L_strInternalError:
+ .word .LstrInternalError
+ .endif
+
+
+/* continuation for OP_CMPL_FLOAT */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPL_FLOAT_gt_or_nan:
+ mov r1, r9 @ reverse order
+ mov r0, r10
+ bl __aeabi_cfcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPL_FLOAT_finish
+ mvn r1, #0 @ r1<- -1 for NaN (cmpl bias)
+ b .LOP_CMPL_FLOAT_finish
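+
+    /*
+     * Flag decode above: __aeabi_cfcmple is called with the operands
+     * swapped, so C clear here means the second operand is the greater
+     * and r1<- 1; falling through means the compare was unordered
+     * (NaN), which the cmpl bias maps to -1.
+     */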
+
+
+#if 0 /* "classic" form */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpeq @ r0<- (vBB == vCC)
+ cmp r0, #0 @ equal?
+ movne r1, #0 @ yes, result is 0
+ bne OP_CMPL_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmplt @ r0<- (vBB < vCC)
+ cmp r0, #0 @ less than?
+ b OP_CMPL_FLOAT_continue
+@%break
+
+OP_CMPL_FLOAT_continue:
+ mvnne r1, #0 @ yes, result is -1
+ bne OP_CMPL_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpgt @ r0<- (vBB > vCC)
+ cmp r0, #0 @ greater than?
+ beq OP_CMPL_FLOAT_nan @ no, must be NaN
+ mov r1, #1 @ yes, result is 1
+ @ fall through to _finish
+
+OP_CMPL_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * This is expected to be uncommon, so we double-branch (once to here,
+ * again back to _finish).
+ */
+OP_CMPL_FLOAT_nan:
+ mvn r1, #0 @ r1<- 1 or -1 for NaN
+ b OP_CMPL_FLOAT_finish
+
+#endif
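+
+    /*
+     * The disabled "classic" form above needs up to three separate
+     * library calls (__aeabi_fcmpeq/lt/gt) per comparison; the live
+     * code reaches the same result with one __aeabi_cfcmple plus flag
+     * tests, which is presumably why it replaced this version.
+     */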
+
+
+/* continuation for OP_CMPG_FLOAT */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPG_FLOAT_gt_or_nan:
+ mov r1, r9 @ reverse order
+ mov r0, r10
+ bl __aeabi_cfcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPG_FLOAT_finish
+ mov r1, #1 @ r1<- 1 for NaN (cmpg bias)
+ b .LOP_CMPG_FLOAT_finish
+
+
+#if 0 /* "classic" form */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpeq @ r0<- (vBB == vCC)
+ cmp r0, #0 @ equal?
+ movne r1, #0 @ yes, result is 0
+ bne OP_CMPG_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmplt @ r0<- (vBB < vCC)
+ cmp r0, #0 @ less than?
+ b OP_CMPG_FLOAT_continue
+@%break
+
+OP_CMPG_FLOAT_continue:
+ mvnne r1, #0 @ yes, result is -1
+ bne OP_CMPG_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpgt @ r0<- (vBB > vCC)
+ cmp r0, #0 @ greater than?
+ beq OP_CMPG_FLOAT_nan @ no, must be NaN
+ mov r1, #1 @ yes, result is 1
+ @ fall through to _finish
+
+OP_CMPG_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * This is expected to be uncommon, so we double-branch (once to here,
+ * again back to _finish).
+ */
+OP_CMPG_FLOAT_nan:
+ mov r1, #1 @ r1<- 1 or -1 for NaN
+ b OP_CMPG_FLOAT_finish
+
+#endif
+
+
+/* continuation for OP_CMPL_DOUBLE */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPL_DOUBLE_gt_or_nan:
+ ldmia r10, {r0-r1} @ reverse order
+ ldmia r9, {r2-r3}
+ bl __aeabi_cdcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPL_DOUBLE_finish
+ mvn r1, #0 @ r1<- -1 for NaN (cmpl bias)
+ b .LOP_CMPL_DOUBLE_finish
+
+
+/* continuation for OP_CMPG_DOUBLE */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPG_DOUBLE_gt_or_nan:
+ ldmia r10, {r0-r1} @ reverse order
+ ldmia r9, {r2-r3}
+ bl __aeabi_cdcmple @ flags<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPG_DOUBLE_finish
+ mov r1, #1 @ r1<- 1 for NaN (cmpg bias)
+ b .LOP_CMPG_DOUBLE_finish
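+
+    /*
+     * The only difference between the cmpl and cmpg continuations is
+     * the NaN result: cmpl uses "mvn r1, #0" (-1) where cmpg uses
+     * "mov r1, #1" (+1), matching the bias the two opcodes require.
+     */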
+
+
+/* continuation for OP_CMP_LONG */
+
+.LOP_CMP_LONG_less:
+ mvn r1, #0 @ r1<- -1
+ @ We would like to predicate the next mov to avoid the branch, but no
+ @ obvious way presents itself; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LOP_CMP_LONG_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.LOP_CMP_LONG_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_AGET_WIDE */
+
+.LOP_AGET_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_APUT_WIDE */
+
+.LOP_APUT_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strd r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_APUT_OBJECT */
+ /*
+ * On entry:
+ * r1 = vBB (arrayObj)
+ * r9 = vAA (obj)
+ * r10 = offset into array (vBB + vCC * width)
+ */
+.LOP_APUT_OBJECT_finish:
+ cmp r9, #0 @ storing null reference?
+ beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz
+ bl dvmCanPutArrayElement @ test object type vs. array type
+ cmp r0, #0 @ okay?
+ beq common_errArrayStore @ no
+.LOP_APUT_OBJECT_skip_check:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_WIDE_finish:
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ mov r2, rINST, lsr #8 @ r2<- A+
+ ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok)
+ and r2, r2, #15 @ r2<- A
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_OBJECT_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BOOLEAN_finish:
+ @bl common_squeak1
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BYTE_finish:
+ @bl common_squeak2
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_CHAR_finish:
+ @bl common_squeak3
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_SHORT_finish:
+ @bl common_squeak4
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_WIDE_finish:
+ mov r2, rINST, lsr #8 @ r2<- A+
+ cmp r9, #0 @ check object for null
+ and r2, r2, #15 @ r2<- A
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r2, {r0-r1} @ r0/r1<- fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_OBJECT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BOOLEAN_finish:
+ @bl common_squeak1
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BYTE_finish:
+ @bl common_squeak2
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_CHAR_finish:
+ @bl common_squeak3
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_SHORT_finish:
+ @bl common_squeak4
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SGET */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
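+
+    /*
+     * The .LOP_SGET_* and .LOP_SPUT_* resolve continuations below all
+     * share this shape: export the PC (resolution can throw), resolve
+     * the field against the current method's class, then rejoin the
+     * fast path on success.  The resolved field is presumably cached,
+     * so later executions skip this path entirely.
+     */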
+
+
+/* continuation for OP_SGET_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_WIDE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_WIDE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_OBJECT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_OBJECT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_OBJECT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_BOOLEAN_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_BOOLEAN_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_BYTE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_BYTE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_CHAR_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_CHAR_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SGET_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SGET_SHORT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SGET_SHORT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r9: &fp[AA]
+ */
+.LOP_SPUT_WIDE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_WIDE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_OBJECT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_OBJECT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_OBJECT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_BOOLEAN_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_BOOLEAN_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_BYTE_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_BYTE_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_CHAR_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_CHAR_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_SPUT_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ */
+.LOP_SPUT_SHORT_resolve:
+ ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_SPUT_SHORT_finish @ yes, finish
+ b common_exceptionThrown @ no, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_continue:
+ GET_VREG(r1, r10) @ r1<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r1, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r1, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ continue on
+
+
+/* continuation for OP_INVOKE_SUPER */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r9 = method->clazz
+ */
+.LOP_INVOKE_SUPER_continue:
+ ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ continue on
+
+.LOP_INVOKE_SUPER_resolve:
+ mov r0, r9 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_DIRECT */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ GET_VREG(r2, r10) @ r2<- "this" ptr (reload)
+ bne .LOP_INVOKE_DIRECT_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+ GET_VREG(r1, r10) @ r1<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r1, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r1, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ continue on
+
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r9 = method->clazz
+ */
+.LOP_INVOKE_SUPER_RANGE_continue:
+ ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ continue on
+
+.LOP_INVOKE_SUPER_RANGE_resolve:
+ mov r0, r9 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_RANGE_resolve:
+ ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ GET_VREG(r2, r10) @ r2<- "this" ptr (reload)
+ bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* continuation for OP_FLOAT_TO_LONG */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function doesn't do this for us.
+ */
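+/*
+ * For reference, the clamping below corresponds roughly to this C sketch
+ * (hypothetical helper, not part of the VM; s8 is the VM's signed 64-bit):
+ *
+ *   s8 floatToLong(float f) {
+ *       if (f >= (float) LLONG_MAX) return LLONG_MAX;  // 0x5f000000 == 2^63
+ *       if (f <= (float) LLONG_MIN) return LLONG_MIN;  // 0xdf000000 == -2^63
+ *       if (f != f) return 0;                          // NaN maps to zero
+ *       return (s8) f;                                 // in-range conversion
+ *   }
+ */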
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
+
+
+/* continuation for OP_DOUBLE_TO_LONG */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function doesn't do this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ ldr r3, .LOP_DOUBLE_TO_LONG_max @ (double)maxlong, hi
+ sub sp, sp, #4 @ align for EABI
+ mov r2, #0 @ (double)maxlong, lo
+ mov r4, r0 @ save r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ ldr r3, .LOP_DOUBLE_TO_LONG_min @ (double)minlong, hi
+ mov r2, #0 @ (double)minlong, lo
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+.LOP_DOUBLE_TO_LONG_max:
+ .word 0x43e00000 @ maxlong, as a double (high word)
+.LOP_DOUBLE_TO_LONG_min:
+ .word 0xc3e00000 @ minlong, as a double (high word)
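+
+/*
+ * Note: 0x43e00000:00000000 is the IEEE-754 double encoding of 2^63
+ * ((double)maxlong after rounding), and 0xc3e00000:00000000 is -2^63.
+ */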
+
+
+/* continuation for OP_MUL_LONG */
+
+.LOP_MUL_LONG_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHL_LONG */
+
+.LOP_SHL_LONG_finish:
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHR_LONG */
+
+.LOP_SHR_LONG_finish:
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_USHR_LONG */
+
+.LOP_USHR_LONG_finish:
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHL_LONG_2ADDR */
+
+.LOP_SHL_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_SHR_LONG_2ADDR */
+
+.LOP_SHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_USHR_LONG_2ADDR */
+
+.LOP_USHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* continuation for OP_EXECUTE_INLINE */
+
+ /*
+ * Extract args, call function.
+ * r0 = #of args (0-4)
+ * r10 = call index
+ * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
+ *
+ * Other ideas:
+ * - Use a jump table from the main piece to jump directly into the
+ * AND/LDR pairs. Costs a data load, saves a branch.
+ * - Have five separate pieces that do the loading, so we can interleave
+ * the loads a little better. Increases code size.
+ */
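+ /*
+ * A note on the computed goto below: reading pc on ARM yields the address
+ * of the current instruction plus 8 (prefetch), so "add pc, pc, r0, lsl #3"
+ * lands r0 * 8 bytes past the "4:" label; since each case is two 4-byte
+ * instructions, r0 (now 4 - argcount) selects how many load pairs to skip.
+ */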
+.LOP_EXECUTE_INLINE_continue:
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, r9, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, r9, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, r9, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, r9, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+.LOP_EXECUTE_INLINE_table:
+ .word gDvmInlineOpsTable
+
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+/* File: armv5te/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * Common code when a backward branch is taken.
+ *
+ * On entry:
+ * r9 is PC adjustment *in bytes*
+ */
+common_backwardBranch:
+ mov r0, #kInterpEntryInstr
+ bl common_periodicChecks
+ FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/*
+ * Need to see if the thread needs to be suspended or debugger/profiler
+ * activity has begun.
+ *
+ * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
+ * have to do the second ldr.
+ *
+ * TODO: reduce this so we're just checking a single location.
+ *
+ * On entry:
+ * r0 is reentry type, e.g. kInterpEntryInstr
+ * r9 is trampoline PC adjustment *in bytes*
+ */
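+/*
+ * Rough C equivalent of the checks below (a sketch, using the glue field
+ * names from asm-constants.h):
+ *
+ *   if (*glue->pSelfSuspendCount != 0)
+ *       dvmCheckSuspendPending(glue->self);
+ *   else if (debugger attached or profiler active)
+ *       adjust PC, record the entry point, and bail to switch interpreters;
+ */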
+common_periodicChecks:
+ ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
+
+#if defined(WITH_DEBUGGER)
+ ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
+#endif
+#if defined(WITH_PROFILER)
+ ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
+#endif
+
+ ldr r3, [r3] @ r3<- suspendCount (int)
+
+#if defined(WITH_DEBUGGER)
+ ldrb r1, [r1] @ r1<- debuggerActive (boolean)
+#endif
+#if defined (WITH_PROFILER)
+ ldr r2, [r2] @ r2<- activeProfilers (int)
+#endif
+
+ cmp r3, #0 @ suspend pending?
+ bne 2f @ yes, check suspend
+
+#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
+# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
+ orrs r1, r1, r2 @ r1<- r1 | r2
+ cmp r1, #0 @ debugger attached or profiler started?
+# elif defined(WITH_DEBUGGER)
+ cmp r1, #0 @ debugger attached?
+# elif defined(WITH_PROFILER)
+ cmp r2, #0 @ profiler started?
+# endif
+ bne 3f @ debugger/profiler, switch interp
+#endif
+
+ bx lr @ nothing to do, return
+
+2: @ check suspend
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
+ b dvmCheckSuspendPending @ suspend if necessary, then return
+
+3: @ debugger/profiler enabled, bail out
+ add rPC, rPC, r9 @ update rPC
+ str r0, [rGLUE, #offGlue_entryPoint]
+ mov r1, #1 @ "want switch" = true
+ b common_gotoBail
+
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ *
+ * State registers will be saved to the "glue" area before bailing.
+ *
+ * On entry:
+ * r1 is "bool changeInterp", indicating if we want to switch to the
+ * other interpreter or just bail all the way out
+ */
+common_gotoBail:
+ SAVE_PC_FP_TO_GLUE() @ export state to "glue"
+ mov r0, rGLUE @ r0<- glue ptr
+ b dvmMterpStdBail @ call(glue, changeInterp)
+
+ @add r1, r1, #1 @ using (boolean+1)
+ @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf
+ @bl _longjmp @ does not return
+ @bl common_abort
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ beq .LinvokeArgsDone @ if no args, skip the rest
+ FETCH(r1, 2) @ r1<- CCCC
+
+ @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
+ @ (very few methods have > 10 args; could unroll for common cases)
+ add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC]
+ sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args
+1: ldr r1, [r3], #4 @ val = *fp++
+ subs r2, r2, #1 @ count--
+ str r1, [r10], #4 @ *outs++ = val
+ bne 1b @ ...while count != 0
+ b .LinvokeArgsDone
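+
+/*
+ * The copy loop above is, in effect (sketch only):
+ *
+ *   u4* outs = (u4*) SAVEAREA_FROM_FP(fp) - count;
+ *   memcpy(outs, &fp[CCCC], count * sizeof(u4));
+ */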
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ beq .LinvokeArgsDone @ if no args, skip the rest
+ FETCH(r1, 2) @ r1<- GFED
+
+ @ r0=methodToCall, r1=GFED, r2=count, r10=outs
+.LinvokeNonRange:
+ rsb r2, r2, #5 @ r2<- 5-r2
+ add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+5: and ip, rINST, #0x0f00 @ isolate A
+ ldr r3, [rFP, ip, lsr #6] @ r3<- vA (shift right 8, left 2)
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vA
+4: and ip, r1, #0xf000 @ isolate G
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vG (shift right 12, left 2)
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vG
+3: and ip, r1, #0x0f00 @ isolate F
+ ldr r3, [rFP, ip, lsr #6] @ r3<- vF
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vF
+2: and ip, r1, #0x00f0 @ isolate E
+ ldr r3, [rFP, ip, lsr #2] @ r3<- vE
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vE
+1: and ip, r1, #0x000f @ isolate D
+ ldr r3, [rFP, ip, lsl #2] @ r3<- vD
+ mov r0, r0 @ nop
+ str r3, [r10, #-4]! @ *--outs = vD
+0: @ fall through to .LinvokeArgsDone
+
+.LinvokeArgsDone: @ r0=methodToCall
+ @ find space for the new stack frame, check for overflow
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ ldrh r2, [r0, #offMethod_registersSize] @ r2<- methodToCall->regsSize
+ ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize
+ sub r1, r1, r2, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea
+@ bl common_dumpRegs
+ ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd
+ sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize)
+ cmp r3, r9 @ bottom < interpStackEnd?
+ blt .LstackOverflow @ yes, this frame will overflow stack
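+
+ /* In C terms, the frame math above is roughly (sketch):
+ * newFp = (u4*) oldSaveArea - methodToCall->registersSize;
+ * newSaveArea = SAVEAREA_FROM_FP(newFp);
+ * bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+ * if (bottom < glue->interpStackEnd) --> overflow
+ */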
+
+ @ set up newSaveArea
+#ifdef EASY_GDB
+ SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area
+ str ip, [r10, #offStackSaveArea_prevSave]
+#endif
+ str rFP, [r10, #offStackSaveArea_prevFrame]
+ str rPC, [r10, #offStackSaveArea_savedPc]
+ str r0, [r10, #offStackSaveArea_method]
+
+ ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
+ tst r3, #ACC_NATIVE
+ bne .LinvokeNative
+
+ /*
+ stmfd sp!, {r0-r3}
+ bl common_printNewline
+ mov r0, rFP
+ mov r1, #0
+ bl dvmDumpFp
+ ldmfd sp!, {r0-r3}
+ stmfd sp!, {r0-r3}
+ mov r0, r1
+ mov r1, r10
+ bl dvmDumpFp
+ bl common_printNewline
+ ldmfd sp!, {r0-r3}
+ */
+
+ @ Update "glue" values for the new method
+ @ r0=methodToCall, r1=newFp
+ ldr r3, [r0, #offMethod_clazz] @ r3<- method->clazz
+ str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall
+ ldr r3, [r3, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- method->insns
+ str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
+ ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self
+ FETCH_INST() @ load rINST from rPC
+ mov rFP, r1 @ fp = newFp
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LinvokeNative:
+ @ Prep for the native call
+ @ r0=methodToCall, r1=newFp, r10=newSaveArea
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ ldr r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
+ str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
+ str r9, [r10, #offStackSaveArea_localRefTop] @newFp->localRefTop=refNext
+ mov r9, r3 @ r9<- glue->self (preserve)
+
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFp (points to args)
+ add r1, rGLUE, #offGlue_retval @ r1<- &retval
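+
+ @ The call below effectively performs, per the Dalvik native bridge
+ @ convention: nativeFunc(args=newFp, pResult=&glue->retval,
+ @ method=methodToCall, self) -- r3 still holds glue->self here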
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ b .Lskip
+ .type dalvik_mterp, %function
+dalvik_mterp:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+.Lskip:
+#endif
+
+ @mov lr, pc @ set return addr
+ @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+
+ @ native return; r9=self, r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r0, [r10, #offStackSaveArea_localRefTop] @ r0<- newSave->localRefTop
+ ldr r1, [r9, #offThread_exception] @ check for exception
+ str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [r9, #offThread_jniLocal_nextEntry] @ self->refNext<- r0
+ bne common_exceptionThrown @ no, handle exception
+
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LstackOverflow:
+ ldr r0, [rGLUE, #offGlue_self] @ r0<- self
+ bl dvmHandleStackOverflow
+ b common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+ .fnend
+#endif
+
+
+ /*
+ * Common code for method invocation, calling through "glue code".
+ *
+ * TODO: now that we have range and non-range invoke handlers, this
+ * needs to be split into two. Maybe just create entry points
+ * that set r9 and jump here?
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ * r9 is "bool methodCallRange", indicating if this is a /range variant
+ */
+ .if 0
+.LinvokeOld:
+ sub sp, sp, #8 @ space for args + pad
+ FETCH(ip, 2) @ ip<- FEDC or CCCC
+ mov r2, r0 @ A2<- methodToCall
+ mov r0, rGLUE @ A0<- glue
+ SAVE_PC_FP_TO_GLUE() @ export state to "glue"
+ mov r1, r9 @ A1<- methodCallRange
+ mov r3, rINST, lsr #8 @ A3<- AA
+ str ip, [sp, #0] @ A4<- ip
+ bl dvmMterp_invokeMethod @ call the C invokeMethod
+ add sp, sp, #8 @ remove arg area
+ b common_resumeAfterGlueCall @ continue to next instruction
+ .endif
+
+
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+ mov r0, #kInterpEntryReturn
+ mov r9, #0
+ bl common_periodicChecks
+
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
+ ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ is this a break frame?
+ mov r1, #0 @ "want switch" = false
+ beq common_gotoBail @ break frame, bail out completely
+
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ pc = saveArea->savedPc
+ ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
+ str r2, [rGLUE, #offGlue_method] @ glue->method = newSave->method
+ str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
+ ldr r1, [r2, #offMethod_clazz] @ r1<- method->clazz
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ ldr r1, [r1, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [rGLUE, #offGlue_methodClassDex]
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Return handling, calls through "glue code".
+ */
+ .if 0
+.LreturnOld:
+ SAVE_PC_FP_TO_GLUE() @ export state
+ mov r0, rGLUE @ arg to function
+ bl dvmMterp_returnFromMethod
+ b common_resumeAfterGlueCall
+ .endif
+
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
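+/*
+ * Flow sketch, in C-ish pseudocode (for orientation only):
+ *
+ *   exc = self->exception;  self->exception = NULL;
+ *   dvmAddTrackedAlloc(exc, self);            // keep it alive across GC
+ *   relPc = dvmFindCatchBlock(self, pcOffset, exc, false, &fp);
+ *   if (relPc < 0) { restore and release exc, then bail; }
+ *   else { pc = method->insns + relPc; release exc; resume; }
+ */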
+common_exceptionThrown:
+.LexceptionNew:
+ mov r0, #kInterpEntryThrow
+ mov r9, #0
+ bl common_periodicChecks
+
+ ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self
+ ldr r9, [r10, #offThread_exception] @ r9<- self->exception
+ mov r1, r10 @ r1<- self
+ mov r0, r9 @ r0<- exception
+ bl dvmAddTrackedAlloc @ don't let the exception be GCed
+ mov r3, #0 @ r3<- NULL
+ str r3, [r10, #offThread_exception] @ self->exception = NULL
+
+ /* set up args and a local for "&fp" */
+ /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
+ str rFP, [sp, #-4]! @ *--sp = fp
+ mov ip, sp @ ip<- &fp
+ mov r3, #0 @ r3<- false
+ str ip, [sp, #-4]! @ *--sp = &fp
+ ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method
+ mov r0, r10 @ r0<- self
+ ldr r1, [r1, #offMethod_insns] @ r1<- method->insns
+ mov r2, r9 @ r2<- exception
+ sub r1, rPC, r1 @ r1<- pc - method->insns
+ mov r1, r1, asr #1 @ r1<- offset in code units
+
+ /* call, r0 gets catchRelPc (a code-unit offset) */
+ bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp)
+
+ /* fix earlier stack overflow if necessary; may trash rFP */
+ ldrb r1, [r10, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ beq 1f @ no, skip ahead
+ mov rFP, r0 @ save relPc result in rFP
+ mov r0, r10 @ r0<- self
+ bl dvmCleanupStackOverflow @ call(self)
+ mov r0, rFP @ restore result
+1:
+
+ /* update frame pointer and check result from dvmFindCatchBlock */
+ ldr rFP, [sp, #4] @ retrieve the updated rFP
+ cmp r0, #0 @ is catchRelPc < 0?
+ add sp, sp, #8 @ restore stack
+ bmi .LnotCaughtLocally
+
+ /* adjust locals to match self->curFrame and updated PC */
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area
+ ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method
+ str r1, [rGLUE, #offGlue_method] @ glue->method = new method
+ ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz
+ ldr r3, [r1, #offMethod_insns] @ r3<- method->insns
+ ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
+ add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc
+ str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
+
+ /* release the tracked alloc on the exception */
+ mov r0, r9 @ r0<- exception
+ mov r1, r10 @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+
+ /* restore the exception if the handler wants it */
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"?
+ streq r9, [r10, #offThread_exception] @ yes, restore the exception
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LnotCaughtLocally: @ r9=exception, r10=self
+ /* fix stack overflow if necessary */
+ ldrb r1, [r10, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ movne r0, r10 @ if yes: r0<- self
+ blne dvmCleanupStackOverflow @ if yes: call(self)
+
+ @ may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+ /* call __android_log_print(prio, tag, format, ...) */
+ /* "Exception %s from %s:%d not caught locally" */
+ @ dvmLineNumFromPC(method, pc - method->insns)
+ ldr r0, [rGLUE, #offGlue_method]
+ ldr r1, [r0, #offMethod_insns]
+ sub r1, rPC, r1
+ asr r1, r1, #1
+ bl dvmLineNumFromPC
+ str r0, [sp, #-4]!
+ @ dvmGetMethodSourceFile(method)
+ ldr r0, [rGLUE, #offGlue_method]
+ bl dvmGetMethodSourceFile
+ str r0, [sp, #-4]!
+ @ exception->clazz->descriptor
+ ldr r3, [r9, #offObject_clazz]
+ ldr r3, [r3, #offClassObject_descriptor]
+ @
+ ldr r2, strExceptionNotCaughtLocally
+ ldr r1, strLogTag
+ mov r0, #3 @ LOG_DEBUG
+ bl __android_log_print
+#endif
+ str r9, [r10, #offThread_exception] @ restore exception
+ mov r0, r9 @ r0<- exception
+ mov r1, r10 @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+ mov r1, #0 @ "want switch" = false
+ b common_gotoBail @ bail out
+
+
+ /*
+ * Exception handling, calls through "glue code".
+ */
+ .if 0
+.LexceptionOld:
+ SAVE_PC_FP_TO_GLUE() @ export state
+ mov r0, rGLUE @ arg to function
+ bl dvmMterp_exceptionThrown
+ b common_resumeAfterGlueCall
+ .endif
+
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/*
+ * Invalid array index.
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ ldr r0, strArrayIndexException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Invalid array value.
+ */
+common_errArrayStore:
+ EXPORT_PC()
+ ldr r0, strArrayStoreException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ ldr r0, strArithmeticException
+ ldr r1, strDivideByZero
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ ldr r0, strNegativeArraySizeException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ ldr r0, strNoSuchMethodError
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one. We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ ldr r0, strNullPointerException
+ mov r1, #0
+ bl dvmThrowException
+ b common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault. The source address will
+ * be in lr (use a bl instruction to jump here).
+ */
+common_abort:
+ ldr pc, .LdeadFood
+.LdeadFood:
+ .word 0xdeadf00d
+
+/*
+ * Spit out a "we were here", preserving all registers. (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+ .macro SQUEAK num
+common_squeak\num:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strSqueak
+ mov r1, #\num
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endm
+
+ SQUEAK 0
+ SQUEAK 1
+ SQUEAK 2
+ SQUEAK 3
+ SQUEAK 4
+ SQUEAK 5
+
+/*
+ * Spit out the number in r0, preserving registers.
+ */
+common_printNum:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strSqueak
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strNewline
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+ /*
+ * Print the 32-bit quantity in r0 as a hex value, preserving registers.
+ */
+common_printHex:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strPrintHex
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print the 64-bit quantity in r0-r1, preserving registers.
+ */
+common_printLong:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r3, r1
+ mov r2, r0
+ ldr r0, strPrintLong
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print full method info. Pass the Method* in r0. Preserves regs.
+ */
+common_printMethod:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpPrintMethod
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info. Requires the C function to be compiled in.
+ */
+ .if 0
+common_dumpRegs:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpDumpArmRegs
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endif
+
+
+/*
+ * String references, must be close to the code that uses them.
+ */
+ .align 2
+strArithmeticException:
+ .word .LstrArithmeticException
+strArrayIndexException:
+ .word .LstrArrayIndexException
+strArrayStoreException:
+ .word .LstrArrayStoreException
+strDivideByZero:
+ .word .LstrDivideByZero
+strNegativeArraySizeException:
+ .word .LstrNegativeArraySizeException
+strNoSuchMethodError:
+ .word .LstrNoSuchMethodError
+strNullPointerException:
+ .word .LstrNullPointerException
+
+strLogTag:
+ .word .LstrLogTag
+strExceptionNotCaughtLocally:
+ .word .LstrExceptionNotCaughtLocally
+
+strNewline:
+ .word .LstrNewline
+strSqueak:
+ .word .LstrSqueak
+strPrintHex:
+ .word .LstrPrintHex
+strPrintLong:
+ .word .LstrPrintLong
+
+/*
+ * Zero-terminated ASCII string data.
+ *
+ * On ARM we have two choices: do like gcc does, and LDR from a .word
+ * with the address, or use an ADR pseudo-op to get the address
+ * directly. ADR saves 4 bytes and an indirection, but it's using a
+ * PC-relative addressing mode and hence has a limited range, which
+ * makes it not work well with mergeable string sections.
+ */
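+/*
+ * Illustration of the two choices (hypothetical labels):
+ *
+ * ldr r0, strFoo @ gcc style: indirect through a .word literal
+ * adr r0, .LstrFoo @ direct PC-relative address, limited range
+ */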
+ .section .rodata.str1.4,"aMS",%progbits,1
+
+.LstrBadEntryPoint:
+ .asciz "Bad entry point %d\n"
+.LstrArithmeticException:
+ .asciz "Ljava/lang/ArithmeticException;"
+.LstrArrayIndexException:
+ .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;"
+.LstrArrayStoreException:
+ .asciz "Ljava/lang/ArrayStoreException;"
+.LstrClassCastException:
+ .asciz "Ljava/lang/ClassCastException;"
+.LstrDivideByZero:
+ .asciz "divide by zero"
+.LstrFilledNewArrayNotImpl:
+ .asciz "filled-new-array only implemented for objects and 'int'"
+.LstrInternalError:
+ .asciz "Ljava/lang/InternalError;"
+.LstrInstantiationError:
+ .asciz "Ljava/lang/InstantiationError;"
+.LstrNegativeArraySizeException:
+ .asciz "Ljava/lang/NegativeArraySizeException;"
+.LstrNoSuchMethodError:
+ .asciz "Ljava/lang/NoSuchMethodError;"
+.LstrNullPointerException:
+ .asciz "Ljava/lang/NullPointerException;"
+
+.LstrLogTag:
+ .asciz "mterp"
+.LstrExceptionNotCaughtLocally:
+ .asciz "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+ .asciz "\n"
+.LstrSqueak:
+ .asciz "<%d>"
+.LstrPrintHex:
+ .asciz "<0x%x>"
+.LstrPrintLong:
+ .asciz "<%lld>"
+
+
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
new file mode 100644
index 0000000..a80e59e
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -0,0 +1,8844 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'x86'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: x86/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * 32-bit x86 definitions and declarations.
+ */
+
+/*
+386 ABI general notes:
+
+Caller save set:
+ eax, edx, ecx, st(0)-st(7)
+Callee save set:
+ ebx, esi, edi, ebp
+Return regs:
+ 32-bit in eax
+ 64-bit in edx:eax (low-order 32 in eax)
+ fp on top of fp stack st(0)
+
+Parameters passed on stack, pushed right-to-left. On entry to target, first
+parm is at 4(%esp). Traditional entry code is:
+
+functEntry:
+ push %ebp # save old frame pointer
+ mov %esp,%ebp # establish new frame pointer
+ sub FrameSize,%esp # Allocate storage for spill, locals & outs
+
+Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
+
+Stack alignment is not strictly required, but it matters for performance. We'll
+align frame sizes to 16-byte multiples.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be esp relative.
+
+Mterp notes:
+
+Some key interpreter variables will be assigned to registers. Note that each
+will also have an associated spill location (mostly useful for those assigned
+to callee save registers).
+
+ nick reg purpose
+ rPC edx interpreted program counter, used for fetching instructions
+ rFP esi interpreted frame pointer, used for accessing locals and args
+ rIBASE edi Base pointer for instruction dispatch computed goto
+ rINST bx first 16-bit code unit of current instruction
+ rOPCODE bl opcode portion of instruction word
+ rINST_HI bh high byte of instruction word, usually contains src/tgt reg names
+
+Notes:
+ o High order 16 bits of ebx must be zero on entry to handler
+ o rPC, rFP, rIBASE, rINST/rOPCODE valid on handler entry and exit
+ o eax and ecx are scratch, rINST/ebx sometimes scratch
+ o rPC is in the caller save set, and will be killed across external calls. Don't
+ forget to SPILL/UNSPILL it around call points
+
+*/
+
+#define rPC %edx
+#define rFP %esi
+#define rIBASE %edi
+#define rINST_FULL %ebx
+#define rINST %bx
+#define rINST_HI %bh
+#define rINST_LO %bl
+#define rOPCODE %bl
+
+
+/* Frame diagram while executing dvmMterpStdRun, high to low addresses */
+#define IN_ARG0 ( 8)
+#define CALLER_RP ( 4)
+#define PREV_FP ( 0) /* <- dvmMterpStdRun ebp */
+/* Spill offsets relative to %ebp */
+#define EDI_SPILL ( -4)
+#define ESI_SPILL ( -8)
+#define EDX_SPILL (-12) /* <- esp following dvmMterpStdRun header */
+#define rPC_SPILL (-16)
+#define rFP_SPILL (-20)
+#define rGLUE_SPILL (-24)
+#define rIBASE_SPILL (-28)
+#define rINST_FULL_SPILL (-32)
+#define TMP_SPILL (-36)
+#define LOCAL0_OFFSET (-40)
+#define LOCAL1_OFFSET (-44)
+#define LOCAL2_OFFSET (-48)
+#define LOCAL3_OFFSET (-52)
+/* Out Arg offsets, relative to %sp */
+#define OUT_ARG4 ( 16)
+#define OUT_ARG3 ( 12)
+#define OUT_ARG2 ( 8)
+#define OUT_ARG1 ( 4)
+#define OUT_ARG0 ( 0) /* <- dvmMterpStdRun esp */
+
+#define SPILL(reg) movl reg##,reg##_SPILL(%ebp)
+#define UNSPILL(reg) movl reg##_SPILL(%ebp),reg
+#define SPILL_TMP(reg) movl reg,TMP_SPILL(%ebp)
+#define UNSPILL_TMP(reg) movl TMP_SPILL(%ebp),reg
+
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_GLUE(_glu) movl offGlue_pc(_glu),rPC
+#define SAVE_PC_TO_GLUE(_glu) movl rPC,offGlue_pc(_glu)
+#define LOAD_FP_FROM_GLUE(_glu) movl offGlue_fp(_glu),rFP
+#define SAVE_FP_TO_GLUE(_glu) movl rFP,offGlue_fp(_glu)
+
+#define GET_GLUE(_reg) movl rGLUE_SPILL(%ebp),_reg
+
+/* The interpreter assumes a properly aligned stack on entry, and
+ * will preserve 16-byte alignment.
+ */
+
+/*
+ * "export" the PC to the interpreted stack frame, f/b/o future exception
+ * objects. Must * be done *before* something calls dvmThrowException.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+ movl rPC, (-sizeofStackSaveArea + offStackSaveArea_currentPc)(rFP)
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ leal -sizeofStackSaveArea(_fpreg),_reg
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() movzwl (rPC),rINST_FULL
+
+/*
+ * Fetch the nth instruction word from rPC into rINST. Does not advance
+ * rPC, and _count is in words
+ */
+#define FETCH_INST_WORD(_count) movzwl _count*2(rPC),rINST_FULL
+
+/*
+ * Fetch instruction word indexed (used for branching).
+ * Index is in instruction word units.
+ */
+#define FETCH_INST_INDEXED(_reg) movzwl (rPC,_reg,2),rINST_FULL
+
+/*
+ * Extract the opcode of the instruction in rINST
+ */
+#define EXTRACT_OPCODE(_reg) movzx rOPCODE,_reg
+
+/*
+ * Advance rPC by instruction count
+ */
+#define ADVANCE_PC(_count) leal 2*_count(rPC),rPC
+
+/*
+ * Advance rPC by branch offset in register
+ */
+#define ADVANCE_PC_INDEXED(_reg) leal (rPC,_reg,2),rPC
+
+/*
+ * Note: assumes opcode previously fetched and in rINST, and
+ * %eax is killable at this point.
+ */
+#if 1
+.macro GOTO_NEXT
+ /* For computed next version */
+ movzx rOPCODE,%eax
+ sall $6,%eax
+ addl rIBASE,%eax
+ jmp *%eax
+.endm
+#else
+ /* For jump table version */
+.macro GOTO_NEXT
+ movzx rOPCODE,%eax
+ jmp *(rIBASE,%eax,4)
+.endm
+#endif
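+
+/*
+ * Note: the computed-next form relies on every handler being padded to a
+ * 64-byte stride (each is emitted after ".balign 64"), hence "sall $6".
+ */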
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg) movl (rFP,_vreg,4),_reg
+#define SET_VREG(_reg, _vreg) movl _reg,(rFP,_vreg,4)
+#define GET_VREG_WORD(_reg, _vreg, _offset) movl 4*(_offset)(rFP,_vreg,4),_reg
+#define SET_VREG_WORD(_reg, _vreg, _offset) movl _reg,4*(_offset)(rFP,_vreg,4)
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOP: /* 0x00 */
+/* File: x86/OP_NOP.S */
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE: /* 0x01 */
+/* File: x86/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movzbl rINST_HI,%eax # eax<- BA
+ andb $0xf,%al # eax<- A
+ shrl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ SET_VREG(%ecx,%eax) # fp[A]<-fp[B]
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: x86/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzx rINST_HI,%eax # eax<- AA
+ movw 2(rPC),rINST # rINST<- BBBB
+ GET_VREG (%ecx,rINST_FULL) # ecx<- fp[BBBB]
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG (%ecx,%eax) # fp[AA]<- ecx
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_16: /* 0x03 */
+/* File: x86/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwl 4(rPC),%ecx # ecx<- BBBB
+ movzwl 2(rPC),%eax # eax<- AAAA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(3)
+ ADVANCE_PC(3)
+ SET_VREG(%ecx,%eax)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: x86/OP_MOVE_WIDE.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG_WORD(%eax,rINST_FULL,0) # eax<- v[B+0]
+ GET_VREG_WORD(rINST_FULL,rINST_FULL,1) # rINST_FULL<- v[B+1]
+ andb $0xf,%cl # ecx <- A
+ SET_VREG_WORD(rINST_FULL,%ecx,1) # v[A+1]<- rINST_FULL
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ SET_VREG_WORD(%eax,%ecx,0) # v[A+0]<- eax
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: x86/OP_MOVE_WIDE_FROM16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movzbl rINST_HI,%eax # eax<- AA
+ GET_VREG_WORD(rINST_FULL,%ecx,0) # rINST_FULL<- v[BBBB+0]
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- v[BBBB+1]
+ SET_VREG_WORD(rINST_FULL,%eax,0) # v[AA+0]<- rINST_FULL
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG_WORD(%ecx,%eax,1) # v[AA+1]<- ecx
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: x86/OP_MOVE_WIDE_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 4(rPC),%ecx # ecx<- BBBB
+ movzwl 2(rPC),%eax # eax<- AAAA
+ GET_VREG_WORD(rINST_FULL,%ecx,0) # rINST_FULL<- v[BBBB+0]
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- v[BBBB+1]
+ SET_VREG_WORD(rINST_FULL,%eax,0) # v[AAAA+0]<- rINST_FULL
+ FETCH_INST_WORD(3)
+ ADVANCE_PC(3)
+ SET_VREG_WORD(%ecx,%eax,1) # v[AAAA+1]<- ecx
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: x86/OP_MOVE_OBJECT.S */
+/* File: x86/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movzbl rINST_HI,%eax # eax<- BA
+ andb $0xf,%al # eax<- A
+ shrl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ SET_VREG(%ecx,%eax) # fp[A]<-fp[B]
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: x86/OP_MOVE_OBJECT_FROM16.S */
+/* File: x86/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzx rINST_HI,%eax # eax<- AA
+ movw 2(rPC),rINST # rINST<- BBBB
+ GET_VREG (%ecx,rINST_FULL) # ecx<- fp[BBBB]
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG (%ecx,%eax) # fp[AA]<- ecx
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: x86/OP_MOVE_OBJECT_16.S */
+/* File: x86/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwl 4(rPC),%ecx # ecx<- BBBB
+ movzwl 2(rPC),%eax # eax<- AAAA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(3)
+ ADVANCE_PC(3)
+ SET_VREG(%ecx,%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: x86/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_GLUE(%eax) # eax<- rGLUE
+ movzx rINST_HI,%ecx # ecx<- AA
+ movl offGlue_retval(%eax),%eax # eax<- glue->retval.l
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ SET_VREG (%eax,%ecx) # fp[AA]<- retval.l
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: x86/OP_MOVE_RESULT_WIDE.S */
+ /* move-result-wide vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl offGlue_retval(%ecx),%eax
+ movl 4+offGlue_retval(%ecx),%ecx
+ SET_VREG_WORD(%eax,rINST_FULL,0) # v[AA+0] <- eax
+ SET_VREG_WORD(%ecx,rINST_FULL,1) # v[AA+1] <- ecx
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: x86/OP_MOVE_RESULT_OBJECT.S */
+/* File: x86/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_GLUE(%eax) # eax<- rGLUE
+ movzx rINST_HI,%ecx # ecx<- AA
+ movl offGlue_retval(%eax),%eax # eax<- glue->retval.l
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ SET_VREG (%eax,%ecx) # fp[AA]<- retval.l
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: x86/OP_MOVE_EXCEPTION.S */
+ /* move-exception vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl offGlue_self(%ecx),%ecx # ecx<- glue->self
+ movl offThread_exception(%ecx),%eax # eax<- dvmGetException bypass
+ SET_VREG(%eax,rINST_FULL) # fp[AA]<- exception object
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ movl $0,offThread_exception(%ecx) # dvmClearException bypass
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: x86/OP_RETURN_VOID.S */
+ jmp common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN: /* 0x0f */
+/* File: x86/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,rINST_FULL) # eax<- vAA
+ movl %eax,offGlue_retval(%ecx) # retval.i<- vAA
+ jmp common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: x86/OP_RETURN_WIDE.S */
+ /*
+ * Return a 64-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG_WORD(%eax,rINST_FULL,0) # eax<- v[AA+0]
+ GET_VREG_WORD(rINST_FULL,rINST_FULL,1) # rINST_FULL<- v[AA+1]
+ movl %eax,offGlue_retval(%ecx)
+ movl rINST_FULL,4+offGlue_retval(%ecx)
+ jmp common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: x86/OP_RETURN_OBJECT.S */
+/* File: x86/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "glue"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,rINST_FULL) # eax<- vAA
+ movl %eax,offGlue_retval(%ecx) # retval.i<- vAA
+ jmp common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_4: /* 0x12 */
+/* File: x86/OP_CONST_4.S */
+ /* const/4 vA, #+B */
+ movsx rINST_HI,%eax # eax<-ssssssBx
+ movl $0xf,%ecx
+ andl %eax,%ecx # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ sarl $4,%eax
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_16: /* 0x13 */
+/* File: x86/OP_CONST_16.S */
+ /* const/16 vAA, #+BBBB */
+ movswl 2(rPC),%ecx # ecx<- ssssBBBB
+ movzx rINST_HI,%eax # eax<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%ecx,%eax) # vAA<- ssssBBBB
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST: /* 0x14 */
+/* File: x86/OP_CONST.S */
+ /* const vAA, #+BBBBbbbb */
+ movzbl rINST_HI,%ecx # ecx<- AA
+ movl 2(rPC),%eax # grab all 32 bits at once
+ FETCH_INST_WORD(3)
+ ADVANCE_PC(3)
+ SET_VREG(%eax,%ecx) # vAA<- eax
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: x86/OP_CONST_HIGH16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ movzwl 2(rPC),%eax # eax<- 0000BBBB
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ sall $16,%eax # eax<- BBBB0000
+ SET_VREG(%eax,%ecx) # vAA<- eax
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: x86/OP_CONST_WIDE_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ movswl 2(rPC),%eax # eax<- ssssBBBB
+ SPILL(rPC)
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ cltd # rPC:eax<- ssssssssssssBBBB
+ SET_VREG_WORD(rPC,%ecx,1) # store msw
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0) # store lsw
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: x86/OP_CONST_WIDE_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ movl 2(rPC),%eax # eax<- BBBBbbbb
+ SPILL(rPC)
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(3)
+ cltd # rPC:eax<- ssssssssBBBBbbbb
+ SET_VREG_WORD(rPC,%ecx,1) # store msw
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0) # store lsw
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: x86/OP_CONST_WIDE.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ movl 2(rPC),%eax # eax<- lsw
+ movzbl rINST_HI,%ecx # ecx <- AA
+ movl 6(rPC),rINST_FULL # rINST_FULL<- msw
+ leal (rFP,%ecx,4),%ecx # dst addr
+ movl rINST_FULL,4(%ecx)
+ FETCH_INST_WORD(5)
+ movl %eax,(%ecx)
+ ADVANCE_PC(5)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: x86/OP_CONST_WIDE_HIGH16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ movzwl 2(rPC),%eax # eax<- 0000BBBB
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ sall $16,%eax # eax<- BBBB0000
+ SET_VREG_WORD(%eax,%ecx,1) # v[AA+1]<- eax
+ xorl %eax,%eax
+ SET_VREG_WORD(%eax,%ecx,0) # v[AA+0]<- eax
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: x86/OP_CONST_STRING.S */
+
+ /* const/string vAA, String@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
+ movl (%ecx,%eax,4),%eax # eax<- rResString[BBBB]
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ testl %eax,%eax # resolved yet?
+ je .LOP_CONST_STRING_resolve
+ SET_VREG(%eax,%ecx) # vAA<- rResString[BBBB]
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: x86/OP_CONST_STRING_JUMBO.S */
+
+ /* const/string vAA, String@BBBBBBBB */
+ GET_GLUE(%ecx)
+ movl 2(rPC),%eax # eax<- BBBBBBBB
+ movl offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
+ movl (%ecx,%eax,4),%eax # eax<- rResString[BBBBBBBB]
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(3)
+ testl %eax,%eax # resolved yet?
+ je .LOP_CONST_STRING_JUMBO_resolve
+ SET_VREG(%eax,%ecx) # vAA<- rResString[BBBBBBBB]
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: x86/OP_CONST_CLASS.S */
+
+ /* const/class vAA, Class@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- dvmDex->pResClasses
+ movl (%ecx,%eax,4),%eax # eax<- rResClasses[BBBB]
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ testl %eax,%eax # resolved yet?
+ je .LOP_CONST_CLASS_resolve
+ SET_VREG(%eax,%ecx) # vAA<- rResClasses[BBBB]
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: x86/OP_MONITOR_ENTER.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,rINST_FULL) # eax<- vAA
+ movl offGlue_self(%ecx),%ecx # ecx<- glue->self
+ FETCH_INST_WORD(1)
+ testl %eax,%eax # null object?
+#ifdef WITH_MONITOR_TRACKING
+ EXPORT_PC()
+#endif
+ jne .LOP_MONITOR_ENTER_continue
+ jmp common_errNullObject
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: x86/OP_MONITOR_EXIT.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,rINST_FULL)
+ GET_GLUE(%ecx)
+ EXPORT_PC()
+ testl %eax,%eax # null object?
+ je common_errNullObject # go if so
+ movl offGlue_self(%ecx),%ecx # ecx<- glue->self
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ movl %ecx,OUT_ARG0(%esp)
+ jmp .LOP_MONITOR_EXIT_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: x86/OP_CHECK_CAST.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- vAA (object)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ testl rINST_FULL,rINST_FULL # is object null?
+ movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+ je .LOP_CHECK_CAST_okay # null obj, cast always succeeds
+ movl (%ecx,%eax,4),%eax # eax<- resolved class
+ movl offObject_clazz(rINST_FULL),%ecx # ecx<- obj->clazz
+ testl %eax,%eax # have we resolved this before?
+ je .LOP_CHECK_CAST_resolve # no, go do it now
+.LOP_CHECK_CAST_resolved:
+ cmpl %eax,%ecx # same class (trivial success)?
+ jne .LOP_CHECK_CAST_fullcheck # no, do full check
+.LOP_CHECK_CAST_okay:
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: x86/OP_INSTANCE_OF.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ movzbl rINST_HI,%eax # eax<- BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB (obj)
+ GET_GLUE(%ecx)
+ testl %eax,%eax # object null?
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ SPILL(rPC)
+ je .LOP_INSTANCE_OF_store # null obj, not instance, store it
+ movzwl 2(rPC),rPC # rPC<- CCCC
+ movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+ movl (%ecx,rPC,4),%ecx # ecx<- resolved class
+ movl offObject_clazz(%eax),%eax # eax<- obj->clazz
+ testl %ecx,%ecx # have we resolved this before?
+ je .LOP_INSTANCE_OF_resolve # not resolved, do it now
+.LOP_INSTANCE_OF_resolved: # eax<- obj->clazz, ecx<- resolved class
+ cmpl %eax,%ecx # same class (trivial success)?
+ je .LOP_INSTANCE_OF_trivial # yes, trivial finish
+ jmp .LOP_INSTANCE_OF_fullcheck # no, do full check
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: x86/OP_ARRAY_LENGTH.S */
+ /*
+ * Return the length of an array.
+ */
+ movzbl rINST_HI,%eax # eax<- BA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%ecx,rINST_FULL) # ecx<- vB (object ref)
+ andb $0xf,%al # eax<- A
+ testl %ecx,%ecx # is null?
+ je common_errNullObject
+ FETCH_INST_WORD(1)
+ movl offArrayObject_length(%ecx),%ecx
+ ADVANCE_PC(1)
+ SET_VREG(%ecx,%eax)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: x86/OP_NEW_INSTANCE.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
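+    /*
+     * Allocation can only proceed once the class is both resolved and
+     * initialized; either slow case branches to an out-of-line fragment.
+     */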
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+ EXPORT_PC()
+ movl (%ecx,%eax,4),%ecx # ecx<- resolved class
+ SPILL(rPC)
+ testl %ecx,%ecx # resolved?
+ je .LOP_NEW_INSTANCE_resolve # no, go do it
+.LOP_NEW_INSTANCE_resolved: # on entry, ecx<- class
+ cmpb $CLASS_INITIALIZED,offClassObject_status(%ecx)
+ je .LOP_NEW_INSTANCE_initialized
+ jmp .LOP_NEW_INSTANCE_needinit
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: x86/OP_NEW_ARRAY.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ GET_GLUE(%ecx)
+ EXPORT_PC()
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ movzwl 2(rPC),%eax # eax<- CCCC
+ movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
+ movl (%ecx,%eax,4),%ecx # ecx<- resolved class
+ movzbl rINST_HI,%eax
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB (array length)
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ testl %eax,%eax
+ js common_errNegativeArraySize # bail
+ testl %ecx,%ecx # already resolved?
+ jne .LOP_NEW_ARRAY_finish # yes, fast path
+ jmp .LOP_NEW_ARRAY_resolve # resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: x86/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ GET_GLUE(%eax)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA or BA
+ movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl offDvmDex_pResClasses(%eax),%eax # eax<- pDvmDex->pResClasses
+ SPILL(rPC)
+ movl (%eax,%ecx,4),%eax # eax<- resolved class
+ EXPORT_PC()
+ testl %eax,%eax # already resolved?
+ jne .LOP_FILLED_NEW_ARRAY_continue # yes, continue
+ # less frequent path, so we'll redo some work
+ GET_GLUE(%eax)
+ movl $0,OUT_ARG2(%esp) # arg2<- false
+ movl %ecx,OUT_ARG1(%esp) # arg1<- BBBB
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ jmp .LOP_FILLED_NEW_ARRAY_more
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: x86/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: x86/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ GET_GLUE(%eax)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA or BA
+ movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl offDvmDex_pResClasses(%eax),%eax # eax<- pDvmDex->pResClasses
+ SPILL(rPC)
+ movl (%eax,%ecx,4),%eax # eax<- resolved class
+ EXPORT_PC()
+ testl %eax,%eax # already resolved?
+ jne .LOP_FILLED_NEW_ARRAY_RANGE_continue # yes, continue
+ # less frequent path, so we'll redo some work
+ GET_GLUE(%eax)
+ movl $0,OUT_ARG2(%esp) # arg2<- false
+ movl %ecx,OUT_ARG1(%esp) # arg1<- BBBB
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ jmp .LOP_FILLED_NEW_ARRAY_RANGE_more
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: x86/OP_FILL_ARRAY_DATA.S */
+ /* fill-array-data vAA, +BBBBBBBB */
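+    /*
+     * BBBBBBBB is a signed 32-bit offset in 16-bit code units; the leal
+     * below scales it by two to form the byte address of the data table.
+     * dvmInterpHandleFillArrayData returns zero when it has thrown an
+     * exception, which the testl/je pair checks on return.
+     */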
+ movl 2(rPC),%ecx # ecx<- BBBBbbbb
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ leal (rPC,%ecx,2),%ecx # ecx<- PC + BBBBbbbb*2
+ GET_VREG(%eax,rINST_FULL)
+ SPILL(rPC)
+ EXPORT_PC()
+ movl %eax,OUT_ARG0(%esp)
+ movl %ecx,OUT_ARG1(%esp)
+ call dvmInterpHandleFillArrayData
+ UNSPILL(rPC)
+ FETCH_INST_WORD(3)
+ testl %eax,%eax # exception thrown?
+ je common_exceptionThrown
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW: /* 0x27 */
+/* File: x86/OP_THROW.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ GET_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,rINST_FULL) # eax<- exception object
+ movl offGlue_self(%ecx),%ecx # ecx<- glue->self
+ testl %eax,%eax # null object?
+ je common_errNullObject
+ movl %eax,offThread_exception(%ecx) # thread->exception<- obj
+ jmp common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO: /* 0x28 */
+/* File: x86/OP_GOTO.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
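+    /*
+     * Example: AA = 0xfe sign-extends to -2, a branch back two code
+     * units (four bytes). Negative offsets take the common_backwardBranch
+     * path, where the interpreter presumably performs its periodic checks.
+     */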
+    movsbl rINST_HI,rINST_FULL # rINST_FULL<- ssssssAA
+ testl rINST_FULL,rINST_FULL # test for <0
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_16: /* 0x29 */
+/* File: x86/OP_GOTO_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset
+ */
+ /* goto/16 +AAAA */
+ movswl 2(rPC),rINST_FULL # rINST_FULL<- ssssAAAA
+ testl rINST_FULL,rINST_FULL # test for <0
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_32: /* 0x2a */
+/* File: x86/OP_GOTO_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 AAAAAAAA */
+ movl 2(rPC),rINST_FULL # rINST_FULL<- AAAAAAAA
+ cmpl $0,rINST_FULL # test for <= 0
+ jle common_backwardBranch
+ movl rINST_FULL,%eax
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: x86/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
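+    /*
+     * Native call convention here: arguments are staged in the reserved
+     * OUT_ARG0/OUT_ARG1 stack slots and rPC is spilled around the call.
+     * The helper returns a branch offset in code units; values <= 0 are
+     * routed through common_backwardBranch.
+     */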
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl 2(rPC),%ecx # ecx<- BBBBbbbb
+ GET_VREG(%eax,rINST_FULL) # eax<- vAA
+ leal (rPC,%ecx,2),%ecx # ecx<- PC + BBBBbbbb*2
+ movl %eax,OUT_ARG1(%esp) # ARG1<- vAA
+ movl %ecx,OUT_ARG0(%esp) # ARG0<- switchData
+ SPILL(rPC)
+ call dvmInterpHandlePackedSwitch
+ UNSPILL(rPC)
+ testl %eax,%eax
+ movl %eax,rINST_FULL # set up word offset
+ jle common_backwardBranch # check on special actions
+ ADVANCE_PC_INDEXED(rINST_FULL)
+ FETCH_INST()
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: x86/OP_SPARSE_SWITCH.S */
+/* File: x86/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ movl 2(rPC),%ecx # ecx<- BBBBbbbb
+ GET_VREG(%eax,rINST_FULL) # eax<- vAA
+ leal (rPC,%ecx,2),%ecx # ecx<- PC + BBBBbbbb*2
+ movl %eax,OUT_ARG1(%esp) # ARG1<- vAA
+ movl %ecx,OUT_ARG0(%esp) # ARG0<- switchData
+ SPILL(rPC)
+ call dvmInterpHandleSparseSwitch
+ UNSPILL(rPC)
+ testl %eax,%eax
+ movl %eax,rINST_FULL # set up word offset
+ jle common_backwardBranch # check on special actions
+ ADVANCE_PC_INDEXED(rINST_FULL)
+ FETCH_INST()
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: x86/OP_CMPL_FLOAT.S */
+/* File: x86/OP_CMPG_DOUBLE.S */
+ /* float/double_cmp[gl] vAA, vBB, vCC */
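+    /*
+     * The .if 0/.else below is the generator's compile-time selector
+     * between the double (fldl) and float (flds) variants. fucompp pops
+     * both x87 operands; fnstsw/sahf copy the FPU condition codes into
+     * EFLAGS so the jp (unordered/NaN), je (equal), and jb tests apply.
+     */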
+ movzbl 3(rPC),%eax # eax<- CC
+ movzbl 2(rPC),%ecx # ecx<- BB
+ .if 0
+ fldl (rFP,%eax,4)
+ fldl (rFP,%ecx,4)
+ .else
+ flds (rFP,%eax,4)
+ flds (rFP,%ecx,4)
+ .endif
+ movzbl rINST_HI,rINST_FULL
+ xorl %ecx,%ecx
+ fucompp # z if equal, p set if NaN, c set if st0 < st1
+ fnstsw %ax
+ sahf
+ movl rINST_FULL,%eax
+ FETCH_INST_WORD(2)
+ jp .LOP_CMPL_FLOAT_isNaN
+ je .LOP_CMPL_FLOAT_finish
+ sbbl %ecx,%ecx
+ jb .LOP_CMPL_FLOAT_finish
+ incl %ecx
+.LOP_CMPL_FLOAT_finish:
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: x86/OP_CMPG_FLOAT.S */
+/* File: x86/OP_CMPG_DOUBLE.S */
+ /* float/double_cmp[gl] vAA, vBB, vCC */
+ movzbl 3(rPC),%eax # eax<- CC
+ movzbl 2(rPC),%ecx # ecx<- BB
+ .if 0
+ fldl (rFP,%eax,4)
+ fldl (rFP,%ecx,4)
+ .else
+ flds (rFP,%eax,4)
+ flds (rFP,%ecx,4)
+ .endif
+ movzbl rINST_HI,rINST_FULL
+ xorl %ecx,%ecx
+ fucompp # z if equal, p set if NaN, c set if st0 < st1
+ fnstsw %ax
+ sahf
+ movl rINST_FULL,%eax
+ FETCH_INST_WORD(2)
+ jp .LOP_CMPG_FLOAT_isNaN
+ je .LOP_CMPG_FLOAT_finish
+ sbbl %ecx,%ecx
+ jb .LOP_CMPG_FLOAT_finish
+ incl %ecx
+.LOP_CMPG_FLOAT_finish:
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: x86/OP_CMPL_DOUBLE.S */
+/* File: x86/OP_CMPG_DOUBLE.S */
+ /* float/double_cmp[gl] vAA, vBB, vCC */
+ movzbl 3(rPC),%eax # eax<- CC
+ movzbl 2(rPC),%ecx # ecx<- BB
+ .if 1
+ fldl (rFP,%eax,4)
+ fldl (rFP,%ecx,4)
+ .else
+ flds (rFP,%eax,4)
+ flds (rFP,%ecx,4)
+ .endif
+ movzbl rINST_HI,rINST_FULL
+ xorl %ecx,%ecx
+ fucompp # z if equal, p set if NaN, c set if st0 < st1
+ fnstsw %ax
+ sahf
+ movl rINST_FULL,%eax
+ FETCH_INST_WORD(2)
+ jp .LOP_CMPL_DOUBLE_isNaN
+ je .LOP_CMPL_DOUBLE_finish
+ sbbl %ecx,%ecx
+ jb .LOP_CMPL_DOUBLE_finish
+ incl %ecx
+.LOP_CMPL_DOUBLE_finish:
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: x86/OP_CMPG_DOUBLE.S */
+ /* float/double_cmp[gl] vAA, vBB, vCC */
+ movzbl 3(rPC),%eax # eax<- CC
+ movzbl 2(rPC),%ecx # ecx<- BB
+ .if 1
+ fldl (rFP,%eax,4)
+ fldl (rFP,%ecx,4)
+ .else
+ flds (rFP,%eax,4)
+ flds (rFP,%ecx,4)
+ .endif
+ movzbl rINST_HI,rINST_FULL
+ xorl %ecx,%ecx
+ fucompp # z if equal, p set if NaN, c set if st0 < st1
+ fnstsw %ax
+ sahf
+ movl rINST_FULL,%eax
+ FETCH_INST_WORD(2)
+ jp .LOP_CMPG_DOUBLE_isNaN
+ je .LOP_CMPG_DOUBLE_finish
+ sbbl %ecx,%ecx
+ jb .LOP_CMPG_DOUBLE_finish
+ incl %ecx
+.LOP_CMPG_DOUBLE_finish:
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: x86/OP_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+ /* cmp-long vAA, vBB, vCC */
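+    /*
+     * Standard two-word compare: the high words are compared as signed
+     * values (jl/jg); only if they are equal are the low words compared,
+     * and those compare as unsigned (ja/jb).
+     */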
+ movzbl 2(rPC),%ecx # ecx<- BB
+ SPILL(rPC)
+ movzbl 3(rPC),rPC # rPC<- CC
+ GET_VREG_WORD(%eax,%ecx,1) # eax<- v[BB+1]
+ GET_VREG_WORD(%ecx,%ecx,0) # ecx<- v[BB+0]
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ cmpl 4(rFP,rPC,4),%eax
+ jl .LOP_CMP_LONG_smaller
+ jg .LOP_CMP_LONG_bigger
+ sub (rFP,rPC,4),%ecx
+ ja .LOP_CMP_LONG_bigger
+ jb .LOP_CMP_LONG_smaller
+ UNSPILL(rPC)
+ jmp .LOP_CMP_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQ: /* 0x32 */
+/* File: x86/OP_IF_EQ.S */
+/* File: x86/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
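+    /*
+     * For if-eq the reverse comparison is "ne": the jne below skips
+     * straight to the not-taken path (an advance of two code units)
+     * whenever vA != vB, so only equal values reach the branch logic.
+     */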
+ movzx rINST_HI,%ecx # ecx <- A+
+ andb $0xf,%cl # ecx <- A
+ GET_VREG(%eax,%ecx) # eax <- vA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ cmpl (rFP,rINST_FULL,4),%eax # compare (vA, vB)
+ movswl 2(rPC),rINST_FULL # Get signed branch offset
+ movl $2,%eax # assume not taken
+ jne 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NE: /* 0x33 */
+/* File: x86/OP_IF_NE.S */
+/* File: x86/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movzx rINST_HI,%ecx # ecx <- A+
+ andb $0xf,%cl # ecx <- A
+ GET_VREG(%eax,%ecx) # eax <- vA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ cmpl (rFP,rINST_FULL,4),%eax # compare (vA, vB)
+ movswl 2(rPC),rINST_FULL # Get signed branch offset
+ movl $2,%eax # assume not taken
+ je 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LT: /* 0x34 */
+/* File: x86/OP_IF_LT.S */
+/* File: x86/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movzx rINST_HI,%ecx # ecx <- A+
+ andb $0xf,%cl # ecx <- A
+ GET_VREG(%eax,%ecx) # eax <- vA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ cmpl (rFP,rINST_FULL,4),%eax # compare (vA, vB)
+ movswl 2(rPC),rINST_FULL # Get signed branch offset
+ movl $2,%eax # assume not taken
+ jge 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GE: /* 0x35 */
+/* File: x86/OP_IF_GE.S */
+/* File: x86/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movzx rINST_HI,%ecx # ecx <- A+
+ andb $0xf,%cl # ecx <- A
+ GET_VREG(%eax,%ecx) # eax <- vA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ cmpl (rFP,rINST_FULL,4),%eax # compare (vA, vB)
+ movswl 2(rPC),rINST_FULL # Get signed branch offset
+ movl $2,%eax # assume not taken
+ jl 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GT: /* 0x36 */
+/* File: x86/OP_IF_GT.S */
+/* File: x86/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movzx rINST_HI,%ecx # ecx <- A+
+ andb $0xf,%cl # ecx <- A
+ GET_VREG(%eax,%ecx) # eax <- vA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ cmpl (rFP,rINST_FULL,4),%eax # compare (vA, vB)
+ movswl 2(rPC),rINST_FULL # Get signed branch offset
+ movl $2,%eax # assume not taken
+ jle 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LE: /* 0x37 */
+/* File: x86/OP_IF_LE.S */
+/* File: x86/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movzx rINST_HI,%ecx # ecx <- A+
+ andb $0xf,%cl # ecx <- A
+ GET_VREG(%eax,%ecx) # eax <- vA
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ cmpl (rFP,rINST_FULL,4),%eax # compare (vA, vB)
+ movswl 2(rPC),rINST_FULL # Get signed branch offset
+ movl $2,%eax # assume not taken
+ jg 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: x86/OP_IF_EQZ.S */
+/* File: x86/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
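+    /*
+     * For if-eqz the reverse comparison is "ne"; note the operand is
+     * compared against an immediate zero directly in the frame, so no
+     * GET_VREG load is needed.
+     */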
+ movzx rINST_HI,%ecx # ecx <- AA
+    cmpl $0,(rFP,%ecx,4) # compare (vAA, 0)
+ movswl 2(rPC),rINST_FULL # fetch signed displacement
+ movl $2,%eax # assume branch not taken
+ jne 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: x86/OP_IF_NEZ.S */
+/* File: x86/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ movzx rINST_HI,%ecx # ecx <- AA
+    cmpl $0,(rFP,%ecx,4) # compare (vAA, 0)
+ movswl 2(rPC),rINST_FULL # fetch signed displacement
+ movl $2,%eax # assume branch not taken
+ je 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: x86/OP_IF_LTZ.S */
+/* File: x86/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ movzx rINST_HI,%ecx # ecx <- AA
+    cmpl $0,(rFP,%ecx,4) # compare (vAA, 0)
+ movswl 2(rPC),rINST_FULL # fetch signed displacement
+ movl $2,%eax # assume branch not taken
+ jge 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: x86/OP_IF_GEZ.S */
+/* File: x86/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ movzx rINST_HI,%ecx # ecx <- AA
+    cmpl $0,(rFP,%ecx,4) # compare (vAA, 0)
+ movswl 2(rPC),rINST_FULL # fetch signed displacement
+ movl $2,%eax # assume branch not taken
+ jl 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: x86/OP_IF_GTZ.S */
+/* File: x86/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ movzx rINST_HI,%ecx # ecx <- AA
+    cmpl $0,(rFP,%ecx,4) # compare (vAA, 0)
+ movswl 2(rPC),rINST_FULL # fetch signed displacement
+ movl $2,%eax # assume branch not taken
+ jle 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: x86/OP_IF_LEZ.S */
+/* File: x86/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ movzx rINST_HI,%ecx # ecx <- AA
+    cmpl $0,(rFP,%ecx,4) # compare (vAA, 0)
+ movswl 2(rPC),rINST_FULL # fetch signed displacement
+ movl $2,%eax # assume branch not taken
+ jg 1f
+ testl rINST_FULL,rINST_FULL
+ js common_backwardBranch
+ movl rINST_FULL,%eax
+1:
+ FETCH_INST_INDEXED(%eax)
+ ADVANCE_PC_INDEXED(%eax)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: x86/OP_UNUSED_3E.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: x86/OP_UNUSED_3F.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: x86/OP_UNUSED_40.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: x86/OP_UNUSED_41.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: x86/OP_UNUSED_42.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: x86/OP_UNUSED_43.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET: /* 0x44 */
+/* File: x86/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ movl offArrayObject_contents(%eax,%ecx,4),%eax
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: x86/OP_AGET_WIDE.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jb .LOP_AGET_WIDE_finish # index < length, OK
+ jmp common_errArrayIndex # index >= length, bail
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: x86/OP_AGET_OBJECT.S */
+/* File: x86/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ movl offArrayObject_contents(%eax,%ecx,4),%eax
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: x86/OP_AGET_BOOLEAN.S */
+/* File: x86/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ movzbl offArrayObject_contents(%eax,%ecx,1),%eax
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: x86/OP_AGET_BYTE.S */
+/* File: x86/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ movsbl offArrayObject_contents(%eax,%ecx,1),%eax
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: x86/OP_AGET_CHAR.S */
+/* File: x86/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ movzwl offArrayObject_contents(%eax,%ecx,2),%eax
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: x86/OP_AGET_SHORT.S */
+/* File: x86/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ movswl offArrayObject_contents(%eax,%ecx,2),%eax
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT: /* 0x4b */
+/* File: x86/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA
+ *
+ * for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ leal offArrayObject_contents(%eax,%ecx,4),%eax
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ movl %ecx,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: x86/OP_APUT_WIDE.S */
+ /*
+ * Array put, 64 bits. vBB[vCC]<-vAA.
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jb .LOP_APUT_WIDE_finish # index < length, OK
+ jmp common_errArrayIndex # index >= length, bail
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: x86/OP_APUT_OBJECT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA
+ *
+ * for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- vAA
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jb .LOP_APUT_OBJECT_continue
+ jmp common_errArrayIndex # index >= length, bail
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: x86/OP_APUT_BOOLEAN.S */
+/* File: x86/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA
+ *
+ * for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ leal offArrayObject_contents(%eax,%ecx,1),%eax
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ movb %cl,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: x86/OP_APUT_BYTE.S */
+/* File: x86/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA
+ *
+ * for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ leal offArrayObject_contents(%eax,%ecx,1),%eax
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ movb %cl,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: x86/OP_APUT_CHAR.S */
+/* File: x86/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA
+ *
+ * for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ leal offArrayObject_contents(%eax,%ecx,2),%eax
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ movw %cx,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: x86/OP_APUT_SHORT.S */
+/* File: x86/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA
+ *
+ * for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ GET_VREG(%eax,%eax) # eax<- vBB (array object)
+    GET_VREG(%ecx,%ecx) # ecx<- vCC (requested index)
+ testl %eax,%eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl offArrayObject_length(%eax),%ecx
+ jae common_errArrayIndex # index >= length, bail
+ leal offArrayObject_contents(%eax,%ecx,2),%eax
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ movw %cx,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET: /* 0x52 */
+/* File: x86/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: x86/OP_IGET_WIDE.S */
+ /*
+ * 64-bit instance field get.
+ *
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_WIDE_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_WIDE_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: x86/OP_IGET_OBJECT.S */
+/* File: x86/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_OBJECT_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_OBJECT_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: x86/OP_IGET_BOOLEAN.S */
+/* File: x86/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_BOOLEAN_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_BOOLEAN_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: x86/OP_IGET_BYTE.S */
+/* File: x86/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_BYTE_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_BYTE_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: x86/OP_IGET_CHAR.S */
+/* File: x86/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_CHAR_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_CHAR_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: x86/OP_IGET_SHORT.S */
+/* File: x86/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IGET_SHORT_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp) # needed by dvmResolveInstField
+ GET_GLUE(rIBASE)
+ jmp .LOP_IGET_SHORT_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT: /* 0x59 */
+/* File: x86/OP_IPUT.S */
+
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: x86/OP_IPUT_WIDE.S */
+ /*
+ * 64-bit instance field put.
+ *
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_WIDE_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_WIDE_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: x86/OP_IPUT_OBJECT.S */
+/* File: x86/OP_IPUT.S */
+
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_OBJECT_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_OBJECT_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: x86/OP_IPUT_BOOLEAN.S */
+/* File: x86/OP_IPUT.S */
+
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_BOOLEAN_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_BOOLEAN_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: x86/OP_IPUT_BYTE.S */
+/* File: x86/OP_IPUT.S */
+
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_BYTE_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_BYTE_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: x86/OP_IPUT_CHAR.S */
+/* File: x86/OP_IPUT.S */
+
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_CHAR_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_CHAR_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: x86/OP_IPUT_SHORT.S */
+/* File: x86/OP_IPUT.S */
+
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ GET_GLUE(%ecx)
+ SPILL(rIBASE) # need another reg
+ movzwl 2(rPC),rIBASE # rIBASE<- 0000CCCC
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%ecx,%ecx) # ecx<- fp[B], the object ptr
+ movl (%eax,rIBASE,4),%eax # resolved entry
+ testl %eax,%eax # is resolved entry null?
+ jne .LOP_IPUT_SHORT_finish # no, already resolved
+ movl rIBASE,OUT_ARG1(%esp)
+ GET_GLUE(rIBASE)
+ jmp .LOP_IPUT_SHORT_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET: /* 0x60 */
+/* File: x86/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
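+    /*
+     * Resolved StaticField pointers are cached in pResFields, indexed
+     * by the field ref; a null entry takes the out-of-line resolve
+     * path, which is expected to jump back to .LOP_SGET_finish.
+     */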
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_resolve # if not, make it so
+.LOP_SGET_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: x86/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ *
+ */
+ /* sget-wide vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_WIDE_resolve # if not, make it so
+.LOP_SGET_WIDE_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%ecx # ecx<- lsw
+ movl 4+offStaticField_value(%eax),%eax # eax<- msw
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SET_VREG_WORD(%ecx,rINST_FULL,0)
+ SET_VREG_WORD(%eax,rINST_FULL,1)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: x86/OP_SGET_OBJECT.S */
+/* File: x86/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_OBJECT_resolve # if not, make it so
+.LOP_SGET_OBJECT_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: x86/OP_SGET_BOOLEAN.S */
+/* File: x86/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_BOOLEAN_resolve # if not, make it so
+.LOP_SGET_BOOLEAN_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: x86/OP_SGET_BYTE.S */
+/* File: x86/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_BYTE_resolve # if not, make it so
+.LOP_SGET_BYTE_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: x86/OP_SGET_CHAR.S */
+/* File: x86/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_CHAR_resolve # if not, make it so
+.LOP_SGET_CHAR_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: x86/OP_SGET_SHORT.S */
+/* File: x86/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SGET_SHORT_resolve # if not, make it so
+.LOP_SGET_SHORT_finish: # field ptr in eax
+ movl offStaticField_value(%eax),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT: /* 0x67 */
+/* File: x86/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_resolve # if not, make it so
+.LOP_SPUT_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(2)
+ movl %ecx,offStaticField_value(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: x86/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ *
+ */
+ /* sput-wide vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_WIDE_resolve # if not, make it so
+.LOP_SPUT_WIDE_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG_WORD(rINST_FULL,%ecx,0) # rINST_FULL<- lsw
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- msw
+ movl rINST_FULL,offStaticField_value(%eax)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ movl %ecx,4+offStaticField_value(%eax)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: x86/OP_SPUT_OBJECT.S */
+/* File: x86/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_OBJECT_resolve # if not, make it so
+.LOP_SPUT_OBJECT_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(2)
+ movl %ecx,offStaticField_value(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: x86/OP_SPUT_BOOLEAN.S */
+/* File: x86/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_BOOLEAN_resolve # if not, make it so
+.LOP_SPUT_BOOLEAN_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(2)
+ movl %ecx,offStaticField_value(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: x86/OP_SPUT_BYTE.S */
+/* File: x86/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_BYTE_resolve # if not, make it so
+.LOP_SPUT_BYTE_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(2)
+ movl %ecx,offStaticField_value(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: x86/OP_SPUT_CHAR.S */
+/* File: x86/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_CHAR_resolve # if not, make it so
+.LOP_SPUT_CHAR_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(2)
+ movl %ecx,offStaticField_value(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: x86/OP_SPUT_SHORT.S */
+/* File: x86/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
+ movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
+ movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
+ testl %eax,%eax # resolved entry null?
+ je .LOP_SPUT_SHORT_resolve # if not, make it so
+.LOP_SPUT_SHORT_finish: # field ptr in eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ GET_VREG(%ecx,%ecx)
+ FETCH_INST_WORD(2)
+ movl %ecx,offStaticField_value(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: x86/OP_INVOKE_VIRTUAL.S */
+
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%eax)
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+ EXPORT_PC()
+ movl offDvmDex_pResMethods(%eax),%eax # eax<- pDvmDex->pResMethods
+ movl (%eax,%ecx,4),%eax # eax<- resolved baseMethod
+ testl %eax,%eax # already resolved?
+ jne .LOP_INVOKE_VIRTUAL_continue # yes, continue
+ GET_GLUE(%eax)
+ movl %ecx,OUT_ARG1(%esp) # arg1<- ref
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ SPILL(rPC)
+ jmp .LOP_INVOKE_VIRTUAL_more
+
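+/*
+ * Illustrative sketch (C-like, not generated code) of the resolve-once
+ * pattern shared by all of the invoke handlers below: the per-dex cache
+ * is probed inline, and only a miss takes the out-of-line resolve path:
+ *
+ *   Method *base = pDvmDex->pResMethods[BBBB];   // inline cache probe
+ *   if (base == NULL)
+ *       base = resolve(curMethod->clazz, BBBB);  // slow path fills cache
+ *   // virtual dispatch then indexes this->clazz->vtable[base->methodIndex]
+ */
+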
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: x86/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(rINST_FULL)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(rINST_FULL),%ecx # ecx<- pDvmDex
+ EXPORT_PC()
+ movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+ movl (%ecx,%eax,4),%ecx # ecx<- resolved baseMethod
+ movl offGlue_method(rINST_FULL),%eax # eax<- method
+ movzwl 4(rPC),rINST_FULL # rINST_FULL<- GFED or CCCC
+ .if (!0)
+ andl $0xf,rINST_FULL # rINST_FULL<- D (or stays CCCC)
+ .endif
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- "this" ptr
+ testl rINST_FULL,rINST_FULL # null "this"?
+ je common_errNullObject # yes, throw
+ movl offMethod_clazz(%eax),%eax # eax<- method->clazz
+ testl %ecx,%ecx # already resolved?
+ jne .LOP_INVOKE_SUPER_continue # yes - go on
+ jmp .LOP_INVOKE_SUPER_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: x86/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+ movzwl 4(rPC),rPC # rPC<- GFED or CCCC
+ movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
+ .if (!0)
+ andl $0xf,rPC # rPC<- D (or stays CCCC)
+ .endif
+ testl %eax,%eax # already resolved?
+ GET_VREG(%ecx,rPC) # ecx<- "this" ptr
+ je .LOP_INVOKE_DIRECT_resolve # not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+ UNSPILL(rPC)
+ testl %ecx,%ecx # null "this"?
+ movl $0,%ecx
+ #jne common_invokeMethodNoRange # no, continue on
+ jne common_invokeOld # no, continue on, eax<- method, ecx<- methodCallRange
+ jmp common_errNullObject
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: x86/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ EXPORT_PC()
+ movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+ movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
+ movl $0,%ecx # needed by common_invokeOld - revisit
+ testl %eax,%eax
+ #jne common_invokeMethodNoRange
+ jne common_invokeOld
+ GET_GLUE(%ecx)
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movzwl 2(rPC),%eax
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
+ movl %ecx,OUT_ARG0(%esp) # arg0<- clazz
+ jmp .LOP_INVOKE_STATIC_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: x86/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ movzwl 4(rPC),%eax # eax<- FEDC or CCCC
+ GET_GLUE(%ecx)
+ .if (!0)
+ andl $0xf,%eax # eax<- C (or stays CCCC)
+ .endif
+ GET_VREG(%eax,%eax) # eax<- "this"
+ EXPORT_PC()
+ testl %eax,%eax # null this?
+ je common_errNullObject # yes, fail
+ movl offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- class
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- methodClassDex
+ movl offGlue_method(%ecx),%ecx # ecx<- method
+ movl %eax,OUT_ARG3(%esp) # arg3<- dex
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl %ecx,OUT_ARG2(%esp) # arg2<- method
+ movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
+ SPILL(rPC)
+ jmp .LOP_INVOKE_INTERFACE_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: x86/OP_UNUSED_73.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: x86/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: x86/OP_INVOKE_VIRTUAL.S */
+
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%eax)
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
+ EXPORT_PC()
+ movl offDvmDex_pResMethods(%eax),%eax # eax<- pDvmDex->pResMethods
+ movl (%eax,%ecx,4),%eax # eax<- resolved baseMethod
+ testl %eax,%eax # already resolved?
+ jne .LOP_INVOKE_VIRTUAL_RANGE_continue # yes, continue
+ GET_GLUE(%eax)
+ movl %ecx,OUT_ARG1(%esp) # arg1<- ref
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ SPILL(rPC)
+ jmp .LOP_INVOKE_VIRTUAL_RANGE_more
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: x86/OP_INVOKE_SUPER_RANGE.S */
+/* File: x86/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(rINST_FULL)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(rINST_FULL),%ecx # ecx<- pDvmDex
+ EXPORT_PC()
+ movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+ movl (%ecx,%eax,4),%ecx # ecx<- resolved baseMethod
+ movl offGlue_method(rINST_FULL),%eax # eax<- method
+ movzwl 4(rPC),rINST_FULL # rINST_FULL<- GFED or CCCC
+ .if (!1)
+ andl $0xf,rINST_FULL # rINST_FULL<- D (or stays CCCC)
+ .endif
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- "this" ptr
+ testl rINST_FULL,rINST_FULL # null "this"?
+ je common_errNullObject # yes, throw
+ movl offMethod_clazz(%eax),%eax # eax<- method->clazz
+ testl %ecx,%ecx # already resolved?
+ jne .LOP_INVOKE_SUPER_RANGE_continue # yes - go on
+ jmp .LOP_INVOKE_SUPER_RANGE_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: x86/OP_INVOKE_DIRECT_RANGE.S */
+/* File: x86/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+ movzwl 4(rPC),rPC # rPC<- GFED or CCCC
+ movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
+ .if (!1)
+ andl $0xf,rPC # rPC<- D (or stays CCCC)
+ .endif
+ testl %eax,%eax # already resolved?
+ GET_VREG(%ecx,rPC) # ecx<- "this" ptr
+ je .LOP_INVOKE_DIRECT_RANGE_resolve # not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+ UNSPILL(rPC)
+ testl %ecx,%ecx # null "this"?
+ movl $1,%ecx
+ #jne common_invokeMethodRange # no, continue on
+ jne common_invokeOld # no, continue on, eax<- method, ecx<- methodCallRange
+ jmp common_errNullObject
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: x86/OP_INVOKE_STATIC_RANGE.S */
+/* File: x86/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
+ EXPORT_PC()
+ movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
+ movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
+ movl $1,%ecx # needed by common_invokeOld - revisit
+ testl %eax,%eax
+ #jne common_invokeMethodRange
+ jne common_invokeOld
+ GET_GLUE(%ecx)
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movzwl 2(rPC),%eax
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
+ movl %ecx,OUT_ARG0(%esp) # arg0<- clazz
+ jmp .LOP_INVOKE_STATIC_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: x86/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: x86/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ movzwl 4(rPC),%eax # eax<- FEDC or CCCC
+ GET_GLUE(%ecx)
+ .if (!1)
+ andl $0xf,%eax # eax<- C (or stays CCCC)
+ .endif
+ GET_VREG(%eax,%eax) # eax<- "this"
+ EXPORT_PC()
+ testl %eax,%eax # null this?
+ je common_errNullObject # yes, fail
+ movl offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- class
+ movl offGlue_methodClassDex(%ecx),%eax # eax<- methodClassDex
+ movl offGlue_method(%ecx),%ecx # ecx<- method
+ movl %eax,OUT_ARG3(%esp) # arg3<- dex
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl %ecx,OUT_ARG2(%esp) # arg2<- method
+ movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
+ SPILL(rPC)
+ jmp .LOP_INVOKE_INTERFACE_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: x86/OP_UNUSED_79.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: x86/OP_UNUSED_7A.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_INT: /* 0x7b */
+/* File: x86/OP_NEG_INT.S */
+/* File: x86/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+
+ negl %eax
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
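+/*
+ * Decode sketch for the unop template above (illustrative C, not
+ * generated code): rINST holds the 16-bit code unit B|A|op, so the two
+ * register nibbles come out as
+ *
+ *   int A = (inst >> 8) & 0x0f;   // movzbl rINST_HI + andb $0xf
+ *   int B = inst >> 12;           // sarl $12 on the full register
+ *   vreg[A] = op(vreg[B]);
+ */
+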
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_INT: /* 0x7c */
+/* File: x86/OP_NOT_INT.S */
+/* File: x86/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+
+ notl %eax
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: x86/OP_NEG_LONG.S */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- v[B+1]
+ negl %eax
+ adcl $0,%ecx
+ negl %ecx
+ SET_VREG_WORD(%eax,rINST_FULL,0) # v[A+0]<- eax
+ SET_VREG_WORD(%ecx,rINST_FULL,1) # v[A+1]<- ecx
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
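+/*
+ * The three-instruction sequence above is 64-bit two's-complement
+ * negation without a spare register. Sketch of the identity it uses:
+ *
+ *   lo = -lo;             // negl %eax, sets carry iff lo != 0
+ *   hi = -(hi + carry);   // adcl $0,%ecx ; negl %ecx
+ *
+ * which equals -(hi:lo) because -x = ~x + 1 over the full 64 bits.
+ */
+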
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: x86/OP_NOT_LONG.S */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- v[B+1]
+ notl %eax
+ notl %ecx
+ SET_VREG_WORD(%eax,rINST_FULL,0) # v[A+0]<- eax
+ SET_VREG_WORD(%ecx,rINST_FULL,1) # v[A+1]<- ecx
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: x86/OP_NEG_FLOAT.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ flds (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fchs
+ fstps (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: x86/OP_NEG_DOUBLE.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ fldl (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fchs
+ fstpl (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: x86/OP_INT_TO_LONG.S */
+ /* int to long vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ SPILL(rPC) # will step on edx later
+ andb $0xf,%cl # ecx<- A
+ cltd # edx:eax<- sssssssBBBBBBBB
+ SET_VREG_WORD(%edx,%ecx,1) # v[A+1]<- edx/rPC
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0) # v[A+0]<- %eax
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
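+/*
+ * cltd sign-extends eax into edx:eax, which is exactly the int-to-long
+ * widening Dalvik wants. Illustrative C equivalent:
+ *
+ *   int64_t wide = (int64_t)(int32_t)vreg[B];
+ *   vreg[A]   = (uint32_t)wide;          // low word
+ *   vreg[A+1] = (uint32_t)(wide >> 32);  // high word (all sign bits)
+ *
+ * edx doubles as rPC here, hence the SPILL/UNSPILL around the cltd.
+ */
+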
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: x86/OP_INT_TO_FLOAT.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ fildl (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+ fstps (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: x86/OP_INT_TO_DOUBLE.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ fildl (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+ fstpl (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: x86/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: x86/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movzbl rINST_HI,%eax # eax<- BA
+ andb $0xf,%al # eax<- A
+ shrl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ SET_VREG(%ecx,%eax) # fp[A]<-fp[B]
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: x86/OP_LONG_TO_FLOAT.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ fildll (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+ fstps (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: x86/OP_LONG_TO_DOUBLE.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ fildll (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+ fstpl (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: x86/OP_FLOAT_TO_INT.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate. This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+ /* float/double to int/long vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ .if 0
+ fldl (rFP,rINST_FULL,4) # %st0<- vB
+ .else
+ flds (rFP,rINST_FULL,4) # %st0<- vB
+ .endif
+ ftst
+ fnstcw LOCAL0_OFFSET(%ebp) # remember original rounding mode
+ movzwl LOCAL0_OFFSET(%ebp),%eax
+ movb $0xc,%ah
+ movw %ax,LOCAL0_OFFSET+2(%ebp)
+ fldcw LOCAL0_OFFSET+2(%ebp) # set "to zero" rounding mode
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ .if 0
+ fistpll (rFP,%ecx,4) # convert and store
+ .else
+ fistpl (rFP,%ecx,4) # convert and store
+ .endif
+ fldcw LOCAL0_OFFSET(%ebp) # restore previous rounding mode
+ jmp .LOP_FLOAT_TO_INT_continue
+
+
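+/*
+ * Illustrative C statement (not generated code) of the Java conversion
+ * rules this handler and its out-of-line _continue tail must implement;
+ * the x87 default behavior differs on every one of these cases, hence
+ * the control-word dance above:
+ *
+ *   if (isnan(v))           result = 0;
+ *   else if (v >= INT_MAX)  result = INT_MAX;  // LONG_MAX for *_TO_LONG
+ *   else if (v <= INT_MIN)  result = INT_MIN;  // LONG_MIN for *_TO_LONG
+ *   else                    result = (int)v;   // truncate toward zero
+ */
+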
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: x86/OP_FLOAT_TO_LONG.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate. This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+ /* float/double to int/long vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ .if 0
+ fldl (rFP,rINST_FULL,4) # %st0<- vB
+ .else
+ flds (rFP,rINST_FULL,4) # %st0<- vB
+ .endif
+ ftst
+ fnstcw LOCAL0_OFFSET(%ebp) # remember original rounding mode
+ movzwl LOCAL0_OFFSET(%ebp),%eax
+ movb $0xc,%ah
+ movw %ax,LOCAL0_OFFSET+2(%ebp)
+ fldcw LOCAL0_OFFSET+2(%ebp) # set "to zero" rounding mode
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ .if 1
+ fistpll (rFP,%ecx,4) # convert and store
+ .else
+ fistpl (rFP,%ecx,4) # convert and store
+ .endif
+ fldcw LOCAL0_OFFSET(%ebp) # restore previous rounding mode
+ jmp .LOP_FLOAT_TO_LONG_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: x86/OP_FLOAT_TO_DOUBLE.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ flds (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+ fstpl (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: x86/OP_DOUBLE_TO_INT.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate. This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+ /* float/double to int/long vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ .if 1
+ fldl (rFP,rINST_FULL,4) # %st0<- vB
+ .else
+ flds (rFP,rINST_FULL,4) # %st0<- vB
+ .endif
+ ftst
+ fnstcw LOCAL0_OFFSET(%ebp) # remember original rounding mode
+ movzwl LOCAL0_OFFSET(%ebp),%eax
+ movb $0xc,%ah
+ movw %ax,LOCAL0_OFFSET+2(%ebp)
+ fldcw LOCAL0_OFFSET+2(%ebp) # set "to zero" rounding mode
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ .if 0
+ fistpll (rFP,%ecx,4) # convert and store
+ .else
+ fistpl (rFP,%ecx,4) # convert and store
+ .endif
+ fldcw LOCAL0_OFFSET(%ebp) # restore previous rounding mode
+ jmp .LOP_DOUBLE_TO_INT_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: x86/OP_DOUBLE_TO_LONG.S */
+/* File: x86/cvtfp_int.S */
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate. This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+ /* float/double to int/long vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ .if 1
+ fldl (rFP,rINST_FULL,4) # %st0<- vB
+ .else
+ flds (rFP,rINST_FULL,4) # %st0<- vB
+ .endif
+ ftst
+ fnstcw LOCAL0_OFFSET(%ebp) # remember original rounding mode
+ movzwl LOCAL0_OFFSET(%ebp),%eax
+ movb $0xc,%ah
+ movw %ax,LOCAL0_OFFSET+2(%ebp)
+ fldcw LOCAL0_OFFSET+2(%ebp) # set "to zero" rounding mode
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ .if 1
+ fistpll (rFP,%ecx,4) # convert and store
+ .else
+ fistpl (rFP,%ecx,4) # convert and store
+ .endif
+ fldcw LOCAL0_OFFSET(%ebp) # restore previous rounding mode
+ jmp .LOP_DOUBLE_TO_LONG_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: x86/OP_DOUBLE_TO_FLOAT.S */
+/* File: x86/fpcvt.S */
+ /*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ fldl (rFP,rINST_FULL,4) # %st0<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+ fstps (rFP,%ecx,4) # vA<- %st0
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: x86/OP_INT_TO_BYTE.S */
+/* File: x86/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+
+ movsbl %al,%eax
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: x86/OP_INT_TO_CHAR.S */
+/* File: x86/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+
+ movzwl %ax,%eax
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: x86/OP_INT_TO_SHORT.S */
+/* File: x86/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ andb $0xf,%cl # ecx<- A
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+
+
+ movswl %ax,%eax
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
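+/*
+ * The three narrowing conversions above differ only in the extension
+ * instruction. C equivalents (illustrative):
+ *
+ *   int-to-byte:  vA = (int32_t)(int8_t)vB;    // movsbl, sign-extend
+ *   int-to-char:  vA = (int32_t)(uint16_t)vB;  // movzwl, zero-extend
+ *   int-to-short: vA = (int32_t)(int16_t)vB;   // movswl, sign-extend
+ */
+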
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT: /* 0x90 */
+/* File: x86/OP_ADD_INT.S */
+/* File: x86/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ addl (rFP,%ecx,4),%eax # ex: addl (rFP,%ecx,4),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+
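+/*
+ * Operand decode for the vAA,vBB,vCC templates above and below
+ * (illustrative C, not generated code): the second 16-bit code unit
+ * packs CC|BB, so a little-endian byte fetch gives
+ *
+ *   int BB = bytecode[2], CC = bytecode[3];  // movzbl 2(rPC)/3(rPC)
+ *   int AA = inst >> 8;                      // movzbl rINST_HI
+ *   vreg[AA] = vreg[BB] op vreg[CC];
+ */
+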
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT: /* 0x91 */
+/* File: x86/OP_SUB_INT.S */
+/* File: x86/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ subl (rFP,%ecx,4),%eax # ex: addl (rFP,%ecx,4),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT: /* 0x92 */
+/* File: x86/OP_MUL_INT.S */
+ /*
+ * 32-bit binary multiplication.
+ */
+ /* mul vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ GET_VREG(%eax,%eax) # eax<- vBB
+ imull (rFP,%ecx,4),%eax # trashes rPC/edx
+ UNSPILL(rPC)
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT: /* 0x93 */
+/* File: x86/OP_DIV_INT.S */
+/* File: x86/bindiv.S */
+
+ /*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ GET_VREG(%ecx,%ecx) # ecx<- vCC
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $-1,%ecx
+ jne .LOP_DIV_INT_continue_div
+ cmpl $0x80000000,%eax
+ jne .LOP_DIV_INT_continue_div
+ movl $0x80000000,%eax
+ jmp .LOP_DIV_INT_finish_div
+
+
+
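+/*
+ * Sketch of the two special cases the bindiv template filters out
+ * before issuing idiv (illustrative C):
+ *
+ *   if (divisor == 0) throw ArithmeticException;  // common_errDivideByZero
+ *   if (dividend == INT_MIN && divisor == -1)
+ *       quot = INT_MIN, rem = 0;  // idiv would fault (#DE) on this pair
+ *   else
+ *       quot = dividend / divisor, rem = dividend % divisor;
+ */
+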
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT: /* 0x94 */
+/* File: x86/OP_REM_INT.S */
+/* File: x86/bindiv.S */
+
+ /*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ GET_VREG(%ecx,%ecx) # ecx<- vCC
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $-1,%ecx
+ jne .LOP_REM_INT_continue_div
+ cmpl $0x80000000,%eax
+ jne .LOP_REM_INT_continue_div
+ movl $0,%edx
+ jmp .LOP_REM_INT_finish_div
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT: /* 0x95 */
+/* File: x86/OP_AND_INT.S */
+/* File: x86/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ andl (rFP,%ecx,4),%eax # ex: addl (rFP,%ecx,4),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT: /* 0x96 */
+/* File: x86/OP_OR_INT.S */
+/* File: x86/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ orl (rFP,%ecx,4),%eax # ex: addl (rFP,%ecx,4),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT: /* 0x97 */
+/* File: x86/OP_XOR_INT.S */
+/* File: x86/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ xorl (rFP,%ecx,4),%eax # ex: addl (rFP,%ecx,4),%eax
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT: /* 0x98 */
+/* File: x86/OP_SHL_INT.S */
+/* File: x86/binop1.S */
+ /*
+ * Generic 32-bit binary operation in which both operands are loaded
+ * into registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ GET_VREG(%ecx,%ecx) # ecx<- vCC
+ sall %cl,%eax # ex: addl %ecx,%eax
+ movzbl rINST_HI,%ecx # tmp<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT: /* 0x99 */
+/* File: x86/OP_SHR_INT.S */
+/* File: x86/binop1.S */
+ /*
+ * Generic 32-bit binary operation in which both operands are loaded
+ * into registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ GET_VREG(%ecx,%ecx) # ecx<- vCC
+ sarl %cl,%eax # ex: addl %ecx,%eax
+ movzbl rINST_HI,%ecx # tmp<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT: /* 0x9a */
+/* File: x86/OP_USHR_INT.S */
+/* File: x86/binop1.S */
+ /*
+ * Generic 32-bit binary operation in which both operands are loaded
+ * into registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ GET_VREG(%eax,%eax) # eax<- vBB
+ GET_VREG(%ecx,%ecx) # ecx<- vCC
+ shrl %cl,%eax # ex: addl %ecx,%eax
+ movzbl rINST_HI,%ecx # tmp<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: x86/OP_ADD_LONG.S */
+/* File: x86/binopWide.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0) # rPC<- v[BB+0]
+ GET_VREG_WORD(%eax,%eax,1) # eax<- v[BB+1]
+ addl (rFP,%ecx,4),rPC # ex: addl (rFP,%ecx,4),rPC
+ adcl 4(rFP,%ecx,4),%eax # ex: adcl 4(rFP,%ecx,4),%eax
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SET_VREG_WORD(rPC,rINST_FULL,0) # v[AA+0] <- rPC
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,rINST_FULL,1) # v[AA+1] <- eax
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
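+/*
+ * The addl/adcl pair above is a standard 32-bit carry chain; the
+ * subtract variant uses subl/sbbl the same way. Illustrative C:
+ *
+ *   uint64_t a = ((uint64_t)v[BB+1] << 32) | v[BB+0];
+ *   uint64_t b = ((uint64_t)v[CC+1] << 32) | v[CC+0];
+ *   uint64_t r = a + b;  // addl adds low words, adcl folds the carry
+ *   v[AA+0] = (uint32_t)r;  v[AA+1] = (uint32_t)(r >> 32);
+ */
+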
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: x86/OP_SUB_LONG.S */
+/* File: x86/binopWide.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0) # rPC<- v[BB+0]
+ GET_VREG_WORD(%eax,%eax,1) # eax<- v[BB+1]
+ subl (rFP,%ecx,4),rPC # ex: addl (rFP,%ecx,4),rPC
+ sbbl 4(rFP,%ecx,4),%eax # ex: adcl 4(rFP,%ecx,4),%eax
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SET_VREG_WORD(rPC,rINST_FULL,0) # v[AA+0] <- rPC
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,rINST_FULL,1) # v[AA+1] <- eax
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: x86/OP_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * We could definitely use more free registers for
+ * this code. We must spill rPC (edx) because it
+ * is used by imul. We'll also spill rINST (ebx),
+ * giving us eax, ebx, ecx and edx as computational
+ * temps. On top of that, we'll spill rIBASE (edi)
+ * for use as the vB pointer and rFP (esi) for use
+ * as the vC pointer. Yuck.
+ */
+ /* mul-long vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ SPILL(rIBASE)
+ SPILL(rFP)
+ SPILL(rINST_FULL)
+ leal (rFP,%eax,4),rIBASE # rIBASE<- &v[B]
+ leal (rFP,%ecx,4),rFP # rFP<- &v[C]
+ movl 4(rIBASE),%ecx # ecx<- Bmsw
+ imull (rFP),%ecx # ecx<- (Bmsw*Clsw)
+ movl 4(rFP),%eax # eax<- Cmsw
+ imull (rIBASE),%eax # eax<- (Cmsw*Blsw)
+ addl %eax,%ecx # ecx<- (Bmsw*Clsw)+(Cmsw*Blsw)
+ movl (rFP),%eax # eax<- Clsw
+ mull (rIBASE) # edx:eax<- (Clsw*Blsw)
+ UNSPILL(rINST_FULL)
+ UNSPILL(rFP)
+ jmp .LOP_MUL_LONG_continue
+
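+/*
+ * Illustrative C for the partial-product scheme above (not generated
+ * code): for a 64x64->64 multiply the Bmsw*Cmsw term shifts entirely
+ * out of the low 64 bits, so only three 32-bit multiplies are needed:
+ *
+ *   uint32_t cross = Bmsw*Clsw + Cmsw*Blsw;   // the two imulls
+ *   uint64_t low   = (uint64_t)Blsw * Clsw;   // mull -> edx:eax
+ *   uint64_t res   = low + ((uint64_t)cross << 32);
+ */
+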
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: x86/OP_DIV_LONG.S */
+ /* div vAA, vBB, vCC */
+ movzbl 3(rPC),%eax # eax<- CC
+ movzbl 2(rPC),%ecx # ecx<- BB
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0)
+ GET_VREG_WORD(%eax,%eax,1)
+ movl rPC,OUT_ARG2(%esp)
+ testl %eax,%eax
+ je .LOP_DIV_LONG_check_zero
+ cmpl $-1,%eax
+ je .LOP_DIV_LONG_check_neg1
+.LOP_DIV_LONG_notSpecial:
+ GET_VREG_WORD(rPC,%ecx,0)
+ GET_VREG_WORD(%ecx,%ecx,1)
+.LOP_DIV_LONG_notSpecial1:
+ movl %eax,OUT_ARG3(%esp)
+ movl rPC,OUT_ARG0(%esp)
+ movl %ecx,OUT_ARG1(%esp)
+ jmp .LOP_DIV_LONG_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG: /* 0x9f */
+/* File: x86/OP_REM_LONG.S */
+/* File: x86/OP_DIV_LONG.S */
+ /* div vAA, vBB, vCC */
+ movzbl 3(rPC),%eax # eax<- CC
+ movzbl 2(rPC),%ecx # ecx<- BB
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0)
+ GET_VREG_WORD(%eax,%eax,1)
+ movl rPC,OUT_ARG2(%esp)
+ testl %eax,%eax
+ je .LOP_REM_LONG_check_zero
+ cmpl $-1,%eax
+ je .LOP_REM_LONG_check_neg1
+.LOP_REM_LONG_notSpecial:
+ GET_VREG_WORD(rPC,%ecx,0)
+ GET_VREG_WORD(%ecx,%ecx,1)
+.LOP_REM_LONG_notSpecial1:
+ movl %eax,OUT_ARG3(%esp)
+ movl rPC,OUT_ARG0(%esp)
+ movl %ecx,OUT_ARG1(%esp)
+ jmp .LOP_REM_LONG_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: x86/OP_AND_LONG.S */
+/* File: x86/binopWide.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0) # rPC<- v[BB+0]
+ GET_VREG_WORD(%eax,%eax,1) # eax<- v[BB+1]
+ andl (rFP,%ecx,4),rPC # ex: addl (rFP,%ecx,4),rPC
+ andl 4(rFP,%ecx,4),%eax # ex: adcl 4(rFP,%ecx,4),%eax
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SET_VREG_WORD(rPC,rINST_FULL,0) # v[AA+0] <- rPC
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,rINST_FULL,1) # v[AA+1] <- eax
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: x86/OP_OR_LONG.S */
+/* File: x86/binopWide.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0) # rPC<- v[BB+0]
+ GET_VREG_WORD(%eax,%eax,1) # eax<- v[BB+1]
+ orl (rFP,%ecx,4),rPC # ex: addl (rFP,%ecx,4),rPC
+ orl 4(rFP,%ecx,4),%eax # ex: adcl 4(rFP,%ecx,4),%eax
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SET_VREG_WORD(rPC,rINST_FULL,0) # v[AA+0] <- rPC
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,rINST_FULL,1) # v[AA+1] <- eax
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: x86/OP_XOR_LONG.S */
+/* File: x86/binopWide.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC)
+ GET_VREG_WORD(rPC,%eax,0) # rPC<- v[BB+0]
+ GET_VREG_WORD(%eax,%eax,1) # eax<- v[BB+1]
+ xorl (rFP,%ecx,4),rPC # ex: addl (rFP,%ecx,4),rPC
+ xorl 4(rFP,%ecx,4),%eax # ex: adcl 4(rFP,%ecx,4),%eax
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SET_VREG_WORD(rPC,rINST_FULL,0) # v[AA+0] <- rPC
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,rINST_FULL,1) # v[AA+1] <- eax
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: x86/OP_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance. x86 shifts automatically mask off
+ * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+ /* shl-long vAA, vBB, vCC */
+ /* ecx gets shift count */
+ /* Need to spill edx */
+ /* rINST gets AA */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC) # spill edx
+ GET_VREG_WORD(%edx,%eax,1) # edx<- v[BB+1]
+ GET_VREG (%ecx,%ecx) # ecx<- vCC
+ GET_VREG_WORD(%eax,%eax,0) # eax<- v[BB+0]
+ shldl %eax,%edx
+ sall %cl,%eax
+ testb $32,%cl
+ je 2f
+ movl %eax,%edx
+ xorl %eax,%eax
+2:
+ movzbl rINST_HI,%ecx
+ SET_VREG_WORD(%edx,%ecx,1) # v[AA+1]<- %edx
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ jmp .LOP_SHL_LONG_finish
+
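+/*
+ * Sketch of the 64-bit shift-left (illustrative C): shldl/sall give the
+ * count-in-[0,31] result, and the testb $32 fixup swaps in the other
+ * half when bit 5 of the (6-bit) Dalvik shift count is set:
+ *
+ *   n = vCC & 63;
+ *   if (n == 0)      { }  // unchanged
+ *   else if (n < 32) { hi = (hi << n) | (lo >> (32 - n)); lo <<= n; }
+ *   else             { hi = lo << (n - 32);               lo  = 0;  }
+ */
+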
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: x86/OP_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance. x86 shifts automatically mask off
+ * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+ /* shr-long vAA, vBB, vCC */
+ /* ecx gets shift count */
+ /* Need to spill edx */
+ /* rINST gets AA */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC) # spill edx
+ GET_VREG_WORD(%edx,%eax,1) # edx<- v[BB+1]
+ GET_VREG (%ecx,%ecx) # ecx<- vCC
+ GET_VREG_WORD(%eax,%eax,0) # eax<- v[BB+0]
+ shrdl %edx,%eax
+ sarl %cl,%edx
+ testb $32,%cl
+ je 2f
+ movl %edx,%eax
+ sarl $31,%edx
+2:
+ movzbl rINST_HI,%ecx
+ SET_VREG_WORD(%edx,%ecx,1) # v[AA+1]<- edx
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ jmp .LOP_SHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: x86/OP_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance. x86 shifts automatically mask off
+ * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ /* ecx gets shift count */
+ /* Need to spill edx */
+ /* rINST gets AA */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ SPILL(rPC) # spill edx
+ GET_VREG_WORD(%edx,%eax,1) # edx<- v[BB+1]
+ GET_VREG (%ecx,%ecx) # ecx<- vCC
+ GET_VREG_WORD(%eax,%eax,0) # eax<- v[BB+0]
+ shrdl %edx,%eax
+ shrl %cl,%edx
+ testb $32,%cl
+ je 2f
+ movl %edx,%eax
+ xorl %edx,%edx
+2:
+ movzbl rINST_HI,%ecx
+ SET_VREG_WORD(%edx,%ecx,1) # v[AA+1]<- edx
+ UNSPILL(rPC)
+ jmp .LOP_USHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: x86/OP_ADD_FLOAT.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ flds (rFP,%eax,4) # vBB to fp stack
+ fadds (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstps (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: x86/OP_SUB_FLOAT.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ flds (rFP,%eax,4) # vBB to fp stack
+ fsubs (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstps (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: x86/OP_MUL_FLOAT.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ flds (rFP,%eax,4) # vBB to fp stack
+ fmuls (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstps (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: x86/OP_DIV_FLOAT.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ flds (rFP,%eax,4) # vBB to fp stack
+ fdivs (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstps (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: x86/OP_REM_FLOAT.S */
+ /* rem_float vAA, vBB, vCC */
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl 2(rPC),%eax # eax<- BB
+ flds (rFP,%ecx,4) # vCC (divisor) to fp stack
+ flds (rFP,%eax,4) # vBB (dividend) to fp stack
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ ADVANCE_PC(2)
+ fstps (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
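+/*
+ * fprem produces only a partial remainder: the C2 bit of the fp status
+ * word means "not fully reduced yet", and fstsw/sahf map C2 onto PF,
+ * which is what the jp back-edge tests. Net effect (illustrative C):
+ *
+ *   float r = vBB;
+ *   while (reduction_incomplete)   // C2 set after fprem
+ *       r = partial_rem(r, vCC);
+ *   vAA = r;                       // == fmodf(vBB, vCC), truncating
+ */
+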
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: x86/OP_ADD_DOUBLE.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 64-bit binary floating-point operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ fldl (rFP,%eax,4) # vBB to fp stack
+ faddl (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstpl (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: x86/OP_SUB_DOUBLE.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 64-bit binary floating-point operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ fldl (rFP,%eax,4) # vBB to fp stack
+ fsubl (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstpl (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: x86/OP_MUL_DOUBLE.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 64-bit binary floating-point operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ fldl (rFP,%eax,4) # vBB to fp stack
+ fmull (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstpl (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: x86/OP_DIV_DOUBLE.S */
+/* File: x86/binflop.S */
+ /*
+ * Generic 64-bit binary floating-point operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movzbl 3(rPC),%ecx # ecx<- CC
+ fldl (rFP,%eax,4) # vBB to fp stack
+ fdivl (rFP,%ecx,4) # ex: faddp
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ fstpl (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: x86/OP_REM_DOUBLE.S */
+ /* rem_double vAA, vBB, vCC */
+ movzbl 3(rPC),%ecx # ecx<- CC
+ movzbl 2(rPC),%eax # eax<- BB
+ fldl (rFP,%ecx,4) # vCC (divisor) to fp stack
+ fldl (rFP,%eax,4) # vBB (dividend) to fp stack
+ movzbl rINST_HI,%ecx # ecx<- AA
+ FETCH_INST_WORD(2)
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ ADVANCE_PC(2)
+ fstpl (rFP,%ecx,4) # %st to vAA
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: x86/OP_ADD_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = vA op vB",
+ * updating vA in place. This could be an x86 instruction or a
+ * function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB. Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ addl %eax,(rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
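+/*
+ * Decode sketch for the /2addr forms (illustrative C, not generated
+ * code): both register numbers share the high byte of the code unit,
+ * and vA is updated in place, directly in the frame:
+ *
+ *   int A = (inst >> 8) & 0x0f, B = inst >> 12;
+ *   vreg[A] = vreg[A] op vreg[B];   // e.g. addl %eax,(rFP,%ecx,4)
+ */
+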
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: x86/OP_SUB_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = vA op vB",
+ * updating vA in place. This could be an x86 instruction or a
+ * function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB. Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ subl %eax,(rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: x86/OP_MUL_INT_2ADDR.S */
+ /* mul vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ andb $0xf,%cl # ecx<- A
+ SPILL(rPC)
+ imull (rFP,%ecx,4),%eax
+ UNSPILL(rPC)
+ SET_VREG(%eax,%ecx)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: x86/OP_DIV_INT_2ADDR.S */
+/* File: x86/bindiv2addr.S */
+ /*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # ecx<- vB
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%eax,rINST_FULL) # eax<- vA
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $-1,%ecx
+ jne .LOP_DIV_INT_2ADDR_continue_div2addr
+ cmpl $0x80000000,%eax
+ jne .LOP_DIV_INT_2ADDR_continue_div2addr
+ movl $0x80000000,%eax
+ jmp .LOP_DIV_INT_2ADDR_finish_div2addr
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: x86/OP_REM_INT_2ADDR.S */
+/* File: x86/bindiv2addr.S */
+ /*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # ecx<- vB
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%eax,rINST_FULL) # eax<- vA
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $-1,%ecx
+ jne .LOP_REM_INT_2ADDR_continue_div2addr
+ cmpl $0x80000000,%eax
+ jne .LOP_REM_INT_2ADDR_continue_div2addr
+ movl $0,%edx
+ jmp .LOP_REM_INT_2ADDR_finish_div2addr
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: x86/OP_AND_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = vA op vB",
+ * updating vA in place. This could be an x86 instruction or a
+ * function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB. Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ andl %eax,(rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: x86/OP_OR_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = vA op vB",
+ * updating vA in place. This could be an x86 instruction or a
+ * function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB. Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ orl %eax,(rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: x86/OP_XOR_INT_2ADDR.S */
+/* File: x86/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = vA op vB",
+ * updating vA in place. This could be an x86 instruction or a
+ * function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB. Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ GET_VREG(%eax,rINST_FULL) # eax<- vB
+ FETCH_INST_WORD(1)
+ andb $0xf,%cl # ecx<- A
+ xorl %eax,(rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: x86/OP_SHL_INT_2ADDR.S */
+/* File: x86/shop2addr.S */
+ /*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # ecx<- vB
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%eax,rINST_FULL) # eax<- vA
+ sall %cl,%eax # ex: sarl %cl,%eax
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: x86/OP_SHR_INT_2ADDR.S */
+/* File: x86/shop2addr.S */
+ /*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # ecx<- vB
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%eax,rINST_FULL) # eax<- vA
+ sarl %cl,%eax # ex: sarl %cl,%eax
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: x86/OP_USHR_INT_2ADDR.S */
+/* File: x86/shop2addr.S */
+ /*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # ecx<- vB
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(%eax,rINST_FULL) # eax<- vA
+ shrl %cl,%eax # ex: sarl %cl,%eax
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: x86/OP_ADD_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- v[B+1]
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xF,rINST_LO # rINST_FULL<- A
+ addl %eax,(rFP,rINST_FULL,4) # example: addl %eax,(rFP,rINST_FULL,4)
+ adcl %ecx,4(rFP,rINST_FULL,4) # example: adcl %ecx,4(rFP,rINST_FULL,4)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: x86/OP_SUB_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+ GET_VREG_WORD(%ecx,%ecx,1) # ecx<- v[B+1]
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xF,rINST_LO # rINST_FULL<- A
+ subl %eax,(rFP,rINST_FULL,4) # example: addl %eax,(rFP,rINST_FULL,4)
+ sbbl %ecx,4(rFP,rINST_FULL,4) # example: adcl %ecx,4(rFP,rINST_FULL,4)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: x86/OP_MUL_LONG_2ADDR.S */
+ /*
+ * Signed 64-bit integer multiply, 2-addr version
+ *
+     * We could definitely use more free registers for
+     * this code.  We must spill rPC (edx) because it
+     * is used by imul.  We'll also spill rINST (ebx),
+     * giving us eax, ebx, ecx and edx as computational
+ * temps. On top of that, we'll spill rIBASE (edi)
+ * for use as the vA pointer and rFP (esi) for use
+ * as the vB pointer. Yuck.
+ */
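+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output) of the
+ * 32x32->64 schoolbook multiply built below; the cross terms computed here
+ * are folded into the high word in the _continue fragment:
+ *
+ *     uint64_t low   = (uint64_t)a_lo * b_lo;        // mull
+ *     uint32_t cross = a_hi * b_lo + a_lo * b_hi;    // imull + imull + addl
+ *     uint64_t res   = low + ((uint64_t)cross << 32);
+ */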
+ /* mul-long/2addr vA, vB */
+ movzbl rINST_HI,%eax # eax<- BA
+ andb $0xf,%al # eax<- A
+ sarl $12,rINST_FULL # rINST_FULL<- B
+ SPILL(rPC)
+ SPILL(rIBASE)
+ SPILL(rFP)
+ leal (rFP,%eax,4),rIBASE # rIBASE<- &v[A]
+ leal (rFP,rINST_FULL,4),rFP # rFP<- &v[B]
+ movl 4(rIBASE),%ecx # ecx<- Amsw
+ imull (rFP),%ecx # ecx<- (Amsw*Blsw)
+ movl 4(rFP),%eax # eax<- Bmsw
+ imull (rIBASE),%eax # eax<- (Bmsw*Alsw)
+ addl %eax,%ecx # ecx<- (Amsw*Blsw)+(Bmsw*Alsw)
+ movl (rFP),%eax # eax<- Blsw
+ mull (rIBASE) # eax<- (Blsw*Alsw)
+ jmp .LOP_MUL_LONG_2ADDR_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: x86/OP_DIV_LONG_2ADDR.S */
+ /* div/2addr vA, vB */
+ movzbl rINST_HI,%eax
+ shrl $4,%eax # eax<- B
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ SPILL(rPC)
+    GET_VREG_WORD(rPC,%eax,0)              # rPC<- vB lsw (divisor)
+    GET_VREG_WORD(%eax,%eax,1)             # eax<- vB msw (divisor)
+    movl     rPC,OUT_ARG2(%esp)            # arg2<- divisor lsw
+    testl    %eax,%eax                     # divisor msw zero? maybe div by 0
+    je       .LOP_DIV_LONG_2ADDR_check_zero
+    cmpl     $-1,%eax                      # divisor msw -1? maybe div by -1
+    je       .LOP_DIV_LONG_2ADDR_check_neg1
+.LOP_DIV_LONG_2ADDR_notSpecial:
+    GET_VREG_WORD(rPC,rINST_FULL,0)        # rPC<- vA lsw (dividend)
+    GET_VREG_WORD(%ecx,rINST_FULL,1)       # ecx<- vA msw (dividend)
+.LOP_DIV_LONG_2ADDR_notSpecial1:
+ jmp .LOP_DIV_LONG_2ADDR_continue
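+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): the
+ * tests above only filter on the divisor's most significant word; the
+ * check_zero/check_neg1 fragments complete the test.  The guarded cases:
+ *
+ *     if (divisor == 0)
+ *         throw ArithmeticException;             // common_errDivideByZero
+ *     else if (dividend == INT64_MIN && divisor == -1)
+ *         result = INT64_MIN;                    // quotient overflows
+ *     else
+ *         result = dividend / divisor;
+ */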
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: x86/OP_REM_LONG_2ADDR.S */
+/* File: x86/OP_DIV_LONG_2ADDR.S */
+ /* div/2addr vA, vB */
+ movzbl rINST_HI,%eax
+ shrl $4,%eax # eax<- B
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ SPILL(rPC)
+    GET_VREG_WORD(rPC,%eax,0)              # rPC<- vB lsw (divisor)
+    GET_VREG_WORD(%eax,%eax,1)             # eax<- vB msw (divisor)
+    movl     rPC,OUT_ARG2(%esp)            # arg2<- divisor lsw
+    testl    %eax,%eax                     # divisor msw zero? maybe div by 0
+    je       .LOP_REM_LONG_2ADDR_check_zero
+    cmpl     $-1,%eax                      # divisor msw -1? maybe div by -1
+    je       .LOP_REM_LONG_2ADDR_check_neg1
+.LOP_REM_LONG_2ADDR_notSpecial:
+    GET_VREG_WORD(rPC,rINST_FULL,0)        # rPC<- vA lsw (dividend)
+    GET_VREG_WORD(%ecx,rINST_FULL,1)       # ecx<- vA msw (dividend)
+.LOP_REM_LONG_2ADDR_notSpecial1:
+ jmp .LOP_REM_LONG_2ADDR_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: x86/OP_AND_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+    GET_VREG_WORD(%ecx,%ecx,1)             # ecx<- v[B+1]
+    movzbl   rINST_HI,rINST_FULL           # rINST_FULL<- BA
+    andb     $0xF,rINST_LO                 # rINST_FULL<- A
+    andl     %eax,(rFP,rINST_FULL,4)       # v[A+0]<- v[A+0] & v[B+0]
+    andl     %ecx,4(rFP,rINST_FULL,4)      # v[A+1]<- v[A+1] & v[B+1]
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: x86/OP_OR_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+    GET_VREG_WORD(%ecx,%ecx,1)             # ecx<- v[B+1]
+    movzbl   rINST_HI,rINST_FULL           # rINST_FULL<- BA
+    andb     $0xF,rINST_LO                 # rINST_FULL<- A
+    orl      %eax,(rFP,rINST_FULL,4)       # v[A+0]<- v[A+0] | v[B+0]
+    orl      %ecx,4(rFP,rINST_FULL,4)      # v[A+1]<- v[A+1] | v[B+1]
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: x86/OP_XOR_LONG_2ADDR.S */
+/* File: x86/binopWide2addr.S */
+ /*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG_WORD(%eax,%ecx,0) # eax<- v[B+0]
+    GET_VREG_WORD(%ecx,%ecx,1)             # ecx<- v[B+1]
+    movzbl   rINST_HI,rINST_FULL           # rINST_FULL<- BA
+    andb     $0xF,rINST_LO                 # rINST_FULL<- A
+    xorl     %eax,(rFP,rINST_FULL,4)       # v[A+0]<- v[A+0] ^ v[B+0]
+    xorl     %ecx,4(rFP,rINST_FULL,4)      # v[A+1]<- v[A+1] ^ v[B+1]
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: x86/OP_SHL_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ /* ecx gets shift count */
+ /* Need to spill edx */
+    /* rINST gets A */
+    movzbl   rINST_HI,%ecx                 # ecx<- BA
+    movzbl   rINST_HI,rINST_FULL           # rINST_FULL<- BA
+    andb     $0xf,rINST_LO                 # rINST_FULL<- A
+    GET_VREG_WORD(%eax,rINST_FULL,0)       # eax<- v[A+0]
+    sarl     $4,%ecx                       # ecx<- B
+    SPILL(rPC)
+    GET_VREG_WORD(%edx,rINST_FULL,1)       # edx<- v[A+1]
+    GET_VREG(%ecx,%ecx)                    # ecx<- vB (shift count)
+ shldl %eax,%edx
+ sall %cl,%eax
+ testb $32,%cl
+ je 2f
+ movl %eax,%edx
+ xorl %eax,%eax
+2:
+    SET_VREG_WORD(%edx,rINST_FULL,1)       # v[A+1]<- edx
+ UNSPILL(rPC)
+ jmp .LOP_SHL_LONG_2ADDR_finish
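+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): shldl
+ * and sall shift modulo 32, so bit 5 of the count is tested above to patch
+ * up counts of 32..63 (Dalvik masks long shifts to six bits):
+ *
+ *     unsigned n = count & 63;
+ *     if (n & 32) { hi = lo << (n & 31); lo = 0; }
+ *     else        { hi = (hi << n) | (n ? lo >> (32 - n) : 0); lo <<= n; }
+ */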
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: x86/OP_SHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* shr-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill edx */
+    /* rINST gets A */
+    movzbl   rINST_HI,%ecx                 # ecx<- BA
+    movzbl   rINST_HI,rINST_FULL           # rINST_FULL<- BA
+    andb     $0xf,rINST_LO                 # rINST_FULL<- A
+    GET_VREG_WORD(%eax,rINST_FULL,0)       # eax<- v[A+0]
+    sarl     $4,%ecx                       # ecx<- B
+    SPILL(rPC)
+    GET_VREG_WORD(%edx,rINST_FULL,1)       # edx<- v[A+1]
+    GET_VREG(%ecx,%ecx)                    # ecx<- vB (shift count)
+ shrdl %edx,%eax
+ sarl %cl,%edx
+ testb $32,%cl
+ je 2f
+ movl %edx,%eax
+ sarl $31,%edx
+2:
+    SET_VREG_WORD(%edx,rINST_FULL,1)       # v[A+1]<- edx
+ UNSPILL(rPC)
+ jmp .LOP_SHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: x86/OP_USHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* ushr-long/2addr vA, vB */
+    /* ecx gets shift count */
+    /* Need to spill edx */
+    /* rINST gets A */
+    movzbl   rINST_HI,%ecx                 # ecx<- BA
+    movzbl   rINST_HI,rINST_FULL           # rINST_FULL<- BA
+    andb     $0xf,rINST_LO                 # rINST_FULL<- A
+    GET_VREG_WORD(%eax,rINST_FULL,0)       # eax<- v[A+0]
+    sarl     $4,%ecx                       # ecx<- B
+    SPILL(rPC)
+    GET_VREG_WORD(%edx,rINST_FULL,1)       # edx<- v[A+1]
+    GET_VREG(%ecx,%ecx)                    # ecx<- vB (shift count)
+ shrdl %edx,%eax
+ shrl %cl,%edx
+ testb $32,%cl
+ je 2f
+ movl %edx,%eax
+ xorl %edx,%edx
+2:
+    SET_VREG_WORD(%edx,rINST_FULL,1)       # v[A+1]<- edx
+ UNSPILL(rPC)
+ jmp .LOP_USHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: x86/OP_ADD_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ andb $0xf,%cl # ecx<- A
+    flds     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fadds    (rFP,rINST_FULL,4)            # st(0)<- vA + vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstps (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: x86/OP_SUB_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ andb $0xf,%cl # ecx<- A
+    flds     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fsubs    (rFP,rINST_FULL,4)            # st(0)<- vA - vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstps (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: x86/OP_MUL_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ andb $0xf,%cl # ecx<- A
+    flds     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fmuls    (rFP,rINST_FULL,4)            # st(0)<- vA * vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstps (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: x86/OP_DIV_FLOAT_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ andb $0xf,%cl # ecx<- A
+    flds     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fdivs    (rFP,rINST_FULL,4)            # st(0)<- vA / vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstps (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: x86/OP_REM_FLOAT_2ADDR.S */
+ /* rem_float/2addr vA, vB */
+ movzx rINST_HI,%ecx # ecx<- A+
+ sarl $12,rINST_FULL # rINST_FULL<- B
+    flds     (rFP,rINST_FULL,4)            # vB to fp stack
+    andb     $0xf,%cl                      # ecx<- A
+    flds     (rFP,%ecx,4)                  # vA to fp stack
+    FETCH_INST_WORD(1)
+1:
+    fprem                                  # st(0)<- partial remainder st(0)/st(1)
+    fstsw    %ax                           # ax<- FPU status word; C2 set while incomplete
+    sahf                                   # ah-> eflags; C2 lands in PF
+    jp       1b                            # loop until fprem reduction completes
+    fstp     %st(1)                        # pop divisor; remainder stays in st(0)
+ ADVANCE_PC(1)
+ fstps (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
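+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): the
+ * fprem loop above computes the truncating remainder, i.e. the equivalent
+ * of the C library call
+ *
+ *     vA = fmodf(vA, vB);    // <math.h>; rem-double/2addr uses fmod()
+ */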
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: x86/OP_ADD_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+     * Generic 64-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx    rINST_HI,%ecx                 # ecx<- A+
+    andb     $0xf,%cl                      # ecx<- A
+    fldl     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    faddl    (rFP,rINST_FULL,4)            # st(0)<- vA + vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstpl (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: x86/OP_SUB_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+     * Generic 64-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx    rINST_HI,%ecx                 # ecx<- A+
+    andb     $0xf,%cl                      # ecx<- A
+    fldl     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fsubl    (rFP,rINST_FULL,4)            # st(0)<- vA - vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstpl (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: x86/OP_MUL_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+     * Generic 64-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx    rINST_HI,%ecx                 # ecx<- A+
+    andb     $0xf,%cl                      # ecx<- A
+    fldl     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fmull    (rFP,rINST_FULL,4)            # st(0)<- vA * vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstpl (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: x86/OP_DIV_DOUBLE_2ADDR.S */
+/* File: x86/binflop2addr.S */
+ /*
+     * Generic 64-bit binary float operation.
+     *
+     * For: add-fp, sub-fp, mul-fp, div-fp
+     */
+
+    /* binop/2addr vA, vB */
+    movzx    rINST_HI,%ecx                 # ecx<- A+
+    andb     $0xf,%cl                      # ecx<- A
+    fldl     (rFP,%ecx,4)                  # vA to fp stack
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fdivl    (rFP,rINST_FULL,4)            # st(0)<- vA / vB
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ fstpl (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: x86/OP_REM_DOUBLE_2ADDR.S */
+    /* rem_double/2addr vA, vB */
+    movzx    rINST_HI,%ecx                 # ecx<- A+
+    sarl     $12,rINST_FULL                # rINST_FULL<- B
+    fldl     (rFP,rINST_FULL,4)            # vB to fp stack
+    andb     $0xf,%cl                      # ecx<- A
+    fldl     (rFP,%ecx,4)                  # vA to fp stack
+    FETCH_INST_WORD(1)
+1:
+    fprem                                  # st(0)<- partial remainder st(0)/st(1)
+    fstsw    %ax                           # ax<- FPU status word; C2 set while incomplete
+    sahf                                   # ah-> eflags; C2 lands in PF
+    jp       1b                            # loop until fprem reduction completes
+    fstp     %st(1)                        # pop divisor; remainder stays in st(0)
+ ADVANCE_PC(1)
+ fstpl (rFP,%ecx,4) # %st to vA
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: x86/OP_ADD_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
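+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output) of the
+ * lit16 decode below; "code" stands for the 16-bit code units at rPC:
+ *
+ *     unsigned ba  = code[0] >> 8;        // B|A packed in the high byte
+ *     int      lit = (int16_t)code[1];    // ssssCCCC (movswl)
+ *     fp[ba & 0xf] = fp[ba >> 4] OP lit;
+ */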
+ /* binop/lit16 vA, vB, #+CCCC */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ addl %ecx,%eax # for example: addl %ecx, %eax
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: x86/OP_RSUB_INT.S */
+/* File: x86/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+    subl     %eax,%ecx                     # ecx<- ssssCCCC - vB (reverse subtract)
+ SET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: x86/OP_MUL_INT_LIT16.S */
+ /* mul/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST_FULL, ssssCCCC in ecx, vB in eax */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ SPILL(rPC)
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ imull %ecx,%eax # trashes rPC
+ UNSPILL(rPC)
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: x86/OP_DIV_INT_LIT16.S */
+/* File: x86/bindivLit16.S */
+ /*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
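+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): x86
+ * idiv raises a divide error (#DE) both on a zero divisor and on
+ * INT32_MIN / -1 (unrepresentable quotient), so both cases are filtered
+ * out before dividing:
+ *
+ *     if (lit == 0)                     throw ArithmeticException;
+ *     if (vB == INT32_MIN && lit == -1) { quot = INT32_MIN; rem = 0; }
+ *     else                              { quot = vB / lit; rem = vB % lit; }
+ */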
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST_FULL, ssssCCCC in ecx, vB in eax */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $-1,%ecx
+ jne .LOP_DIV_INT_LIT16_continue_div
+ cmpl $0x80000000,%eax
+ jne .LOP_DIV_INT_LIT16_continue_div
+ movl $0x80000000,%eax
+ jmp .LOP_DIV_INT_LIT16_finish_div
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: x86/OP_REM_INT_LIT16.S */
+/* File: x86/bindivLit16.S */
+ /*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST_FULL, ssssCCCC in ecx, vB in eax */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $-1,%ecx
+ jne .LOP_REM_INT_LIT16_continue_div
+ cmpl $0x80000000,%eax
+ jne .LOP_REM_INT_LIT16_continue_div
+ movl $0,%edx
+ jmp .LOP_REM_INT_LIT16_finish_div
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: x86/OP_AND_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+    andl     %ecx,%eax                     # eax<- vB & ssssCCCC
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: x86/OP_OR_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+    orl      %ecx,%eax                     # eax<- vB | ssssCCCC
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: x86/OP_XOR_INT_LIT16.S */
+/* File: x86/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movzbl rINST_HI,%eax # eax<- 000000BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB
+ movswl 2(rPC),%ecx # ecx<- ssssCCCC
+ movzbl rINST_HI,rINST_FULL # rINST_FULL<- BA
+ andb $0xf,rINST_LO # rINST_FULL<- A
+    xorl     %ecx,%eax                     # eax<- vB ^ ssssCCCC
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: x86/OP_ADD_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
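+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output) of the
+ * lit8 decode below; "bytes" stands for the byte stream at rPC:
+ *
+ *     unsigned aa  = bytes[1];           // movzx rINST_HI
+ *     unsigned bb  = bytes[2];           // movzbl 2(rPC)
+ *     int      lit = (int8_t)bytes[3];   // ssssssCC (movsbl)
+ *     fp[aa] = fp[bb] OP lit;
+ */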
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+ addl %ecx,%eax # ex: addl %ecx,%eax
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: x86/OP_RSUB_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    subl     %eax,%ecx                     # ecx<- ssssssCC - vBB (reverse subtract)
+ SET_VREG (%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: x86/OP_MUL_INT_LIT8.S */
+ /* mul/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ SPILL(rPC)
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+ imull %ecx,%eax # trashes rPC
+ UNSPILL(rPC)
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: x86/OP_DIV_INT_LIT8.S */
+/* File: x86/bindivLit8.S */
+ /*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $0x80000000,%eax
+ jne .LOP_DIV_INT_LIT8_continue_div
+ cmpl $-1,%ecx
+ jne .LOP_DIV_INT_LIT8_continue_div
+ movl $0x80000000,%eax
+ jmp .LOP_DIV_INT_LIT8_finish_div
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: x86/OP_REM_INT_LIT8.S */
+/* File: x86/bindivLit8.S */
+ /*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+ SPILL(rPC)
+ cmpl $0,%ecx
+ je common_errDivideByZero
+ cmpl $0x80000000,%eax
+ jne .LOP_REM_INT_LIT8_continue_div
+ cmpl $-1,%ecx
+ jne .LOP_REM_INT_LIT8_continue_div
+ movl $0,%edx
+ jmp .LOP_REM_INT_LIT8_finish_div
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: x86/OP_AND_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    andl     %ecx,%eax                     # eax<- vBB & ssssssCC
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: x86/OP_OR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    orl      %ecx,%eax                     # eax<- vBB | ssssssCC
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: x86/OP_XOR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    xorl     %ecx,%eax                     # eax<- vBB ^ ssssssCC
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: x86/OP_SHL_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    sall     %cl,%eax                      # eax<- vBB << (CC & 31)
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: x86/OP_SHR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    sarl     %cl,%eax                      # eax<- vBB >> (CC & 31), arithmetic
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: x86/OP_USHR_INT_LIT8.S */
+/* File: x86/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC),%eax # eax<- BB
+ movsbl 3(rPC),%ecx # ecx<- ssssssCC
+ movzx rINST_HI,rINST_FULL # rINST_FULL<- AA
+    GET_VREG (%eax,%eax)                   # eax<- vBB
+    shrl     %cl,%eax                      # eax<- vBB >> (CC & 31), logical
+ SET_VREG (%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E3: /* 0xe3 */
+/* File: x86/OP_UNUSED_E3.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E4: /* 0xe4 */
+/* File: x86/OP_UNUSED_E4.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E5: /* 0xe5 */
+/* File: x86/OP_UNUSED_E5.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E6: /* 0xe6 */
+/* File: x86/OP_UNUSED_E6.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E7: /* 0xe7 */
+/* File: x86/OP_UNUSED_E7.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E8: /* 0xe8 */
+/* File: x86/OP_UNUSED_E8.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_E9: /* 0xe9 */
+/* File: x86/OP_UNUSED_E9.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EA: /* 0xea */
+/* File: x86/OP_UNUSED_EA.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EB: /* 0xeb */
+/* File: x86/OP_UNUSED_EB.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EC: /* 0xec */
+/* File: x86/OP_UNUSED_EC.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_ED: /* 0xed */
+/* File: x86/OP_UNUSED_ED.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: x86/OP_EXECUTE_INLINE.S */
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We will be calling through a function table:
+ *
+ * (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3, pResult)
+ *
+ */
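+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output) of that
+ * dispatch; the table and field names are taken from the comment above:
+ *
+ *     bool ok = (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3,
+ *                                                   &glue->retval);
+ *     if (!ok) goto exceptionThrown;     // the testl %eax,%eax below
+ */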
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ GET_GLUE(%ecx)
+ EXPORT_PC()
+ movzwl 2(rPC),%eax # eax<- BBBB
+ leal offGlue_retval(%ecx),%ecx # ecx<- & glue->retval
+ movl %ecx,OUT_ARG4(%esp)
+ sarl $12,rINST_FULL # rINST_FULL<- arg count (0-4)
+ SPILL(rPC)
+ call .LOP_EXECUTE_INLINE_continue # make call; will return after
+ UNSPILL(rPC)
+ testl %eax,%eax # successful?
+ FETCH_INST_WORD(3)
+ je common_exceptionThrown # no, handle exception
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_EF: /* 0xef */
+/* File: x86/OP_UNUSED_EF.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
+/* File: x86/OP_INVOKE_DIRECT_EMPTY.S */
+ /*
+ * invoke-direct-empty is a no-op in a "standard" interpreter.
+ */
+ FETCH_INST_WORD(3)
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_F1: /* 0xf1 */
+/* File: x86/OP_UNUSED_F1.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: x86/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # vB (object we're operating on)
+ movzwl 2(rPC),%eax # eax<- field byte offset
+ cmpl $0,%ecx # is object null?
+ je common_errNullObject
+ movl (%ecx,%eax,1),%eax
+ movzbl rINST_HI,%ecx
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+    andb     $0xf,%cl                      # ecx<- A
+ SET_VREG (%eax,%ecx) # fp[A]<- result
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: x86/OP_IGET_WIDE_QUICK.S */
+ /* For: iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # vB (object we're operating on)
+ movzwl 2(rPC),%eax # eax<- field byte offset
+ cmpl $0,%ecx # is object null?
+ je common_errNullObject
+ leal (%ecx,%eax,1),%eax # eax<- address of 64-bit source
+ movl (%eax),%ecx # ecx<- lsw
+ movl 4(%eax),%eax # eax<- msw
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ SET_VREG_WORD(%ecx,rINST_FULL,0) # v[A+0]<- lsw
+ SET_VREG_WORD(%eax,rINST_FULL,1) # v[A+1]<- msw
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: x86/OP_IGET_OBJECT_QUICK.S */
+/* File: x86/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # vB (object we're operating on)
+ movzwl 2(rPC),%eax # eax<- field byte offset
+ cmpl $0,%ecx # is object null?
+ je common_errNullObject
+ movl (%ecx,%eax,1),%eax
+ movzbl rINST_HI,%ecx
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+    andb     $0xf,%cl                      # ecx<- A
+ SET_VREG (%eax,%ecx) # fp[A]<- result
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: x86/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # vB (object we're operating on)
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movzwl 2(rPC),%eax # eax<- field byte offset
+ testl %ecx,%ecx # is object null?
+ je common_errNullObject
+ movl rINST_FULL,(%ecx,%eax,1)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: x86/OP_IPUT_WIDE_QUICK.S */
+ /* For: iput-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # vB (object we're operating on)
+ movzwl 2(rPC),%eax # eax<- field byte offset
+ testl %ecx,%ecx # is object null?
+ je common_errNullObject
+ leal (%ecx,%eax,1),%ecx # ecx<- Address of 64-bit target
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG_WORD(%eax,rINST_FULL,0) # eax<- lsw
+ GET_VREG_WORD(rINST_FULL,rINST_FULL,1) # rINST_FULL<- msw
+ movl %eax,(%ecx)
+ movl rINST_FULL,4(%ecx)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: x86/OP_IPUT_OBJECT_QUICK.S */
+/* File: x86/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINST_HI,%ecx # ecx<- BA
+ sarl $4,%ecx # ecx<- B
+ GET_VREG(%ecx,%ecx) # vB (object we're operating on)
+ movzbl rINST_HI,rINST_FULL
+ andb $0xf,rINST_LO # rINST_FULL<- A
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movzwl 2(rPC),%eax # eax<- field byte offset
+ testl %ecx,%ecx # is object null?
+ je common_errNullObject
+ movl rINST_FULL,(%ecx,%eax,1)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: x86/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
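+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): the
+ * quickened index BBBB goes straight into the receiver's vtable:
+ *
+ *     if (this == NULL) throw NullPointerException;
+ *     Method* meth = this->clazz->vtable[BBBB];
+ */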
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ movzwl 4(rPC),%eax # eax<- FEDC or CCCC
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ .if (!0)
+ andl $0xf,%eax # eax<- C (or stays CCCC)
+ .endif
+ GET_VREG(%eax,%eax) # eax<- vC ("this" ptr)
+ testl %eax,%eax # null?
+ je common_errNullObject # yep, throw exception
+ movl offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+ movl offClassObject_vtable(%eax),%eax # eax<- thisPtr->clazz->vtable
+ EXPORT_PC() # might throw later - get ready
+ movl (%eax,%ecx,4),%eax # eax<- vtable[BBBB]
+ movl $0,%ecx # pass range flag
+ #jmp common_invokeMethodNoRange
+ jmp common_invokeOld
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: x86/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: x86/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ movzwl 4(rPC),%eax # eax<- FEDC or CCCC
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ .if (!1)
+ andl $0xf,%eax # eax<- C (or stays CCCC)
+ .endif
+ GET_VREG(%eax,%eax) # eax<- vC ("this" ptr)
+ testl %eax,%eax # null?
+ je common_errNullObject # yep, throw exception
+ movl offObject_clazz(%eax),%eax # eax<- thisPtr->clazz
+ movl offClassObject_vtable(%eax),%eax # eax<- thisPtr->clazz->vtable
+ EXPORT_PC() # might throw later - get ready
+ movl (%eax,%ecx,4),%eax # eax<- vtable[BBBB]
+ movl $1,%ecx # pass range flag
+ #jmp common_invokeMethodRange
+ jmp common_invokeOld
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: x86/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
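+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): unlike
+ * virtual-quick, the vtable comes from the superclass of the *calling*
+ * method's class, not from the receiver:
+ *
+ *     Method* meth = method->clazz->super->vtable[BBBB];
+ */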
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 4(rPC),%eax # eax<- GFED or CCCC
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ .if (!0)
+ andl $0xf,%eax # eax<- D (or stays CCCC)
+ .endif
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ GET_VREG(%eax,%eax) # eax<- "this"
+ movl offClassObject_super(%ecx),%ecx # ecx<- method->clazz->super
+ testl %eax,%eax # null "this"?
+ je common_errNullObject # "this" is null, throw exception
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offClassObject_vtable(%ecx),%ecx # ecx<- vtable
+ EXPORT_PC()
+ movl (%ecx,%eax,4),%eax # eax<- super->vtable[BBBB]
+ movl $0,%ecx # ecx<- range flag
+ #jmp common_invokeMethodNoRange
+ jmp common_invokeOld
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: x86/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: x86/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ GET_GLUE(%ecx)
+ movzwl 4(rPC),%eax # eax<- GFED or CCCC
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ .if (!1)
+ andl $0xf,%eax # eax<- D (or stays CCCC)
+ .endif
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ GET_VREG(%eax,%eax) # eax<- "this"
+ movl offClassObject_super(%ecx),%ecx # ecx<- method->clazz->super
+ testl %eax,%eax # null "this"?
+ je common_errNullObject # "this" is null, throw exception
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offClassObject_vtable(%ecx),%ecx # ecx<- vtable
+ EXPORT_PC()
+ movl (%ecx,%eax,4),%eax # eax<- super->vtable[BBBB]
+ movl $1,%ecx # ecx<- range flag
+ #jmp common_invokeMethodRange
+ jmp common_invokeOld
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FC: /* 0xfc */
+/* File: x86/OP_UNUSED_FC.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FD: /* 0xfd */
+/* File: x86/OP_UNUSED_FD.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FE: /* 0xfe */
+/* File: x86/OP_UNUSED_FE.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FF: /* 0xff */
+/* File: x86/OP_UNUSED_FF.S */
+/* File: x86/unused.S */
+ jmp common_abort
+
+
+
+ .balign 64
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CONST_STRING */
+
+/* This is the less common path, so we'll redo some work
+ here rather than force spills on the common path */
+.LOP_CONST_STRING_resolve:
+ GET_GLUE(%eax)
+ movl %ecx,rINST_FULL # rINST_FULL<- AA
+ EXPORT_PC()
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl offMethod_clazz(%eax),%eax
+ SPILL(rPC)
+ movl %ecx,OUT_ARG1(%esp)
+ movl %eax,OUT_ARG0(%esp)
+ call dvmResolveString # go resolve
+ UNSPILL(rPC)
+ testl %eax,%eax # failed?
+ je common_exceptionThrown
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_CONST_STRING_JUMBO */
+
+/* This is the less common path, so we'll redo some work
+ here rather than force spills on the common path */
+.LOP_CONST_STRING_JUMBO_resolve:
+ GET_GLUE(%eax)
+ movl %ecx,rINST_FULL # rINST_FULL<- AA
+ EXPORT_PC()
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ movl 2(rPC),%ecx # ecx<- BBBBBBBB
+ movl offMethod_clazz(%eax),%eax
+ SPILL(rPC)
+ movl %ecx,OUT_ARG1(%esp)
+ movl %eax,OUT_ARG0(%esp)
+ call dvmResolveString # go resolve
+ UNSPILL(rPC)
+ testl %eax,%eax # failed?
+ je common_exceptionThrown
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(3)
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+
+/* continuation for OP_CONST_CLASS */
+
+/* This is the less common path, so we'll redo some work
+ here rather than force spills on the common path */
+.LOP_CONST_CLASS_resolve:
+ GET_GLUE(%eax)
+ movl %ecx,rINST_FULL # rINST_FULL<- AA
+ EXPORT_PC()
+ movl offGlue_method(%eax),%eax # eax<- glue->method
+ movl $1,OUT_ARG2(%esp) # true
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl offMethod_clazz(%eax),%eax
+ SPILL(rPC)
+ movl %ecx,OUT_ARG1(%esp)
+ movl %eax,OUT_ARG0(%esp)
+ call dvmResolveClass # go resolve
+ UNSPILL(rPC)
+ testl %eax,%eax # failed?
+ je common_exceptionThrown
+ SET_VREG(%eax,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_MONITOR_ENTER */
+
+.LOP_MONITOR_ENTER_continue:
+ SPILL(rPC) # have to - caller save
+ movl %ecx,OUT_ARG0(%esp)
+ movl %eax,OUT_ARG1(%esp)
+ call dvmLockObject # dvmLockObject(self,object)
+ UNSPILL(rPC)
+#ifdef WITH_DEADLOCK_PREDICTION
+ GET_GLUE(%ecx)
+    movl     offGlue_self(%ecx),%ecx       # ecx<- glue->self
+ movl offThread_exception(%ecx),%eax
+ testl %eax,%eax
+ jne common_exceptionThrown
+#endif
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_MONITOR_EXIT */
+
+.LOP_MONITOR_EXIT_continue:
+ call dvmUnlockObject # unlock(self,obj)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(1)
+ testl %eax,%eax # success?
+ je common_exceptionThrown # no, exception pending
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_CHECK_CAST */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * ecx holds obj->clazz
+ * eax holds class resolved from BBBB
+ * rINST_FULL holds object
+ */
+.LOP_CHECK_CAST_fullcheck:
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ SPILL(rPC)
+ call dvmInstanceofNonTrivial # eax<- boolean result
+ UNSPILL(rPC)
+ testl %eax,%eax # failed?
+ jne .LOP_CHECK_CAST_okay # no, success
+
+ # A cast has failed. We need to throw a ClassCastException with the
+ # class of the object that failed to be cast.
+ EXPORT_PC()
+ movl offObject_clazz(rINST_FULL),%ecx # ecx<- obj->clazz
+ movl $.LstrClassCastException,%eax
+ movl offClassObject_descriptor(%ecx),%ecx
+ movl %eax,OUT_ARG0(%esp) # arg0<- message
+ movl %ecx,OUT_ARG1(%esp) # arg1<- obj->clazz->descriptor
+ SPILL(rPC)
+ call dvmThrowExceptionWithClassMessage
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+ /*
+ * Resolution required. This is the least-likely path, and we're
+ * going to have to recreate some data.
+ *
+ * rINST_FULL holds object
+ */
+.LOP_CHECK_CAST_resolve:
+ GET_GLUE(%ecx)
+ EXPORT_PC()
+ movzwl 2(rPC),%eax # eax<- BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
+    movl     offMethod_clazz(%ecx),%ecx    # ecx<- method->clazz
+ movl $0,OUT_ARG2(%esp) # arg2<- false
+ movl %ecx,OUT_ARG0(%esp) # arg0<- method->clazz
+ SPILL(rPC)
+ call dvmResolveClass # eax<- resolved ClassObject ptr
+ UNSPILL(rPC)
+ testl %eax,%eax # got null?
+ je common_exceptionThrown # yes, handle exception
+ movl offObject_clazz(rINST_FULL),%ecx # ecx<- obj->clazz
+ jmp .LOP_CHECK_CAST_resolved # pick up where we left off
+
+/* continuation for OP_INSTANCE_OF */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * eax holds obj->clazz
+ * ecx holds class resolved from BBBB
+ * rINST_HI has BA
+ * rPC already spilled
+ */
+.LOP_INSTANCE_OF_fullcheck:
+ movl %eax,OUT_ARG0(%esp)
+ movl %ecx,OUT_ARG1(%esp)
+ call dvmInstanceofNonTrivial # eax<- boolean result
+ # fall through to OP_INSTANCE_OF_store
+
+ /*
+ * eax holds boolean result
+ * rINST_HI holds BA
+ */
+.LOP_INSTANCE_OF_store:
+ UNSPILL(rPC)
+ movzbl rINST_HI,%ecx # ecx<- BA
+ FETCH_INST_WORD(2)
+    andb     $0xf,%cl                      # ecx<- A
+ ADVANCE_PC(2)
+ SET_VREG(%eax,%ecx) # vA<- eax
+ GOTO_NEXT
+
+ /*
+ * Trivial test succeeded, save and bail.
+     * rINST_HI holds BA
+ */
+.LOP_INSTANCE_OF_trivial:
+ UNSPILL(rPC)
+ movzbl rINST_HI,%ecx # ecx<- BA
+ FETCH_INST_WORD(2)
+    andb     $0xf,%cl                      # ecx<- A
+ ADVANCE_PC(2)
+ movl $1,%eax
+ SET_VREG(%eax,%ecx) # vA<- true
+ GOTO_NEXT
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * rPC holds BBBB
+ * rINST_HI holds BA
+ */
+.LOP_INSTANCE_OF_resolve:
+ movl rPC,OUT_ARG1(%esp) # arg1<- BBBB
+ GET_GLUE(%ecx)
+ UNSPILL(rPC)
+ movl offGlue_method(%ecx),%ecx
+ movl $1,OUT_ARG2(%esp) # arg2<- true
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ EXPORT_PC()
+ movl %ecx,OUT_ARG0(%esp) # arg0<- method->clazz
+ call dvmResolveClass # eax<- resolved ClassObject ptr
+ UNSPILL(rPC)
+ testl %eax,%eax # success?
+ je common_exceptionThrown # no, handle exception
+/* Now, we need to sync up with fast path. We need eax to
+ * hold the obj->clazz, and ecx to hold the resolved class
+ */
+ movl %eax,%ecx # ecx<- resolved class
+ movzbl rINST_HI,%eax # eax<- BA
+ sarl $4,%eax # eax<- B
+ GET_VREG(%eax,%eax) # eax<- vB (obj)
+ movl offObject_clazz(%eax),%eax # eax<- obj->clazz
+ jmp .LOP_INSTANCE_OF_resolved
+
+
+/* continuation for OP_NEW_INSTANCE */
+
+.LOP_NEW_INSTANCE_initialized: # on entry, ecx<- class
+ testl $(ACC_INTERFACE|ACC_ABSTRACT),offClassObject_accessFlags(%ecx)
+ movl $ALLOC_DONT_TRACK,OUT_ARG1(%esp)
+ jne .LOP_NEW_INSTANCE_abstract
+.LOP_NEW_INSTANCE_finish: # ecx=class
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmAllocObject # eax<- new object
+ UNSPILL(rPC)
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ testl %eax,%eax # success?
+ je common_exceptionThrown # no, bail out
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+ /*
+ * Class initialization required.
+ *
+ * ecx holds class object
+ */
+.LOP_NEW_INSTANCE_needinit:
+ SPILL_TMP(%ecx) # save object
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmInitClass # initialize class
+ UNSPILL_TMP(%ecx) # restore object
+ testl %eax,%eax # success?
+ jne .LOP_NEW_INSTANCE_initialized # success, continue
+ UNSPILL(rPC) # failed, restore PC
+ jmp common_exceptionThrown # go deal with init exception
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ */
+.LOP_NEW_INSTANCE_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movl %eax,OUT_ARG1(%esp)
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ movl $0,OUT_ARG2(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveClass # call(clazz,off,flags)
+ movl %eax,%ecx # ecx<- resolved ClassObject ptr
+ testl %ecx,%ecx # success?
+ jne .LOP_NEW_INSTANCE_resolved # good to go
+ UNSPILL(rPC)
+ jmp common_exceptionThrown # no, handle exception
+
+ /*
+ * We can't instantiate an abstract class or interface, so throw an
+ * InstantiationError with the class descriptor as the message.
+ *
+ * ecx holds class object
+ */
+.LOP_NEW_INSTANCE_abstract:
+ movl offClassObject_descriptor(%ecx),%eax
+ movl $.LstrInstantiationError,OUT_ARG0(%esp)
+ movl %eax,OUT_ARG1(%esp)
+ call dvmThrowExceptionWithClassMessage
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+
+
+
+/* continuation for OP_NEW_ARRAY */
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ * ecx holds class (null here)
+ * eax holds array length (vB)
+ */
+.LOP_NEW_ARRAY_resolve:
+ GET_GLUE(%ecx)
+ SPILL_TMP(%eax) # save array length
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movzwl 2(rPC),%eax # eax<- CCCC
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ movl %eax,OUT_ARG1(%esp)
+ movl $0,OUT_ARG2(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ SPILL(rPC)
+ call dvmResolveClass # eax<- call(clazz,ref,flag)
+ UNSPILL(rPC)
+ movl %eax,%ecx
+ UNSPILL_TMP(%eax)
+ testl %ecx,%ecx # successful resolution?
+ je common_exceptionThrown # no, bail.
+# fall through to OP_NEW_ARRAY_finish
+
+ /*
+ * Finish allocation
+ *
+ * ecx holds class
+ * eax holds array length (vB)
+ */
+.LOP_NEW_ARRAY_finish:
+ movl %ecx,OUT_ARG0(%esp)
+ movl %eax,OUT_ARG1(%esp)
+ movl $ALLOC_DONT_TRACK,OUT_ARG2(%esp)
+ SPILL(rPC)
+ call dvmAllocArrayByClass # eax<- call(clazz,length,flags)
+ UNSPILL(rPC)
+ testl %eax,%eax # failed?
+ je common_exceptionThrown # yup - go handle
+ movl rINST_FULL,%ecx
+ FETCH_INST_WORD(2)
+ SET_VREG(%eax,%ecx)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+.LOP_FILLED_NEW_ARRAY_more:
+ movl offMethod_clazz(%eax),%eax # eax<- method->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- clazz
+ call dvmResolveClass # eax<- call(clazz,ref,flag)
+ UNSPILL(rPC)
+ testl %eax,%eax # null?
+ je common_exceptionThrown # yes, handle it
+
+ # note: fall through to .LOP_FILLED_NEW_ARRAY_continue
+
+ /*
+ * On entry:
+ * eax holds array class [r0]
+     * rINST_FULL holds BA (non-range) or AA (range) [r10]
+ * ecx is scratch
+ * rPC is valid, but has been spilled
+ */
+.LOP_FILLED_NEW_ARRAY_continue:
+ movl offClassObject_descriptor(%eax),%ecx # ecx<- arrayClass->descriptor
+ movl $ALLOC_DONT_TRACK,OUT_ARG2(%esp) # arg2<- flags
+ movzbl 1(%ecx),%ecx # ecx<- descriptor[1]
+ movl %eax,OUT_ARG0(%esp) # arg0<- arrayClass
+ cmpb $'I',%cl # supported?
+ je 1f
+ cmpb $'L',%cl
+ je 1f
+ cmpb $'[',%cl
+ jne .LOP_FILLED_NEW_ARRAY_notimpl # no, not handled yet
+1:
+ .if (!0)
+ SPILL_TMP(rINST_FULL) # save copy, need "B" later
+ sarl $4,rINST_FULL
+ .endif
+ movl rINST_FULL,OUT_ARG1(%esp) # arg1<- A or AA (length)
+ call dvmAllocArrayByClass # eax<- call(arrayClass, length, flags)
+ UNSPILL(rPC)
+ GET_GLUE(%ecx)
+ testl %eax,%eax # alloc successful?
+ je common_exceptionThrown # no, handle exception
+ movl %eax,offGlue_retval(%ecx) # retval.l<- new array
+ movzwl 4(rPC),%ecx # ecx<- FEDC or CCCC
+ leal offArrayObject_contents(%eax),%eax # eax<- newArray->contents
+
+/* at this point:
+ * eax is pointer to tgt
+ * rINST_FULL is length
+ * ecx is FEDC or CCCC
+ * TMP_SPILL is BA
+ * rPC is valid, but spilled
+ * We now need to copy values from registers into the array
+ */
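+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output) of the
+ * nibble-walk copy loop below: up to five 4-bit register numbers are
+ * packed into one word and consumed low nibble first:
+ *
+ *     unsigned regs = (a << 16) | fedc;      // 000AFEDC
+ *     for (i = 0; i < count; i++) {
+ *         *dst++ = fp[regs & 0xf];
+ *         regs >>= 4;
+ *     }
+ */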
+
+ .if 0
+ # set up src pointer
+ SPILL(rFP) # esi
+ SPILL(rIBASE) # edi
+ movl %eax,%edi # set up dst ptr
+ leal (rFP,%ecx,4),%esi # set up src ptr
+ movl rINST_FULL,%ecx # load count register
+ FETCH_INST_WORD(3)
+ rep
+ movsd
+ UNSPILL(rIBASE)
+ UNSPILL(rFP)
+ .else
+ testl rINST_FULL,rINST_FULL
+ je 4f
+ UNSPILL_TMP(rPC)
+ andl $0x0f,rPC # rPC<- 0000000A
+ sall $16,rPC # rPC<- 000A0000
+    orl      %ecx,rPC                      # rPC<- 000AFEDC
+3:
+ movl $0xf,%ecx
+ andl rPC,%ecx # ecx<- next reg to load
+ GET_VREG(%ecx,%ecx)
+ shrl $4,rPC
+ leal 4(%eax),%eax
+ movl %ecx,-4(%eax)
+ sub $1,rINST_FULL
+ jne 3b
+4:
+ UNSPILL(rPC)
+ FETCH_INST_WORD(3)
+ .endif
+
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+ movl $.LstrInternalError,%eax
+ movl %eax,OUT_ARG0(%esp)
+ movl $.LstrFilledNewArrayNotImpl,%eax
+ movl %eax,OUT_ARG1(%esp)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+.LOP_FILLED_NEW_ARRAY_RANGE_more:
+ movl offMethod_clazz(%eax),%eax # eax<- method->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- clazz
+ call dvmResolveClass # eax<- call(clazz,ref,flag)
+ UNSPILL(rPC)
+ testl %eax,%eax # null?
+ je common_exceptionThrown # yes, handle it
+
+ # note: fall through to .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+ /*
+ * On entry:
+ * eax holds array class [r0]
+     * rINST_FULL holds BA (non-range) or AA (range) [r10]
+ * ecx is scratch
+ * rPC is valid, but has been spilled
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+ movl offClassObject_descriptor(%eax),%ecx # ecx<- arrayClass->descriptor
+ movl $ALLOC_DONT_TRACK,OUT_ARG2(%esp) # arg2<- flags
+ movzbl 1(%ecx),%ecx # ecx<- descriptor[1]
+ movl %eax,OUT_ARG0(%esp) # arg0<- arrayClass
+ cmpb $'I',%cl # supported?
+ je 1f
+ cmpb $'L',%cl
+ je 1f
+ cmpb $'[',%cl
+ jne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl # no, not handled yet
+1:
+ .if (!1)
+ SPILL_TMP(rINST_FULL) # save copy, need "B" later
+ sarl $4,rINST_FULL
+ .endif
+ movl rINST_FULL,OUT_ARG1(%esp) # arg1<- A or AA (length)
+ call dvmAllocArrayByClass # eax<- call(arrayClass, length, flags)
+ UNSPILL(rPC)
+ GET_GLUE(%ecx)
+ testl %eax,%eax # alloc successful?
+ je common_exceptionThrown # no, handle exception
+ movl %eax,offGlue_retval(%ecx) # retval.l<- new array
+ movzwl 4(rPC),%ecx # ecx<- FEDC or CCCC
+ leal offArrayObject_contents(%eax),%eax # eax<- newArray->contents
+
+/* at this point:
+ * eax is pointer to tgt
+ * rINST_FULL is length
+ * ecx is FEDC or CCCC
+ * TMP_SPILL is BA
+ * rPC is valid, but spilled
+ * We now need to copy values from registers into the array
+ */
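+/*
+ * Illustrative C sketch (editor's sketch, not gen-mterp.py output): the
+ * range form copies a contiguous run of registers, so the rep movsd below
+ * amounts to
+ *
+ *     memcpy(newArray->contents, &fp[CCCC], count * 4);   // <string.h>
+ */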
+
+ .if 1
+ # set up src pointer
+ SPILL(rFP) # esi
+ SPILL(rIBASE) # edi
+ movl %eax,%edi # set up dst ptr
+ leal (rFP,%ecx,4),%esi # set up src ptr
+ movl rINST_FULL,%ecx # load count register
+ FETCH_INST_WORD(3)
+ rep
+ movsd
+ UNSPILL(rIBASE)
+ UNSPILL(rFP)
+ .else
+ testl rINST_FULL,rINST_FULL
+ je 4f
+ UNSPILL_TMP(rPC)
+ andl $0x0f,rPC # rPC<- 0000000A
+ sall $16,rPC # rPC<- 000A0000
+    orl      %ecx,rPC                      # rPC<- 000AFEDC
+3:
+ movl $0xf,%ecx
+ andl rPC,%ecx # ecx<- next reg to load
+ GET_VREG(%ecx,%ecx)
+ shrl $4,rPC
+ leal 4(%eax),%eax
+ movl %ecx,-4(%eax)
+ sub $1,rINST_FULL
+ jne 3b
+4:
+ UNSPILL(rPC)
+ FETCH_INST_WORD(3)
+ .endif
+
+ ADVANCE_PC(3)
+ GOTO_NEXT
+
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+ movl $.LstrInternalError,%eax
+ movl %eax,OUT_ARG0(%esp)
+ movl $.LstrFilledNewArrayNotImpl,%eax
+ movl %eax,OUT_ARG1(%esp)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+
+/* continuation for OP_CMPL_FLOAT */
+
+.LOP_CMPL_FLOAT_isNaN:
+ movl $-1,%ecx
+ jmp .LOP_CMPL_FLOAT_finish
+
+/* continuation for OP_CMPG_FLOAT */
+
+.LOP_CMPG_FLOAT_isNaN:
+ movl $1,%ecx
+ jmp .LOP_CMPG_FLOAT_finish
+
+/* continuation for OP_CMPL_DOUBLE */
+
+.LOP_CMPL_DOUBLE_isNaN:
+ movl $-1,%ecx
+ jmp .LOP_CMPL_DOUBLE_finish
+
+/* continuation for OP_CMPG_DOUBLE */
+
+.LOP_CMPG_DOUBLE_isNaN:
+ movl $1,%ecx
+ jmp .LOP_CMPG_DOUBLE_finish
+
+/* continuation for OP_CMP_LONG */
+
+.LOP_CMP_LONG_bigger:
+ UNSPILL(rPC)
+ movl $1,%ecx
+ jmp .LOP_CMP_LONG_finish
+.LOP_CMP_LONG_smaller:
+ UNSPILL(rPC)
+ movl $-1,%ecx
+.LOP_CMP_LONG_finish:
+ SET_VREG(%ecx,rINST_FULL)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_AGET_WIDE */
+
+.LOP_AGET_WIDE_finish:
+ leal offArrayObject_contents(%eax,%ecx,8),%eax
+ movl (%eax),%ecx
+ movl 4(%eax),%eax
+ SET_VREG_WORD(%ecx,rINST_FULL,0)
+ SET_VREG_WORD(%eax,rINST_FULL,1)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_APUT_WIDE */
+
+.LOP_APUT_WIDE_finish:
+ leal offArrayObject_contents(%eax,%ecx,8),%eax
+ GET_VREG_WORD(%ecx,rINST_FULL,0)
+ GET_VREG_WORD(rINST_FULL,rINST_FULL,1)
+ movl rINST_FULL,4(%eax)
+ FETCH_INST_WORD(2)
+ movl %ecx,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_APUT_OBJECT */
+
+ /* On entry:
+ * eax<- array object
+ * ecx<- index
+ * rINST_FULL<- vAA
+ */
+.LOP_APUT_OBJECT_continue:
+ leal offArrayObject_contents(%eax,%ecx,4),%ecx
+ testl rINST_FULL,rINST_FULL # storing null reference?
+ je .LOP_APUT_OBJECT_skip_check
+ SPILL(rPC)
+ SPILL_TMP(%ecx)
+ movl offObject_clazz(%eax),%eax # eax<- arrayObj->clazz
+ movl offObject_clazz(rINST_FULL),%ecx # ecx<- obj->clazz
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmCanPutArrayElement # test object type vs. array type
+ UNSPILL(rPC)
+ UNSPILL_TMP(%ecx)
+ testl %eax,%eax
+ je common_errArrayStore
+
+.LOP_APUT_OBJECT_skip_check:
+ movl rINST_FULL,(%ecx)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET */
+
+
+.LOP_IGET_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movl (%ecx,%eax,1),%ecx # ecx<- obj.field (8/16/32 bits)
+ movl rINST_FULL,%eax # eax<- A
+ FETCH_INST_WORD(2)
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET_WIDE */
+
+
+.LOP_IGET_WIDE_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_WIDE_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_WIDE_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ leal (%ecx,%eax,1),%eax # eax<- address of field
+ movl (%eax),%ecx # ecx<- lsw
+ movl 4(%eax),%eax # eax<- msw
+ SET_VREG_WORD(%ecx,rINST_FULL,0)
+ SET_VREG_WORD(%eax,rINST_FULL,1)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET_OBJECT */
+
+
+.LOP_IGET_OBJECT_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_OBJECT_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_OBJECT_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movl (%ecx,%eax,1),%ecx # ecx<- obj.field (8/16/32 bits)
+ movl rINST_FULL,%eax # eax<- A
+ FETCH_INST_WORD(2)
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET_BOOLEAN */
+
+
+.LOP_IGET_BOOLEAN_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_BOOLEAN_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_BOOLEAN_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movzbl (%ecx,%eax,1),%ecx # ecx<- obj.field (8/16/32 bits)
+ movl rINST_FULL,%eax # eax<- A
+ FETCH_INST_WORD(2)
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET_BYTE */
+
+
+.LOP_IGET_BYTE_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_BYTE_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_BYTE_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movsbl (%ecx,%eax,1),%ecx # ecx<- obj.field (8/16/32 bits)
+ movl rINST_FULL,%eax # eax<- A
+ FETCH_INST_WORD(2)
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET_CHAR */
+
+
+.LOP_IGET_CHAR_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_CHAR_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_CHAR_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movzwl (%ecx,%eax,1),%ecx # ecx<- obj.field (8/16/32 bits)
+ movl rINST_FULL,%eax # eax<- A
+ FETCH_INST_WORD(2)
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IGET_SHORT */
+
+
+.LOP_IGET_SHORT_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IGET_SHORT_finish
+ jmp common_exceptionThrown
+
+.LOP_IGET_SHORT_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movswl (%ecx,%eax,1),%ecx # ecx<- obj.field (8/16/32 bits)
+ movl rINST_FULL,%eax # eax<- A
+ FETCH_INST_WORD(2)
+ SET_VREG(%ecx,%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_IPUT */
+
+
+.LOP_IPUT_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movl rINST_FULL,(%ecx,%eax,1) # obj.field <- v[A](8/16/32 bits)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_IPUT_WIDE */
+
+
+.LOP_IPUT_WIDE_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_WIDE_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_WIDE_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ leal (%ecx,%eax,1),%eax # eax<- address of field
+ GET_VREG_WORD(%ecx,rINST_FULL,0) # ecx<- lsw
+ GET_VREG_WORD(rINST_FULL,rINST_FULL,1) # rINST_FULL<- msw
+ movl rINST_FULL,4(%eax)
+ FETCH_INST_WORD(2)
+ movl %ecx,(%eax)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_IPUT_OBJECT */
+
+
+.LOP_IPUT_OBJECT_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_OBJECT_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_OBJECT_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movl rINST_FULL,(%ecx,%eax,1) # obj.field <- v[A](8/16/32 bits)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+
+.LOP_IPUT_BOOLEAN_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_BOOLEAN_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_BOOLEAN_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movb rINST_LO,(%ecx,%eax,1) # obj.field <- v[A](8/16/32 bits)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_IPUT_BYTE */
+
+
+.LOP_IPUT_BYTE_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_BYTE_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_BYTE_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movb rINST_LO,(%ecx,%eax,1) # obj.field <- v[A](8/16/32 bits)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_IPUT_CHAR */
+
+
+.LOP_IPUT_CHAR_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_CHAR_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_CHAR_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movw rINST,(%ecx,%eax,1) # obj.field <- v[A](8/16/32 bits)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_IPUT_SHORT */
+
+
+.LOP_IPUT_SHORT_resolve:
+ EXPORT_PC()
+ SPILL(rPC)
+ movl offGlue_method(rIBASE),rPC # rPC<- current method
+ UNSPILL(rIBASE)
+ movl offMethod_clazz(rPC),rPC # rPC<- method->clazz
+ SPILL_TMP(%ecx) # save object pointer across call
+ movl rPC,OUT_ARG0(%esp) # pass in method->clazz
+ call dvmResolveInstField # ... to dvmResolveInstField
+ UNSPILL_TMP(%ecx)
+ UNSPILL(rPC)
+ testl %eax,%eax # ... which returns InstrField ptr
+ jne .LOP_IPUT_SHORT_finish
+ jmp common_exceptionThrown
+
+.LOP_IPUT_SHORT_finish:
+ /*
+ * Currently:
+ * eax holds resolved field
+ * ecx holds object
+ * rIBASE is scratch, but needs to be unspilled
+ * rINST_FULL holds A
+ */
+ GET_VREG(rINST_FULL,rINST_FULL) # rINST_FULL<- v[A]
+ movl offInstField_byteOffset(%eax),%eax # eax<- byte offset of field
+ UNSPILL(rIBASE)
+ testl %ecx,%ecx # object null?
+ je common_errNullObject # object was null
+ movw rINST,(%ecx,%eax,1) # obj.field <- v[A](8/16/32 bits)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_SGET */
+
+ /*
+ * Go resolve the field
+ */
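+    /*
+     * Shared shape for every sget/sput resolver below: the field index
+     * BBBB comes from the second code unit at 2(rPC), resolution goes
+     * through dvmResolveStaticField, and success branches back to the
+     * opcode's _finish path.
+     */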
+.LOP_SGET_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SGET_WIDE */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SGET_WIDE_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_WIDE_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SGET_OBJECT */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SGET_OBJECT_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_OBJECT_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SGET_BOOLEAN */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SGET_BOOLEAN_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_BOOLEAN_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SGET_BYTE */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SGET_BYTE_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_BYTE_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SGET_CHAR */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SGET_CHAR_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_CHAR_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SGET_SHORT */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SGET_SHORT_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SGET_SHORT_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT_WIDE */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_WIDE_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_WIDE_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT_OBJECT */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_OBJECT_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_OBJECT_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_BOOLEAN_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_BOOLEAN_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT_BYTE */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_BYTE_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_BYTE_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT_CHAR */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_CHAR_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_CHAR_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_SPUT_SHORT */
+
+ /*
+ * Go resolve the field
+ */
+.LOP_SPUT_SHORT_resolve:
+ GET_GLUE(%ecx)
+ movzwl 2(rPC),%eax # eax<- field ref BBBB
+ movl offGlue_method(%ecx),%ecx # ecx<- current method
+ EXPORT_PC() # could throw, need to export
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ SPILL(rPC)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveStaticField # eax<- resolved StaticField ptr
+ UNSPILL(rPC)
+ testl %eax,%eax
+ jne .LOP_SPUT_SHORT_finish # success, continue
+ jmp common_exceptionThrown # no, handle exception
+
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+
+.LOP_INVOKE_VIRTUAL_more:
+    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- clazz
+ movl $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
+ call dvmResolveMethod # eax<- call(clazz, ref, flags)
+ UNSPILL(rPC)
+ testl %eax,%eax # got null?
+ jne .LOP_INVOKE_VIRTUAL_continue # no, continue
+ jmp common_exceptionThrown # yes, handle exception
+
+ /* At this point:
+ * eax = resolved base method
+ * ecx = scratch
+ */
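+    /*
+     * Roughly, in C (a sketch using the off* names from this file):
+     *   this = GET_VREG(D or CCCC);
+     *   methodToCall = this->clazz->vtable[baseMethod->methodIndex];
+     */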
+.LOP_INVOKE_VIRTUAL_continue:
+ movzwl 4(rPC),%ecx # ecx<- GFED or CCCC
+ .if (!0)
+ andl $0xf,%ecx # ecx<- D (or stays CCCC)
+ .endif
+ GET_VREG(%ecx,%ecx) # ecx<- "this"
+ movzwl offMethod_methodIndex(%eax),%eax # eax<- baseMethod->methodIndex
+ testl %ecx,%ecx # null this?
+ je common_errNullObject # go if so
+ movl offObject_clazz(%ecx),%ecx # ecx<- thisPtr->clazz
+ movl offClassObject_vtable(%ecx),%ecx # ecx<- thisPtr->clazz->vtable
+ movl (%ecx,%eax,4),%eax # eax<- vtable[methodIndex]
+ movl $0,%ecx # needed for common_invokeOld
+ #jmp common_invokeMethodNoRange
+ jmp common_invokeOld
+
+
+/* continuation for OP_INVOKE_SUPER */
+
+ /*
+ * At this point:
+ * ecx = resolved base method [r0]
+ * eax = method->clazz [r9]
+ */
+.LOP_INVOKE_SUPER_continue:
+ movl offClassObject_super(%eax),%eax # eax<- method->clazz->super
+    movzwl    offMethod_methodIndex(%ecx),%ecx  # ecx<- baseMethod->methodIndex
+ cmpl offClassObject_vtableCount(%eax),%ecx # compare(methodIndex,vtableCount)
+ jae .LOP_INVOKE_SUPER_nsm # method not present in superclass
+ movl offClassObject_vtable(%eax),%eax # eax<- ...clazz->super->vtable
+ movl (%eax,%ecx,4),%eax # eax<- vtable[methodIndex]
+ movl $0,%ecx
+ #jmp common_invokeMethodNoRange
+ jmp common_invokeOld
+
+ /* At this point:
+ * ecx = null (needs to be resolved base method)
+ * eax = method->clazz
+ */
+.LOP_INVOKE_SUPER_resolve:
+ SPILL_TMP(%eax) # method->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- method->clazz
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- resolver method type
+ movl %ecx,OUT_ARG1(%esp) # arg1<- ref
+ SPILL(rPC)
+ call dvmResolveMethod # eax<- call(clazz, ref, flags)
+ UNSPILL(rPC)
+ testl %eax,%eax # got null?
+ movl %eax,%ecx # ecx<- resolved base method
+ UNSPILL_TMP(%eax) # restore method->clazz
+ jne .LOP_INVOKE_SUPER_continue # good to go - continue
+ jmp common_exceptionThrown # handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * ecx = resolved base method
+ */
+.LOP_INVOKE_SUPER_nsm:
+ movl offMethod_name(%ecx),%eax
+    movl    %eax,OUT_ARG1(%esp)
+ jmp common_errNoSuchMethod
+
+/* continuation for OP_INVOKE_DIRECT */
+
+ /*
+ * On entry:
+ * TMP_SPILL <- "this" register
+     * Things are a bit ugly on this path, but it's the less
+ * frequent one. We'll have to do some reloading.
+ */
+.LOP_INVOKE_DIRECT_resolve:
+ SPILL_TMP(%ecx)
+ GET_GLUE(%ecx)
+ UNSPILL(rPC)
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movzwl 2(rPC),%eax # reference (BBBB or CCCC)
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ movl $METHOD_DIRECT,OUT_ARG2(%esp)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveMethod # eax<- call(clazz, ref, flags)
+ UNSPILL_TMP(%ecx)
+ testl %eax,%eax
+ jne .LOP_INVOKE_DIRECT_finish
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/* continuation for OP_INVOKE_STATIC */
+
+.LOP_INVOKE_STATIC_continue:
+ movl $METHOD_STATIC,%eax
+ movl %eax,OUT_ARG2(%esp) # arg2<- flags
+ SPILL(rPC)
+ call dvmResolveMethod # call(clazz,ref,flags)
+ UNSPILL(rPC)
+ movl $0,%ecx
+ testl %eax,%eax # got null?
+ #jne common_invokeMethodNoRange
+ jne common_invokeOld
+ jmp common_exceptionThrown
+
+
+/* continuation for OP_INVOKE_INTERFACE */
+
+.LOP_INVOKE_INTERFACE_continue:
+ call dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
+ UNSPILL(rPC)
+ testl %eax,%eax
+ je common_exceptionThrown
+ movl $0,%ecx
+ #jmp common_invokeMethodNoRange
+ jmp common_invokeOld
+
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+
+.LOP_INVOKE_VIRTUAL_RANGE_more:
+    movl      offMethod_clazz(%eax),%eax # eax<- method->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- clazz
+ movl $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- flags
+ call dvmResolveMethod # eax<- call(clazz, ref, flags)
+ UNSPILL(rPC)
+ testl %eax,%eax # got null?
+ jne .LOP_INVOKE_VIRTUAL_RANGE_continue # no, continue
+ jmp common_exceptionThrown # yes, handle exception
+
+ /* At this point:
+ * eax = resolved base method
+ * ecx = scratch
+ */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+ movzwl 4(rPC),%ecx # ecx<- GFED or CCCC
+ .if (!1)
+ andl $0xf,%ecx # ecx<- D (or stays CCCC)
+ .endif
+ GET_VREG(%ecx,%ecx) # ecx<- "this"
+ movzwl offMethod_methodIndex(%eax),%eax # eax<- baseMethod->methodIndex
+ testl %ecx,%ecx # null this?
+ je common_errNullObject # go if so
+ movl offObject_clazz(%ecx),%ecx # ecx<- thisPtr->clazz
+ movl offClassObject_vtable(%ecx),%ecx # ecx<- thisPtr->clazz->vtable
+ movl (%ecx,%eax,4),%eax # eax<- vtable[methodIndex]
+ movl $1,%ecx # needed for common_invokeOld
+ #jmp common_invokeMethodRange
+ jmp common_invokeOld
+
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+ /*
+ * At this point:
+ * ecx = resolved base method [r0]
+ * eax = method->clazz [r9]
+ */
+.LOP_INVOKE_SUPER_RANGE_continue:
+ movl offClassObject_super(%eax),%eax # eax<- method->clazz->super
+    movzwl    offMethod_methodIndex(%ecx),%ecx  # ecx<- baseMethod->methodIndex
+ cmpl offClassObject_vtableCount(%eax),%ecx # compare(methodIndex,vtableCount)
+ jae .LOP_INVOKE_SUPER_RANGE_nsm # method not present in superclass
+ movl offClassObject_vtable(%eax),%eax # eax<- ...clazz->super->vtable
+ movl (%eax,%ecx,4),%eax # eax<- vtable[methodIndex]
+ movl $1,%ecx
+ #jmp common_invokeMethodRange
+ jmp common_invokeOld
+
+ /* At this point:
+ * ecx = null (needs to be resolved base method)
+ * eax = method->clazz
+ */
+.LOP_INVOKE_SUPER_RANGE_resolve:
+ SPILL_TMP(%eax) # method->clazz
+ movl %eax,OUT_ARG0(%esp) # arg0<- method->clazz
+ movzwl 2(rPC),%ecx # ecx<- BBBB
+ movl $METHOD_VIRTUAL,OUT_ARG2(%esp) # arg2<- resolver method type
+ movl %ecx,OUT_ARG1(%esp) # arg1<- ref
+ SPILL(rPC)
+ call dvmResolveMethod # eax<- call(clazz, ref, flags)
+ UNSPILL(rPC)
+ testl %eax,%eax # got null?
+ movl %eax,%ecx # ecx<- resolved base method
+ UNSPILL_TMP(%eax) # restore method->clazz
+ jne .LOP_INVOKE_SUPER_RANGE_continue # good to go - continue
+ jmp common_exceptionThrown # handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * ecx = resolved base method
+ */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+ movl offMethod_name(%ecx),%eax
+    movl    %eax,OUT_ARG1(%esp)
+ jmp common_errNoSuchMethod
+
+/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+ /*
+ * On entry:
+ * TMP_SPILL <- "this" register
+     * Things are a bit ugly on this path, but it's the less
+ * frequent one. We'll have to do some reloading.
+ */
+.LOP_INVOKE_DIRECT_RANGE_resolve:
+ SPILL_TMP(%ecx)
+ GET_GLUE(%ecx)
+ UNSPILL(rPC)
+ movl offGlue_method(%ecx),%ecx # ecx<- glue->method
+ movzwl 2(rPC),%eax # reference (BBBB or CCCC)
+ movl offMethod_clazz(%ecx),%ecx # ecx<- method->clazz
+ movl $METHOD_DIRECT,OUT_ARG2(%esp)
+ movl %eax,OUT_ARG1(%esp)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmResolveMethod # eax<- call(clazz, ref, flags)
+ UNSPILL_TMP(%ecx)
+ testl %eax,%eax
+ jne .LOP_INVOKE_DIRECT_RANGE_finish
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/* continuation for OP_INVOKE_STATIC_RANGE */
+
+.LOP_INVOKE_STATIC_RANGE_continue:
+ movl $METHOD_STATIC,%eax
+ movl %eax,OUT_ARG2(%esp) # arg2<- flags
+ SPILL(rPC)
+ call dvmResolveMethod # call(clazz,ref,flags)
+ UNSPILL(rPC)
+ movl $1,%ecx
+ testl %eax,%eax # got null?
+ #jne common_invokeMethodRange
+ jne common_invokeOld
+ jmp common_exceptionThrown
+
+
+/* continuation for OP_INVOKE_INTERFACE_RANGE */
+
+.LOP_INVOKE_INTERFACE_RANGE_continue:
+ call dvmFindInterfaceMethodInCache # eax<- call(class, ref, method, dex)
+ UNSPILL(rPC)
+ testl %eax,%eax
+ je common_exceptionThrown
+ movl $1,%ecx
+ #jmp common_invokeMethodRange
+ jmp common_invokeOld
+
+
+/* continuation for OP_FLOAT_TO_INT */
+
+
+.LOP_FLOAT_TO_INT_continue:
+ .if 0
+ movl $0x80000000,%eax
+ xorl 4(rFP,%ecx,4),%eax
+ orl (rFP,%ecx,4),%eax
+ .else
+ cmpl $0x80000000,(rFP,%ecx,4)
+ .endif
+ je .LOP_FLOAT_TO_INT_special_case # fix up result
+
+.LOP_FLOAT_TO_INT_finish:
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
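+    /*
+     * fistpl stores the "integer indefinite" pattern 0x80000000 when
+     * the source is NaN or out of range, which is what routed us here.
+     * The ftst done by the opcode's main handler (not shown here) left
+     * its condition codes in the FPU status word: parity set means NaN
+     * (result forced to 0); otherwise carry reflects the sign, so
+     * "adcl $-1" leaves minint for negative inputs and turns positive
+     * overflow into maxint.  The long/double variants below use the
+     * same scheme on the 64-bit pattern.
+     */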
+.LOP_FLOAT_TO_INT_special_case:
+ fnstsw %ax
+ sahf
+ jp .LOP_FLOAT_TO_INT_isNaN
+ adcl $-1,(rFP,%ecx,4)
+ .if 0
+ adcl $-1,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_FLOAT_TO_INT_finish
+.LOP_FLOAT_TO_INT_isNaN:
+ movl $0,(rFP,%ecx,4)
+ .if 0
+ movl $0,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_FLOAT_TO_INT_finish
+
+/* continuation for OP_FLOAT_TO_LONG */
+
+
+.LOP_FLOAT_TO_LONG_continue:
+ .if 1
+ movl $0x80000000,%eax
+ xorl 4(rFP,%ecx,4),%eax
+ orl (rFP,%ecx,4),%eax
+ .else
+ cmpl $0x80000000,(rFP,%ecx,4)
+ .endif
+ je .LOP_FLOAT_TO_LONG_special_case # fix up result
+
+.LOP_FLOAT_TO_LONG_finish:
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+.LOP_FLOAT_TO_LONG_special_case:
+ fnstsw %ax
+ sahf
+ jp .LOP_FLOAT_TO_LONG_isNaN
+ adcl $-1,(rFP,%ecx,4)
+ .if 1
+ adcl $-1,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_FLOAT_TO_LONG_finish
+.LOP_FLOAT_TO_LONG_isNaN:
+ movl $0,(rFP,%ecx,4)
+ .if 1
+ movl $0,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_FLOAT_TO_LONG_finish
+
+/* continuation for OP_DOUBLE_TO_INT */
+
+
+.LOP_DOUBLE_TO_INT_continue:
+ .if 0
+ movl $0x80000000,%eax
+ xorl 4(rFP,%ecx,4),%eax
+ orl (rFP,%ecx,4),%eax
+ .else
+ cmpl $0x80000000,(rFP,%ecx,4)
+ .endif
+ je .LOP_DOUBLE_TO_INT_special_case # fix up result
+
+.LOP_DOUBLE_TO_INT_finish:
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+.LOP_DOUBLE_TO_INT_special_case:
+ fnstsw %ax
+ sahf
+ jp .LOP_DOUBLE_TO_INT_isNaN
+ adcl $-1,(rFP,%ecx,4)
+ .if 0
+ adcl $-1,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_DOUBLE_TO_INT_finish
+.LOP_DOUBLE_TO_INT_isNaN:
+ movl $0,(rFP,%ecx,4)
+ .if 0
+ movl $0,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_DOUBLE_TO_INT_finish
+
+/* continuation for OP_DOUBLE_TO_LONG */
+
+
+.LOP_DOUBLE_TO_LONG_continue:
+ .if 1
+ movl $0x80000000,%eax
+ xorl 4(rFP,%ecx,4),%eax
+ orl (rFP,%ecx,4),%eax
+ .else
+ cmpl $0x80000000,(rFP,%ecx,4)
+ .endif
+ je .LOP_DOUBLE_TO_LONG_special_case # fix up result
+
+.LOP_DOUBLE_TO_LONG_finish:
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+.LOP_DOUBLE_TO_LONG_special_case:
+ fnstsw %ax
+ sahf
+ jp .LOP_DOUBLE_TO_LONG_isNaN
+ adcl $-1,(rFP,%ecx,4)
+ .if 1
+ adcl $-1,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_DOUBLE_TO_LONG_finish
+.LOP_DOUBLE_TO_LONG_isNaN:
+ movl $0,(rFP,%ecx,4)
+ .if 1
+ movl $0,4(rFP,%ecx,4)
+ .endif
+ jmp .LOP_DOUBLE_TO_LONG_finish
+
+/* continuation for OP_DIV_INT */
+.LOP_DIV_INT_continue_div:
+ cltd
+ idivl %ecx
+.LOP_DIV_INT_finish_div:
+    movzbl   rINST_HI,%ecx          # ecx<- AA
+ SET_VREG(%eax,%ecx)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_REM_INT */
+.LOP_REM_INT_continue_div:
+ cltd
+ idivl %ecx
+.LOP_REM_INT_finish_div:
+    movzbl   rINST_HI,%ecx          # ecx<- AA
+ SET_VREG(%edx,%ecx)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_MUL_LONG */
+
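+    /* On entry (set up by the main handler, not shown here): %edx:%eax
+     * holds the full lo*lo partial product and %ecx the summed cross
+     * products, so folding %ecx into %edx completes the 64-bit multiply.
+     */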
+.LOP_MUL_LONG_continue:
+ leal (%ecx,%edx),%edx # full result now in %edx:%eax
+ movzbl rINST_HI,%ecx # ecx<- A
+ movl %edx,4(rFP,%ecx,4) # v[B+1]<- %edx
+ UNSPILL(rPC) # restore rPC/%edx
+ FETCH_INST_WORD(2)
+ UNSPILL(rIBASE)
+ movl %eax,(rFP,%ecx,4) # v[B]<- %eax
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+
+/* continuation for OP_DIV_LONG */
+
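+/*
+ * Note rPC is an alias of %edx in this port and was spilled by the
+ * main handler, so it serves double duty below: as scratch for the
+ * zero and minint/-1 divisor checks, and as the high word of the
+ * __divdi3 result in the finish path.  Every exit therefore UNSPILLs
+ * it before resuming.
+ */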
+.LOP_DIV_LONG_continue:
+ call __divdi3
+.LOP_DIV_LONG_finish:
+ movzbl rINST_HI,%ecx
+ SET_VREG_WORD(rPC,%ecx,1)
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+.LOP_DIV_LONG_check_zero:
+ testl rPC,rPC
+ jne .LOP_DIV_LONG_notSpecial
+ UNSPILL(rPC)
+ jmp common_errDivideByZero
+.LOP_DIV_LONG_check_neg1:
+ testl rPC,%eax
+ jne .LOP_DIV_LONG_notSpecial
+ GET_VREG_WORD(rPC,%ecx,0)
+ GET_VREG_WORD(%ecx,%ecx,1)
+ testl rPC,rPC
+ jne .LOP_DIV_LONG_notSpecial1
+ cmpl $0x80000000,%ecx
+ jne .LOP_DIV_LONG_notSpecial1
+ /* minint / -1, return minint on div, 0 on rem */
+ xorl %eax,%eax
+ movl $0x80000000,%edx
+ jmp .LOP_DIV_LONG_finish
+
+
+/* continuation for OP_REM_LONG */
+
+.LOP_REM_LONG_continue:
+ call __moddi3
+.LOP_REM_LONG_finish:
+ movzbl rINST_HI,%ecx
+ SET_VREG_WORD(rPC,%ecx,1)
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+.LOP_REM_LONG_check_zero:
+ testl rPC,rPC
+ jne .LOP_REM_LONG_notSpecial
+ UNSPILL(rPC)
+ jmp common_errDivideByZero
+.LOP_REM_LONG_check_neg1:
+ testl rPC,%eax
+ jne .LOP_REM_LONG_notSpecial
+ GET_VREG_WORD(rPC,%ecx,0)
+ GET_VREG_WORD(%ecx,%ecx,1)
+ testl rPC,rPC
+ jne .LOP_REM_LONG_notSpecial1
+ cmpl $0x80000000,%ecx
+ jne .LOP_REM_LONG_notSpecial1
+ /* minint / -1, return minint on div, 0 on rem */
+ xorl %eax,%eax
+ movl $0,%edx
+ jmp .LOP_REM_LONG_finish
+
+
+/* continuation for OP_SHL_LONG */
+
+.LOP_SHL_LONG_finish:
+ SET_VREG_WORD(%eax,%ecx,0) # v[AA+0]<- %eax
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_SHR_LONG */
+
+
+.LOP_SHR_LONG_finish:
+ SET_VREG_WORD(%eax,%ecx,0) # v[AA+0]<- eax
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_USHR_LONG */
+
+
+.LOP_USHR_LONG_finish:
+ SET_VREG_WORD(%eax,%ecx,0) # v[BB+0]<- eax
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_DIV_INT_2ADDR */
+.LOP_DIV_INT_2ADDR_continue_div2addr:
+ cltd
+ idivl %ecx
+.LOP_DIV_INT_2ADDR_finish_div2addr:
+ SET_VREG(%eax,rINST_FULL)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_REM_INT_2ADDR */
+.LOP_REM_INT_2ADDR_continue_div2addr:
+ cltd
+ idivl %ecx
+.LOP_REM_INT_2ADDR_finish_div2addr:
+ SET_VREG(%edx,rINST_FULL)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_MUL_LONG_2ADDR */
+
+.LOP_MUL_LONG_2ADDR_continue:
+ leal (%ecx,%edx),%edx # full result now in %edx:%eax
+ movl %edx,4(rIBASE) # v[A+1]<- %edx
+ UNSPILL(rPC) # restore rPC/%edx
+ FETCH_INST_WORD(1)
+ movl %eax,(rIBASE) # v[A]<- %eax
+ UNSPILL(rFP)
+ UNSPILL(rIBASE)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+
+/* continuation for OP_DIV_LONG_2ADDR */
+
+.LOP_DIV_LONG_2ADDR_continue:
+ movl %eax,OUT_ARG3(%esp)
+ movl rPC,OUT_ARG0(%esp)
+ movl %ecx,OUT_ARG1(%esp)
+ call __divdi3
+.LOP_DIV_LONG_2ADDR_finish:
+ movl rINST_FULL,%ecx
+ SET_VREG_WORD(rPC,%ecx,1)
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+.LOP_DIV_LONG_2ADDR_check_zero:
+ testl rPC,rPC
+ jne .LOP_DIV_LONG_2ADDR_notSpecial
+ UNSPILL(rPC)
+ jmp common_errDivideByZero
+.LOP_DIV_LONG_2ADDR_check_neg1:
+ testl rPC,%eax
+ jne .LOP_DIV_LONG_2ADDR_notSpecial
+ GET_VREG_WORD(rPC,rINST_FULL,0)
+ GET_VREG_WORD(%ecx,rINST_FULL,1)
+ testl rPC,rPC
+ jne .LOP_DIV_LONG_2ADDR_notSpecial1
+ cmpl $0x80000000,%ecx
+ jne .LOP_DIV_LONG_2ADDR_notSpecial1
+ /* minint / -1, return minint on div, 0 on rem */
+ xorl %eax,%eax
+ movl $0x80000000,%edx
+ jmp .LOP_DIV_LONG_2ADDR_finish
+
+
+/* continuation for OP_REM_LONG_2ADDR */
+
+.LOP_REM_LONG_2ADDR_continue:
+ movl %eax,OUT_ARG3(%esp)
+ movl rPC,OUT_ARG0(%esp)
+ movl %ecx,OUT_ARG1(%esp)
+ call __moddi3
+.LOP_REM_LONG_2ADDR_finish:
+ movl rINST_FULL,%ecx
+ SET_VREG_WORD(rPC,%ecx,1)
+ UNSPILL(rPC)
+ SET_VREG_WORD(%eax,%ecx,0)
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+.LOP_REM_LONG_2ADDR_check_zero:
+ testl rPC,rPC
+ jne .LOP_REM_LONG_2ADDR_notSpecial
+ UNSPILL(rPC)
+ jmp common_errDivideByZero
+.LOP_REM_LONG_2ADDR_check_neg1:
+ testl rPC,%eax
+ jne .LOP_REM_LONG_2ADDR_notSpecial
+ GET_VREG_WORD(rPC,rINST_FULL,0)
+ GET_VREG_WORD(%ecx,rINST_FULL,1)
+ testl rPC,rPC
+ jne .LOP_REM_LONG_2ADDR_notSpecial1
+ cmpl $0x80000000,%ecx
+ jne .LOP_REM_LONG_2ADDR_notSpecial1
+ /* minint / -1, return minint on div, 0 on rem */
+ xorl %eax,%eax
+ movl $0,%edx
+ jmp .LOP_REM_LONG_2ADDR_finish
+
+
+/* continuation for OP_SHL_LONG_2ADDR */
+
+
+.LOP_SHL_LONG_2ADDR_finish:
+ SET_VREG_WORD(%eax,rINST_FULL,0) # v[AA+0]<- eax
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_SHR_LONG_2ADDR */
+
+
+.LOP_SHR_LONG_2ADDR_finish:
+ SET_VREG_WORD(%eax,rINST_FULL,0) # v[AA+0]<- eax
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_USHR_LONG_2ADDR */
+
+
+.LOP_USHR_LONG_2ADDR_finish:
+ SET_VREG_WORD(%eax,rINST_FULL,0) # v[AA+0]<- eax
+ FETCH_INST_WORD(1)
+ ADVANCE_PC(1)
+ GOTO_NEXT
+
+/* continuation for OP_DIV_INT_LIT16 */
+.LOP_DIV_INT_LIT16_continue_div:
+ cltd
+ idivl %ecx
+.LOP_DIV_INT_LIT16_finish_div:
+ SET_VREG(%eax,rINST_FULL)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_REM_INT_LIT16 */
+.LOP_REM_INT_LIT16_continue_div:
+ cltd
+ idivl %ecx
+.LOP_REM_INT_LIT16_finish_div:
+ SET_VREG(%edx,rINST_FULL)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_DIV_INT_LIT8 */
+.LOP_DIV_INT_LIT8_continue_div:
+ cltd
+ idivl %ecx
+.LOP_DIV_INT_LIT8_finish_div:
+ SET_VREG(%eax,rINST_FULL)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_REM_INT_LIT8 */
+.LOP_REM_INT_LIT8_continue_div:
+ cltd
+ idivl %ecx
+.LOP_REM_INT_LIT8_finish_div:
+ SET_VREG(%edx,rINST_FULL)
+ UNSPILL(rPC)
+ FETCH_INST_WORD(2)
+ ADVANCE_PC(2)
+ GOTO_NEXT
+
+/* continuation for OP_EXECUTE_INLINE */
+
+.LOP_EXECUTE_INLINE_continue:
+ /*
+ * Extract args, call function.
+ * ecx = #of args (0-4)
+ * eax = call index
+ * @esp = return addr
+ * esp is -4 from normal
+ *
+ * Go ahead and load all 4 args, even if not used.
+ */
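+    /* rPC was spilled by the main handler (not shown here); below it
+     * is reused as a shift register holding the FEDC argument nibbles.
+     */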
+ movzwl 4(rPC),rPC
+
+ movl $0xf,%ecx
+ andl rPC,%ecx
+ GET_VREG(%ecx,%ecx)
+ sarl $4,rPC
+ movl %ecx,4+OUT_ARG0(%esp)
+
+ movl $0xf,%ecx
+ andl rPC,%ecx
+ GET_VREG(%ecx,%ecx)
+ sarl $4,rPC
+ movl %ecx,4+OUT_ARG1(%esp)
+
+ movl $0xf,%ecx
+ andl rPC,%ecx
+ GET_VREG(%ecx,%ecx)
+ sarl $4,rPC
+ movl %ecx,4+OUT_ARG2(%esp)
+
+ movl $0xf,%ecx
+ andl rPC,%ecx
+ GET_VREG(%ecx,%ecx)
+ sarl $4,rPC
+ movl %ecx,4+OUT_ARG3(%esp)
+
+ sall $4,%eax # index *= sizeof(table entry)
+ jmp *gDvmInlineOpsTable(%eax)
+ # will return to caller of .LOP_EXECUTE_INLINE_continue
+
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+/* File: x86/entry.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+ .text
+ .global dvmMterpStdRun
+ .type dvmMterpStdRun, %function
+/*
+ * bool dvmMterpStdRun(MterpGlue* glue)
+ *
+ * Interpreter entry point. Returns changeInterp.
+ *
+ */
+dvmMterpStdRun:
+ push %ebp
+ movl %esp,%ebp
+ push %edi
+ push %esi
+ push %ebx
+
+/* at this point, stack is misaligned by 1 word
+ We're allocating spill space for 6 words, plus
+ outgoing argument (5 words) and local variables
+ (4 words) - 15 words or 60 bytes total. See
+ diagram in header.S
+*/
+ subl $60,%esp
+
+/* Set up "named" registers */
+ movl IN_ARG0(%ebp),%ecx
+ movl %ecx,rGLUE_SPILL(%ebp)
+ LOAD_PC_FROM_GLUE(%ecx)
+ LOAD_FP_FROM_GLUE(%ecx)
+ movl $dvmAsmInstructionStart,rIBASE
+
+/* Remember %esp for future "longjmp" */
+ movl %esp,offGlue_bailPtr(%ecx)
+
+/* How to start? */
+ movb offGlue_entryPoint(%ecx),%al
+
+/* Normal start? */
+ cmpb $kInterpEntryInstr,%al
+ jne .Lnot_instr
+
+ /* Normal case: start executing the instruction at rPC */
+ FETCH_INST()
+ GOTO_NEXT
+
+.Lnot_instr:
+ /* Reset to normal case */
+ movb $kInterpEntryInstr,offGlue_entryPoint(%ecx)
+ cmpb $kInterpEntryReturn,%al
+ je common_returnFromMethod
+ cmpb $kInterpEntryThrow,%al
+ je common_exceptionThrown
+ movzx %al,%eax
+ movl %eax,OUT_ARG1(%esp)
+ movl $.LstrBadEntryPoint,OUT_ARG0(%esp)
+ call printf
+ call dvmAbort
+ /* Not reached */
+
+
+ .global dvmMterpStdBail
+ .type dvmMterpStdBail, %function
+/*
+ * void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+ *
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper. The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We're not going to build a standard frame here, so the arg accesses will
+ * look a little strange.
+ *
+ * On entry:
+ * esp+4 (arg0) MterpGlue* glue
+ * esp+8 (arg1) bool changeInterp
+ */
+dvmMterpStdBail:
+ movl 4(%esp),%ecx # grab glue
+ movl 8(%esp),%eax # changeInterp to return reg
+ movl offGlue_bailPtr(%ecx),%esp # Stack back to normal
+ addl $60,%esp # Strip dvmMterpStdRun's frame
+ pop %ebx
+ pop %esi
+ pop %edi
+ pop %ebp
+ ret # return to dvmMterpStdRun's caller
+
+
+/*
+ * Strings
+ */
+ .section .rodata
+.LstrBadEntryPoint:
+ .asciz "Bad entry point %d\n"
+
+/* File: x86/footer.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Common subroutines and data.
+ */
+
+/*
+ * Common code when a backwards branch is taken
+ *
+ * On entry:
+ * ebx (a.k.a. rINST_FULL) -> PC adjustment in 16-bit words
+ */
+common_backwardBranch:
+ GET_GLUE(%ecx)
+ call common_periodicChecks # Note: expects rPC to be preserved
+ ADVANCE_PC_INDEXED(rINST_FULL)
+ FETCH_INST()
+ GOTO_NEXT
+
+/*
+ * Common invoke code (old-style).
+ * TUNING: Rewrite along lines of new armv5 code?
+ *
+ * On entry:
+ * eax = Method* methodToCall
+ * ecx = bool methodCallRange
+ * rINST trashed, must reload
+ */
+common_invokeOld:
+ movl %ecx,OUT_ARG1(%esp) # arg1<- methodCallRange
+ GET_GLUE(%ecx)
+ movzwl (rPC),rINST_FULL # recover rINST
+ movl %eax,OUT_ARG2(%esp) # arg2<- method
+ movzwl 4(rPC),%eax # eax<- GFED or CCCC
+ SAVE_PC_TO_GLUE(%ecx)
+ SAVE_FP_TO_GLUE(%ecx)
+ movzbl rINST_HI,rINST_FULL
+    movl      rINST_FULL,OUT_ARG3(%esp) # arg3<- AA
+ movl %ecx,OUT_ARG0(%esp) # arg0<- GLUE
+ movl %eax,OUT_ARG4(%esp) # arg4<- GFED/CCCC
+ call dvmMterp_invokeMethod
+ jmp common_resumeAfterGlueCall
+
+
+/*
+ * Do we need the thread to be suspended or have debugger/profiling activity?
+ *
+ * On entry:
+ * ebx -> PC adjustment in 16-bit words (must be preserved)
+ * ecx -> GLUE pointer
+ *
+ * Note: A call will normally kill %eax, rPC/%edx and %ecx. To
+ * streamline the normal case, this routine will preserve rPC and
+ * %ecx in addition to the normal caller save regs. The save/restore
+ * is a bit ugly, but will happen in the relatively uncommon path.
+ * TUNING: Might be worthwhile to inline this.
+ * TODO: Basic-block style Jit will need a hook here as well. Fold it into
+ * the suspendCount check so we can get both in 1 shot.
+ */
+common_periodicChecks:
+ movl offGlue_pSelfSuspendCount(%ecx),%eax # eax <- &suspendCount
+ cmpl $0,(%eax)
+ jne 1f
+
+#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
+#if defined(WITH_DEBUGGER)
+ movl offGlue_pDebuggerActive(%ecx),%eax # eax <- &DebuggerActive
+#endif
+#if defined(WITH_PROFILER)
+ movl offGlue_pActiveProfilers(%ecx),%ecx # ecx <- &ActiveProfilers
+#endif
+#if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
+ movzbl (%eax),%eax # eax <- debuggerActive (boolean)
+ orl (%ecx),%eax # eax <- debuggerActive || activeProfilers
+#elif defined(WITH_DEBUGGER)
+ movzbl (%eax),%eax # eax <- debuggerActive (boolean)
+#elif defined(WITH_PROFILER)
+    movl     (%ecx),%eax             # eax <- activeProfilers
+#endif
+ GET_GLUE(%ecx) # restore rGLUE
+ testl %eax,%eax
+ jne 3f # one or both active - switch interp
+#endif
+
+ ret
+
+ /* Check for suspend */
+1:
+ /* At this point, the return pointer to the caller of
+ * common_periodicChecks is on the top of stack. We need to preserve
+ * rPC(edx) and GLUE(ecx). We'll spill rPC, and reload GLUE.
+ * The outgoing profile is:
+ * bool dvmCheckSuspendPending(Thread* self)
+ * Because we reached here via a call, go ahead and build a new frame.
+ */
+ movl offGlue_self(%ecx),%eax # eax<- glue->self
+ SPILL(rPC) # save edx
+ push %ebp
+ movl %esp,%ebp
+ subl $24,%esp
+ movl %eax,OUT_ARG0(%esp)
+ call dvmCheckSuspendPending
+ addl $24,%esp
+ pop %ebp
+ UNSPILL(rPC)
+ GET_GLUE(%ecx)
+ ret
+
+ /* Switch interpreters */
+ /* Note: %ebx contains the 16-bit word offset to be applied to rPC to
+ * "complete" the interpretation of backwards branches. In effect, we
+ * are completing the interpretation of the branch instruction here,
+ * and the new interpreter will resume interpretation at the branch
+ * target. However, a switch request recognized during the handling
+ * of a return from method instruction results in an immediate abort,
+ * and the new interpreter will resume by re-interpreting the return
+ * instruction.
+ */
+3:
+ leal (rPC,%ebx,2),rPC # adjust pc to show target
+    GET_GLUE(%ecx)                     # bail expects GLUE already loaded
+ movl $1,rINST_FULL # set changeInterp to true
+ jmp common_gotoBail
+
+
+/*
+ * Common code for handling a return instruction
+ */
+common_returnFromMethod:
+ GET_GLUE(%ecx)
+ /* Set entry mode in case we bail */
+ movb $kInterpEntryReturn,offGlue_entryPoint(%ecx)
+ xorl rINST_FULL,rINST_FULL # zero offset in case we switch interps
+ call common_periodicChecks # Note: expects %ecx to be preserved
+
+ SAVEAREA_FROM_FP(%eax,rFP) # eax<- saveArea (old)
+ movl offStackSaveArea_prevFrame(%eax),rFP # rFP<- prevFrame
+ movl (offStackSaveArea_method-sizeofStackSaveArea)(rFP),rINST_FULL
+ cmpl $0,rINST_FULL # break?
+ je common_gotoBail # break frame, bail out completely
+
+ movl offStackSaveArea_savedPc(%eax),rPC # pc<- saveArea->savedPC
+ movl offGlue_self(%ecx),%eax # eax<- self
+    movl      rINST_FULL,offGlue_method(%ecx) # glue->method = newSave->method
+ movl rFP,offThread_curFrame(%eax) # self->curFrame = fp
+ movl offMethod_clazz(rINST_FULL),%eax # eax<- method->clazz
+ FETCH_INST_WORD(3)
+ movl offClassObject_pDvmDex(%eax),%eax # eax<- method->clazz->pDvmDex
+ ADVANCE_PC(3)
+ movl %eax,offGlue_methodClassDex(%ecx)
+ /* not bailing - restore entry mode to default */
+ movb $kInterpEntryInstr,offGlue_entryPoint(%ecx)
+ GOTO_NEXT
+
+/*
+ * Prepare to strip the current frame and "longjump" back to caller of
+ * dvmMterpStdRun.
+ *
+ * on entry:
+ * rINST_FULL holds changeInterp
+ * ecx holds glue pointer
+ *
+ * expected profile: dvmMterpStdBail(MterpGlue *glue, bool changeInterp)
+ */
+common_gotoBail:
+ SAVE_PC_TO_GLUE(%ecx) # export state to glue
+ SAVE_FP_TO_GLUE(%ecx)
+ movl %ecx,OUT_ARG0(%esp) # glue in arg0
+ movl rINST_FULL,OUT_ARG1(%esp) # changeInterp in arg1
+ call dvmMterpStdBail # bail out....
+
+
+/*
+ * After returning from a "glued" function, pull out the updated values
+ * and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ GET_GLUE(%ecx)
+ LOAD_PC_FROM_GLUE(%ecx)
+ LOAD_FP_FROM_GLUE(%ecx)
+ FETCH_INST()
+ GOTO_NEXT
+
+/*
+ * Integer divide or mod by zero
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ movl $.LstrArithmeticException,%eax
+ movl %eax,OUT_ARG0(%esp)
+ movl $.LstrDivideByZero,%eax
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ movl $.LstrNegativeArraySizeException,%eax
+ movl %eax,OUT_ARG0(%esp)
+ xorl %eax,%eax
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/*
+ * Attempt to invoke a nonexistent method.
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ movl $.LstrNoSuchMethodError,%eax
+ movl %eax,OUT_ARG0(%esp)
+ xorl %eax,%eax
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/*
+ * Hit a null object when we weren't expecting one. Export the PC, throw a
+ * NullPointerException and goto the exception processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ movl $.LstrNullPointerException,%eax
+ movl %eax,OUT_ARG0(%esp)
+ xorl %eax,%eax
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/*
+ * Array index exceeds max.
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ movl $.LstrArrayIndexException,%eax
+ movl %eax,OUT_ARG0(%esp)
+ xorl %eax,%eax
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+/*
+ * Invalid array value.
+ */
+common_errArrayStore:
+ EXPORT_PC()
+ movl $.LstrArrayStoreException,%eax
+ movl %eax,OUT_ARG0(%esp)
+ xorl %eax,%eax
+ movl %eax,OUT_ARG1(%esp)
+ SPILL(rPC)
+ call dvmThrowException
+ UNSPILL(rPC)
+ jmp common_exceptionThrown
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+common_exceptionThrown:
+ GET_GLUE(%ecx)
+ SAVE_PC_TO_GLUE(%ecx)
+ SAVE_FP_TO_GLUE(%ecx)
+ movl %ecx,OUT_ARG0(%esp)
+ call dvmMterp_exceptionThrown
+ jmp common_resumeAfterGlueCall
+
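+/*
+ * Should never be reached: call through a recognizable bogus address
+ * (0xdeadf00d) so the failure is easy to spot in a native crash dump.
+ */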
+common_abort:
+ movl $0xdeadf00d,%eax
+ call *%eax
+
+
+/*
+ * Strings
+ */
+
+ .section .rodata
+.LstrNullPointerException:
+ .asciz "Ljava/lang/NullPointerException;"
+.LstrArithmeticException:
+ .asciz "Ljava/lang/ArithmeticException;"
+.LstrDivideByZero:
+ .asciz "divide by zero"
+.LstrArrayIndexException:
+ .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;"
+.LstrArrayStoreException:
+ .asciz "Ljava/lang/ArrayStoreException;"
+.LstrNegativeArraySizeException:
+ .asciz "Ljava/lang/NegativeArraySizeException;"
+.LstrInstantiationError:
+ .asciz "Ljava/lang/InstantiationError;"
+.LstrClassCastException:
+ .asciz "Ljava/lang/ClassCastException;"
+.LstrNoSuchMethodError:
+ .asciz "Ljava/lang/NoSuchMethodError;"
+.LstrInternalError:
+ .asciz "Ljava/lang/InternalError;"
+.LstrFilledNewArrayNotImpl:
+ .asciz "filled-new-array only implemented for 'int'"
+
+
diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c
new file mode 100644
index 0000000..635a873
--- /dev/null
+++ b/vm/mterp/out/InterpC-allstubs.c
@@ -0,0 +1,3969 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'allstubs'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_PROFILER
+ * WITH_DEBUGGER
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter. If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP /* threaded vs. while-loop interpreter */
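+/*
+ * Illustrative sketch only (the names here are for exposition, not
+ * necessarily the macros used below): with THREADED_INTERP each
+ * handler ends by fetching and dispatching directly instead of
+ * looping back to a central switch:
+ *
+ *     case_OP_ADD_INT:
+ *         ...do the add...
+ *         pc += 2;                          // advance past this insn
+ *         inst = *pc;                       // fetch next code unit
+ *         goto *handlerTable[inst & 0xff];  // computed-goto dispatch
+ */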
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types. We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union. The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy(). The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method. Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields. Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* evaluate _offset only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
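+
+/*
+ * Worked example (sketch): OP_CONST vAA, #+BBBBBBBB occupies three 16-bit
+ * code units, so its handler finishes with FINISH(3), which invokes
+ * ADJUST_PC(3) and advances "pc" by three u2 units (six bytes).
+ */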
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#else
+ return *((s8*) &ptr[idx]);
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &val, 8);
+#else
+ *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#else
+ return *((double*) &ptr[idx]);
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &dval, 8);
+#else
+ *((double*) &ptr[idx]) = dval;
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit register indices from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
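+
+/*
+ * Worked example (sketch): for the 16-bit code unit 0xba01, i.e.
+ * "move v10, v11" (OP_MOVE is opcode 0x01):
+ *   INST_INST(0xba01) == 0x01
+ *   INST_A(0xba01)    == 0x0a   (vA = v10)
+ *   INST_B(0xba01)    == 0x0b   (vB = v11)
+ *   INST_AA(0xba01)   == 0xba   (if this were a vAA-format instruction)
+ */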
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly. If we don't do this, the offset
+ * within the current method won't be shown correctly. See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Determine if we need to switch to a different interpreter. "_current"
+ * is either INTERP_STD or INTERP_DBG. It is a compile-time constant for a
+ * given interpreter generation file, which lets the compiler remove the
+ * outer conditional from the following.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) ( \
+ (_current == INTERP_STD) ? \
+ dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+#define ATOMIC_CACHE_CALC \
+ dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+ return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+ DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+
+/* File: cstubs/stubdefs.c */
+/* this is a standard (no debug support) interpreter */
+#define INTERP_TYPE INTERP_STD
+#define CHECK_DEBUG_AND_PROF() ((void)0)
+#define CHECK_TRACKED_REFS() ((void)0)
+
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
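+
+/*
+ * Illustrative expansion (sketch): GOTO_TARGET(returnFromMethod) opens
+ *   void dvmMterp_returnFromMethod(MterpGlue* glue) {
+ *       u2 ref, vsrc1, vsrc2, vdst;
+ *       u2 inst = FETCH(0);
+ *       ...
+ * and GOTO_TARGET_END supplies the closing brace, so a "goto" to this
+ * label becomes an ordinary function call followed by a return.
+ */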
+
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references. (These are undefined down in "footer.c".)
+ */
+#define retval glue->retval
+#define pc glue->pc
+#define fp glue->fp
+#define curMethod glue->method
+#define methodClassDex glue->methodClassDex
+#define self glue->self
+#define debugTrackedRefStart glue->debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "glue" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#define HANDLE_OPCODE(_op) \
+ void dvmMterp_##_op(MterpGlue* glue) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0);
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done.
+ */
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ CHECK_DEBUG_AND_PROF(); \
+ CHECK_TRACKED_REFS(); \
+ return; \
+ }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ dvmMterp_##_target(glue, _methodCallRange); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp. Use "bail_switch"
+ * if we need to switch to the other interpreter upon our return.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(glue, false);
+#define GOTO_bail_switch() \
+ dvmMterpStdBail(glue, true);
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started. If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
+ dvmCheckSuspendQuick(self); \
+ if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
+ ADJUST_PC(_pcadj); \
+ glue->entryPoint = _entryPoint; \
+ LOGVV("threadid=%d: interp switch, ep=%d adj=%d\n", \
+ glue->self->threadId, (_entryPoint), (_pcadj)); \
+ GOTO_bail_switch(); \
+ } \
+ }
+
+
+/* File: c/opcommon.c */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text substitution.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
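+
+/*
+ * Example instantiation (sketch): the generator emits lines such as
+ *   HANDLE_NUMCONV(OP_INT_TO_LONG, "int-to-long", _INT, _WIDE)
+ * which expands to a handler that reads GET_REGISTER_INT(vsrc1) and
+ * stores it with SET_REGISTER_WIDE(vdst, ...), sign-extending via the
+ * implicit s4 -> s8 conversion.
+ */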
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 - 1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
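+
+/*
+ * Worked values (sketch): for _tovtype == s4,
+ *   intMin = (s4)1 << 31 == 0x80000000
+ *   intMax = ~intMin     == 0x7fffffff
+ * so +inf clamps to 0x7fffffff, -inf clamps to 0x80000000, and NaN
+ * converts to 0, as the Dalvik spec requires.
+ */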
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d\n", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
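+
+/*
+ * Note: Dalvik defines the shift distance by the operand width, so the
+ * long shift macros mask the count with 0x3f (low six bits) while the
+ * int shift macros above mask with 0x1f (low five bits).
+ */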
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ LOGV("Invalid array access: %p %d (len=%d)\n", \
+ arrayObj, vsrc2, arrayObj->length); \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&sfield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&sfield->field); \
+ } \
+ FINISH(2);
+
+
+/* File: c/OP_NOP.c */
+HANDLE_OPCODE(OP_NOP)
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE.c */
+HANDLE_OPCODE(OP_MOVE /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_WIDE.c */
+HANDLE_OPCODE(OP_MOVE_WIDE /*vA, vB*/)
+ /* IMPORTANT: must correctly handle overlapping registers, e.g. both
+ * "move-wide v6, v7" and "move-wide v7, v6" */
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move-wide v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+ kSpacing+5, vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_WIDE_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move-wide/from16 v%d,v%d (v%d=0x%08llx)", vdst, vsrc1,
+ vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_16.c */
+HANDLE_OPCODE(OP_MOVE_WIDE_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move-wide/16 v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+ kSpacing+8, vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_OBJECT.c */
+/* File: c/OP_MOVE.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(1);
+OP_END
+
+
+/* File: c/OP_MOVE_OBJECT_FROM16.c */
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(2);
+OP_END
+
+
+/* File: c/OP_MOVE_OBJECT_16.c */
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(3);
+OP_END
+
+
+/* File: c/OP_MOVE_RESULT.c */
+HANDLE_OPCODE(OP_MOVE_RESULT /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+ vdst, kSpacing+4, vdst, retval.i);
+ SET_REGISTER(vdst, retval.i);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_WIDE.c */
+HANDLE_OPCODE(OP_MOVE_RESULT_WIDE /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result-wide v%d %s(0x%08llx)", vdst, kSpacing, retval.j);
+ SET_REGISTER_WIDE(vdst, retval.j);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_OBJECT.c */
+/* File: c/OP_MOVE_RESULT.c */
+HANDLE_OPCODE(OP_MOVE_RESULT_OBJECT /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+ vdst, kSpacing+4, vdst, retval.i);
+ SET_REGISTER(vdst, retval.i);
+ FINISH(1);
+OP_END
+
+
+/* File: c/OP_MOVE_EXCEPTION.c */
+HANDLE_OPCODE(OP_MOVE_EXCEPTION /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-exception v%d", vdst);
+ assert(self->exception != NULL);
+ SET_REGISTER(vdst, (u4)self->exception);
+ dvmClearException(self);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_RETURN_VOID.c */
+HANDLE_OPCODE(OP_RETURN_VOID /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; // placate valgrind
+#endif
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN.c */
+HANDLE_OPCODE(OP_RETURN /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return%s v%d",
+ (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+ retval.i = GET_REGISTER(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN_WIDE.c */
+HANDLE_OPCODE(OP_RETURN_WIDE /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return-wide v%d", vsrc1);
+ retval.j = GET_REGISTER_WIDE(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN_OBJECT.c */
+/* File: c/OP_RETURN.c */
+HANDLE_OPCODE(OP_RETURN_OBJECT /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return%s v%d",
+ (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+ retval.i = GET_REGISTER(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+
+/* File: c/OP_CONST_4.c */
+HANDLE_OPCODE(OP_CONST_4 /*vA, #+B*/)
+ {
+ s4 tmp;
+
+ vdst = INST_A(inst);
+ tmp = (s4) (INST_B(inst) << 28) >> 28; // sign extend 4-bit value
+ ILOGV("|const/4 v%d,#0x%02x", vdst, (s4)tmp);
+ SET_REGISTER(vdst, tmp);
+ }
+ FINISH(1);
+OP_END
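+
+/*
+ * Worked example (sketch): the "<< 28 >> 28" pair sign-extends the 4-bit
+ * literal, so INST_B(inst) == 0xf yields tmp == -1 and 0x7 yields 7.
+ */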
+
+/* File: c/OP_CONST_16.c */
+HANDLE_OPCODE(OP_CONST_16 /*vAA, #+BBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+ SET_REGISTER(vdst, (s2) vsrc1);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST.c */
+HANDLE_OPCODE(OP_CONST /*vAA, #+BBBBBBBB*/)
+ {
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const v%d,#0x%08x", vdst, tmp);
+ SET_REGISTER(vdst, tmp);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_HIGH16.c */
+HANDLE_OPCODE(OP_CONST_HIGH16 /*vAA, #+BBBB0000*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const/high16 v%d,#0x%04x0000", vdst, vsrc1);
+ SET_REGISTER(vdst, vsrc1 << 16);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_16.c */
+HANDLE_OPCODE(OP_CONST_WIDE_16 /*vAA, #+BBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const-wide/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+ SET_REGISTER_WIDE(vdst, (s2)vsrc1);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_32.c */
+HANDLE_OPCODE(OP_CONST_WIDE_32 /*vAA, #+BBBBBBBB*/)
+ {
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const-wide/32 v%d,#0x%08x", vdst, tmp);
+ SET_REGISTER_WIDE(vdst, (s4) tmp);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_WIDE.c */
+HANDLE_OPCODE(OP_CONST_WIDE /*vAA, #+BBBBBBBBBBBBBBBB*/)
+ {
+ u8 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u8)FETCH(2) << 16;
+ tmp |= (u8)FETCH(3) << 32;
+ tmp |= (u8)FETCH(4) << 48;
+ ILOGV("|const-wide v%d,#0x%08llx", vdst, tmp);
+ SET_REGISTER_WIDE(vdst, tmp);
+ }
+ FINISH(5);
+OP_END
+
+/* File: c/OP_CONST_WIDE_HIGH16.c */
+HANDLE_OPCODE(OP_CONST_WIDE_HIGH16 /*vAA, #+BBBB000000000000*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const-wide/high16 v%d,#0x%04x000000000000", vdst, vsrc1);
+ SET_REGISTER_WIDE(vdst, ((u8) vsrc1) << 48);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING.c */
+HANDLE_OPCODE(OP_CONST_STRING /*vAA, string@BBBB*/)
+ {
+ StringObject* strObj;
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|const-string v%d string@0x%04x", vdst, ref);
+ strObj = dvmDexGetResolvedString(methodClassDex, ref);
+ if (strObj == NULL) {
+ EXPORT_PC();
+ strObj = dvmResolveString(curMethod->clazz, ref);
+ if (strObj == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) strObj);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING_JUMBO.c */
+HANDLE_OPCODE(OP_CONST_STRING_JUMBO /*vAA, string@BBBBBBBB*/)
+ {
+ StringObject* strObj;
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const-string/jumbo v%d string@0x%08x", vdst, tmp);
+ strObj = dvmDexGetResolvedString(methodClassDex, tmp);
+ if (strObj == NULL) {
+ EXPORT_PC();
+ strObj = dvmResolveString(curMethod->clazz, tmp);
+ if (strObj == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) strObj);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_CLASS.c */
+HANDLE_OPCODE(OP_CONST_CLASS /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|const-class v%d class@0x%04x", vdst, ref);
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ EXPORT_PC();
+ clazz = dvmResolveClass(curMethod->clazz, ref, true);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) clazz);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MONITOR_ENTER.c */
+HANDLE_OPCODE(OP_MONITOR_ENTER /*vAA*/)
+ {
+ Object* obj;
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|monitor-enter v%d %s(0x%08x)",
+ vsrc1, kSpacing+6, GET_REGISTER(vsrc1));
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (!checkForNullExportPC(obj, fp, pc))
+ GOTO_exceptionThrown();
+ ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
+#ifdef WITH_MONITOR_TRACKING
+ EXPORT_PC(); /* need for stack trace */
+#endif
+ dvmLockObject(self, obj);
+#ifdef WITH_DEADLOCK_PREDICTION
+ if (dvmCheckException(self))
+ GOTO_exceptionThrown();
+#endif
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MONITOR_EXIT.c */
+HANDLE_OPCODE(OP_MONITOR_EXIT /*vAA*/)
+ {
+ Object* obj;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|monitor-exit v%d %s(0x%08x)",
+ vsrc1, kSpacing+5, GET_REGISTER(vsrc1));
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (!checkForNull(obj)) {
+ /*
+ * The exception needs to be processed at the *following*
+ * instruction, not the current instruction (see the Dalvik
+ * spec). Because we're jumping to an exception handler,
+ * we're not actually at risk of skipping an instruction
+ * by doing so.
+ */
+ ADJUST_PC(1); /* monitor-exit width is 1 */
+ GOTO_exceptionThrown();
+ }
+ ILOGV("+ unlocking %p %s\n", obj, obj->clazz->descriptor);
+ if (!dvmUnlockObject(self, obj)) {
+ assert(dvmCheckException(self));
+ ADJUST_PC(1);
+ GOTO_exceptionThrown();
+ }
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_CHECK_CAST.c */
+HANDLE_OPCODE(OP_CHECK_CAST /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+ Object* obj;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst);
+ ref = FETCH(1); /* class to check against */
+ ILOGV("|check-cast v%d,class@0x%04x", vsrc1, ref);
+
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+ if (!checkForNull(obj))
+ GOTO_exceptionThrown();
+#endif
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ clazz = dvmResolveClass(curMethod->clazz, ref, false);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ if (!dvmInstanceof(obj->clazz, clazz)) {
+ dvmThrowExceptionWithClassMessage(
+ "Ljava/lang/ClassCastException;", obj->clazz->descriptor);
+ GOTO_exceptionThrown();
+ }
+ }
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_INSTANCE_OF.c */
+HANDLE_OPCODE(OP_INSTANCE_OF /*vA, vB, class@CCCC*/)
+ {
+ ClassObject* clazz;
+ Object* obj;
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst); /* object to check */
+ ref = FETCH(1); /* class to check against */
+ ILOGV("|instance-of v%d,v%d,class@0x%04x", vdst, vsrc1, ref);
+
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (obj == NULL) {
+ SET_REGISTER(vdst, 0);
+ } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+ if (!checkForNullExportPC(obj, fp, pc))
+ GOTO_exceptionThrown();
+#endif
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ EXPORT_PC();
+ clazz = dvmResolveClass(curMethod->clazz, ref, true);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+ }
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ARRAY_LENGTH.c */
+HANDLE_OPCODE(OP_ARRAY_LENGTH /*vA, vB*/)
+ {
+ ArrayObject* arrayObj;
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ ILOGV("|array-length v%d,v%d (%p)", vdst, vsrc1, arrayObj);
+ if (!checkForNullExportPC((Object*) arrayObj, fp, pc))
+ GOTO_exceptionThrown();
+ /* verifier guarantees this is an array reference */
+ SET_REGISTER(vdst, arrayObj->length);
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE.c */
+HANDLE_OPCODE(OP_NEW_INSTANCE /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+ Object* newObj;
+
+ EXPORT_PC();
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|new-instance v%d,class@0x%04x", vdst, ref);
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ clazz = dvmResolveClass(curMethod->clazz, ref, false);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+
+ if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+ GOTO_exceptionThrown();
+
+ /*
+ * Note: the verifier can ensure that this never happens, allowing us
+ * to remove the check. However, the spec requires we throw the
+ * exception at runtime, not verify time, so the verifier would
+ * need to replace the new-instance call with a magic "throw
+ * InstantiationError" instruction.
+ *
+ * Since this relies on the verifier, which is optional, we would
+ * also need a "new-instance-quick" instruction to identify instances
+ * that don't require the check.
+ */
+ if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+ dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+ clazz->descriptor);
+ GOTO_exceptionThrown();
+ }
+ newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+ if (newObj == NULL)
+ GOTO_exceptionThrown();
+ SET_REGISTER(vdst, (u4) newObj);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_NEW_ARRAY.c */
+HANDLE_OPCODE(OP_NEW_ARRAY /*vA, vB, class@CCCC*/)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ s4 length;
+
+ EXPORT_PC();
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst); /* length reg */
+ ref = FETCH(1);
+ ILOGV("|new-array v%d,v%d,class@0x%04x (%d elements)",
+ vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+ length = (s4) GET_REGISTER(vsrc1);
+ if (length < 0) {
+ dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+ GOTO_exceptionThrown();
+ }
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+ SET_REGISTER(vdst, (u4) newArray);
+ }
+ FINISH(2);
+OP_END
+
+
+/* File: c/OP_FILLED_NEW_ARRAY.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
+ GOTO_invoke(filledNewArray, false);
+OP_END
+
+/* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
+ GOTO_invoke(filledNewArray, true);
+OP_END
+
+/* File: c/OP_FILL_ARRAY_DATA.c */
+HANDLE_OPCODE(OP_FILL_ARRAY_DATA /*vAA, +BBBBBBBB*/)
+ {
+ const u2* arrayData;
+ s4 offset;
+ ArrayObject* arrayObj;
+
+ EXPORT_PC();
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|fill-array-data v%d +0x%04x", vsrc1, offset);
+ arrayData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (arrayData < curMethod->insns ||
+ arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ dvmThrowException("Ljava/lang/InternalError;",
+ "bad fill array data");
+ GOTO_exceptionThrown();
+ }
+#endif
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ if (!dvmInterpHandleFillArrayData(arrayObj, arrayData)) {
+ GOTO_exceptionThrown();
+ }
+ FINISH(3);
+ }
+OP_END
+
+/* File: c/OP_THROW.c */
+HANDLE_OPCODE(OP_THROW /*vAA*/)
+ {
+ Object* obj;
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|throw v%d (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
+ obj = (Object*) GET_REGISTER(vsrc1);
+ if (!checkForNullExportPC(obj, fp, pc)) {
+ /* will throw a null pointer exception */
+ LOGVV("Bad exception\n");
+ } else {
+ /* use the requested exception */
+ dvmSetException(self, obj);
+ }
+ GOTO_exceptionThrown();
+ }
+OP_END
+
+/* File: c/OP_GOTO.c */
+HANDLE_OPCODE(OP_GOTO /*+AA*/)
+ vdst = INST_AA(inst);
+ if ((s1)vdst < 0)
+ ILOGV("|goto -0x%02x", -((s1)vdst));
+ else
+ ILOGV("|goto +0x%02x", ((s1)vdst));
+ ILOGV("> branch taken");
+ if ((s1)vdst < 0)
+ PERIODIC_CHECKS(kInterpEntryInstr, (s1)vdst);
+ FINISH((s1)vdst);
+OP_END
+
+/* File: c/OP_GOTO_16.c */
+HANDLE_OPCODE(OP_GOTO_16 /*+AAAA*/)
+ {
+ s4 offset = (s2) FETCH(1); /* sign-extend next code unit */
+
+ if (offset < 0)
+ ILOGV("|goto/16 -0x%04x", -offset);
+ else
+ ILOGV("|goto/16 +0x%04x", offset);
+ ILOGV("> branch taken");
+ if (offset < 0)
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_GOTO_32.c */
+HANDLE_OPCODE(OP_GOTO_32 /*+AAAAAAAA*/)
+ {
+ s4 offset = FETCH(1); /* low-order 16 bits */
+ offset |= ((s4) FETCH(2)) << 16; /* high-order 16 bits */
+
+ if (offset < 0)
+ ILOGV("|goto/32 -0x%08x", -offset);
+ else
+ ILOGV("|goto/32 +0x%08x", offset);
+ ILOGV("> branch taken");
+ if (offset <= 0) /* allowed to branch to self */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_PACKED_SWITCH.c */
+HANDLE_OPCODE(OP_PACKED_SWITCH /*vAA, +BBBB*/)
+ {
+ const u2* switchData;
+ u4 testVal;
+ s4 offset;
+
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+ switchData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (switchData < curMethod->insns ||
+ switchData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+ GOTO_exceptionThrown();
+ }
+#endif
+ testVal = GET_REGISTER(vsrc1);
+
+ offset = dvmInterpHandlePackedSwitch(switchData, testVal);
+ ILOGV("> branch taken (0x%04x)\n", offset);
+ if (offset <= 0) /* uncommon */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_SPARSE_SWITCH.c */
+HANDLE_OPCODE(OP_SPARSE_SWITCH /*vAA, +BBBB*/)
+ {
+ const u2* switchData;
+ u4 testVal;
+ s4 offset;
+
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+ switchData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (switchData < curMethod->insns ||
+ switchData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+ GOTO_exceptionThrown();
+ }
+#endif
+ testVal = GET_REGISTER(vsrc1);
+
+ offset = dvmInterpHandleSparseSwitch(switchData, testVal);
+ ILOGV("> branch taken (0x%04x)\n", offset);
+ if (offset <= 0) /* uncommon */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_CMPL_FLOAT.c */
+HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
+OP_END
+
+/* File: c/OP_CMPG_FLOAT.c */
+HANDLE_OP_CMPX(OP_CMPG_FLOAT, "g-float", float, _FLOAT, 1)
+OP_END
+
+/* File: c/OP_CMPL_DOUBLE.c */
+HANDLE_OP_CMPX(OP_CMPL_DOUBLE, "l-double", double, _DOUBLE, -1)
+OP_END
+
+/* File: c/OP_CMPG_DOUBLE.c */
+HANDLE_OP_CMPX(OP_CMPG_DOUBLE, "g-double", double, _DOUBLE, 1)
+OP_END
+
+/* File: c/OP_CMP_LONG.c */
+HANDLE_OP_CMPX(OP_CMP_LONG, "-long", s8, _WIDE, 0)
+OP_END
+
+/* File: c/OP_IF_EQ.c */
+HANDLE_OP_IF_XX(OP_IF_EQ, "eq", ==)
+OP_END
+
+/* File: c/OP_IF_NE.c */
+HANDLE_OP_IF_XX(OP_IF_NE, "ne", !=)
+OP_END
+
+/* File: c/OP_IF_LT.c */
+HANDLE_OP_IF_XX(OP_IF_LT, "lt", <)
+OP_END
+
+/* File: c/OP_IF_GE.c */
+HANDLE_OP_IF_XX(OP_IF_GE, "ge", >=)
+OP_END
+
+/* File: c/OP_IF_GT.c */
+HANDLE_OP_IF_XX(OP_IF_GT, "gt", >)
+OP_END
+
+/* File: c/OP_IF_LE.c */
+HANDLE_OP_IF_XX(OP_IF_LE, "le", <=)
+OP_END
+
+/* File: c/OP_IF_EQZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_EQZ, "eqz", ==)
+OP_END
+
+/* File: c/OP_IF_NEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_NEZ, "nez", !=)
+OP_END
+
+/* File: c/OP_IF_LTZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_LTZ, "ltz", <)
+OP_END
+
+/* File: c/OP_IF_GEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_GEZ, "gez", >=)
+OP_END
+
+/* File: c/OP_IF_GTZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_GTZ, "gtz", >)
+OP_END
+
+/* File: c/OP_IF_LEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_LEZ, "lez", <=)
+OP_END
+
+/* File: c/OP_UNUSED_3E.c */
+HANDLE_OPCODE(OP_UNUSED_3E)
+OP_END
+
+/* File: c/OP_UNUSED_3F.c */
+HANDLE_OPCODE(OP_UNUSED_3F)
+OP_END
+
+/* File: c/OP_UNUSED_40.c */
+HANDLE_OPCODE(OP_UNUSED_40)
+OP_END
+
+/* File: c/OP_UNUSED_41.c */
+HANDLE_OPCODE(OP_UNUSED_41)
+OP_END
+
+/* File: c/OP_UNUSED_42.c */
+HANDLE_OPCODE(OP_UNUSED_42)
+OP_END
+
+/* File: c/OP_UNUSED_43.c */
+HANDLE_OPCODE(OP_UNUSED_43)
+OP_END
+
+/* File: c/OP_AGET.c */
+HANDLE_OP_AGET(OP_AGET, "", u4, )
+OP_END
+
+/* File: c/OP_AGET_WIDE.c */
+HANDLE_OP_AGET(OP_AGET_WIDE, "-wide", s8, _WIDE)
+OP_END
+
+/* File: c/OP_AGET_OBJECT.c */
+HANDLE_OP_AGET(OP_AGET_OBJECT, "-object", u4, )
+OP_END
+
+/* File: c/OP_AGET_BOOLEAN.c */
+HANDLE_OP_AGET(OP_AGET_BOOLEAN, "-boolean", u1, )
+OP_END
+
+/* File: c/OP_AGET_BYTE.c */
+HANDLE_OP_AGET(OP_AGET_BYTE, "-byte", s1, )
+OP_END
+
+/* File: c/OP_AGET_CHAR.c */
+HANDLE_OP_AGET(OP_AGET_CHAR, "-char", u2, )
+OP_END
+
+/* File: c/OP_AGET_SHORT.c */
+HANDLE_OP_AGET(OP_AGET_SHORT, "-short", s2, )
+OP_END
+
+/* File: c/OP_APUT.c */
+HANDLE_OP_APUT(OP_APUT, "", u4, )
+OP_END
+
+/* File: c/OP_APUT_WIDE.c */
+HANDLE_OP_APUT(OP_APUT_WIDE, "-wide", s8, _WIDE)
+OP_END
+
+/* File: c/OP_APUT_OBJECT.c */
+HANDLE_OPCODE(OP_APUT_OBJECT /*vAA, vBB, vCC*/)
+ {
+ ArrayObject* arrayObj;
+ Object* obj;
+ u2 arrayInfo;
+ EXPORT_PC();
+ vdst = INST_AA(inst); /* AA: source value */
+ arrayInfo = FETCH(1);
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */
+ vsrc2 = arrayInfo >> 8; /* CC: index */
+ ILOGV("|aput%s v%d,v%d,v%d", "-object", vdst, vsrc1, vsrc2);
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ if (!checkForNull((Object*) arrayObj))
+ GOTO_exceptionThrown();
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) {
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+ NULL);
+ GOTO_exceptionThrown();
+ }
+ obj = (Object*) GET_REGISTER(vdst);
+ if (obj != NULL) {
+ if (!checkForNull(obj))
+ GOTO_exceptionThrown();
+ if (!dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz)) {
+ LOGV("Can't put a '%s'(%p) into array type='%s'(%p)\n",
+ obj->clazz->descriptor, obj,
+ arrayObj->obj.clazz->descriptor, arrayObj);
+ //dvmDumpClass(obj->clazz);
+ //dvmDumpClass(arrayObj->obj.clazz);
+ dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+ GOTO_exceptionThrown();
+ }
+ }
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));
+ ((u4*) arrayObj->contents)[GET_REGISTER(vsrc2)] =
+ GET_REGISTER(vdst);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_APUT_BOOLEAN.c */
+HANDLE_OP_APUT(OP_APUT_BOOLEAN, "-boolean", u1, )
+OP_END
+
+/* File: c/OP_APUT_BYTE.c */
+HANDLE_OP_APUT(OP_APUT_BYTE, "-byte", s1, )
+OP_END
+
+/* File: c/OP_APUT_CHAR.c */
+HANDLE_OP_APUT(OP_APUT_CHAR, "-char", u2, )
+OP_END
+
+/* File: c/OP_APUT_SHORT.c */
+HANDLE_OP_APUT(OP_APUT_SHORT, "-short", s2, )
+OP_END
+
+/* File: c/OP_IGET.c */
+HANDLE_IGET_X(OP_IGET, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE.c */
+HANDLE_IGET_X(OP_IGET_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT.c */
+HANDLE_IGET_X(OP_IGET_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IGET_BOOLEAN.c */
+HANDLE_IGET_X(OP_IGET_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_BYTE.c */
+HANDLE_IGET_X(OP_IGET_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_CHAR.c */
+HANDLE_IGET_X(OP_IGET_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_SHORT.c */
+HANDLE_IGET_X(OP_IGET_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT.c */
+HANDLE_IPUT_X(OP_IPUT, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE.c */
+HANDLE_IPUT_X(OP_IPUT_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT.c */
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible. In practice, many popular VMs don't
+ * do this because it slows down a very common operation. It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
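+/*
+ * A sketch of what that check might look like (hypothetical "type"
+ * member on Field; dvmInstanceof() assumed usable at this point):
+ *
+ *   if (obj != NULL && !dvmInstanceof(obj->clazz, ifield->type))
+ *       ...throw an appropriate error...
+ */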
+HANDLE_IPUT_X(OP_IPUT_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_BOOLEAN.c */
+HANDLE_IPUT_X(OP_IPUT_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_BYTE.c */
+HANDLE_IPUT_X(OP_IPUT_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_CHAR.c */
+HANDLE_IPUT_X(OP_IPUT_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_SHORT.c */
+HANDLE_IPUT_X(OP_IPUT_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_SGET.c */
+HANDLE_SGET_X(OP_SGET, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_WIDE.c */
+HANDLE_SGET_X(OP_SGET_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT.c */
+HANDLE_SGET_X(OP_SGET_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_BOOLEAN.c */
+HANDLE_SGET_X(OP_SGET_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_BYTE.c */
+HANDLE_SGET_X(OP_SGET_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_CHAR.c */
+HANDLE_SGET_X(OP_SGET_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_SHORT.c */
+HANDLE_SGET_X(OP_SGET_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT.c */
+HANDLE_SPUT_X(OP_SPUT, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE.c */
+HANDLE_SPUT_X(OP_SPUT_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT.c */
+HANDLE_SPUT_X(OP_SPUT_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_BOOLEAN.c */
+HANDLE_SPUT_X(OP_SPUT_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_BYTE.c */
+HANDLE_SPUT_X(OP_SPUT_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_CHAR.c */
+HANDLE_SPUT_X(OP_SPUT_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_SHORT.c */
+HANDLE_SPUT_X(OP_SPUT_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeVirtual, false);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeSuper, false);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeDirect, false);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeStatic, false);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeInterface, false);
+OP_END
+
+/* File: c/OP_UNUSED_73.c */
+HANDLE_OPCODE(OP_UNUSED_73)
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeVirtual, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeSuper, true);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeDirect, true);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeStatic, true);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeInterface, true);
+OP_END
+
+/* File: c/OP_UNUSED_79.c */
+HANDLE_OPCODE(OP_UNUSED_79)
+OP_END
+
+/* File: c/OP_UNUSED_7A.c */
+HANDLE_OPCODE(OP_UNUSED_7A)
+OP_END
+
+/* File: c/OP_NEG_INT.c */
+HANDLE_UNOP(OP_NEG_INT, "neg-int", -, , )
+OP_END
+
+/* File: c/OP_NOT_INT.c */
+HANDLE_UNOP(OP_NOT_INT, "not-int", , ^ 0xffffffff, )
+OP_END
+
+/* File: c/OP_NEG_LONG.c */
+HANDLE_UNOP(OP_NEG_LONG, "neg-long", -, , _WIDE)
+OP_END
+
+/* File: c/OP_NOT_LONG.c */
+HANDLE_UNOP(OP_NOT_LONG, "not-long", , ^ 0xffffffffffffffffULL, _WIDE)
+OP_END
+
+/* File: c/OP_NEG_FLOAT.c */
+HANDLE_UNOP(OP_NEG_FLOAT, "neg-float", -, , _FLOAT)
+OP_END
+
+/* File: c/OP_NEG_DOUBLE.c */
+HANDLE_UNOP(OP_NEG_DOUBLE, "neg-double", -, , _DOUBLE)
+OP_END
+
+/* File: c/OP_INT_TO_LONG.c */
+HANDLE_NUMCONV(OP_INT_TO_LONG, "int-to-long", _INT, _WIDE)
+OP_END
+
+/* File: c/OP_INT_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_INT_TO_FLOAT, "int-to-float", _INT, _FLOAT)
+OP_END
+
+/* File: c/OP_INT_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_INT_TO_DOUBLE, "int-to-double", _INT, _DOUBLE)
+OP_END
+
+/* File: c/OP_LONG_TO_INT.c */
+HANDLE_NUMCONV(OP_LONG_TO_INT, "long-to-int", _WIDE, _INT)
+OP_END
+
+/* File: c/OP_LONG_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_LONG_TO_FLOAT, "long-to-float", _WIDE, _FLOAT)
+OP_END
+
+/* File: c/OP_LONG_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_LONG_TO_DOUBLE, "long-to-double", _WIDE, _DOUBLE)
+OP_END
+
+/* File: c/OP_FLOAT_TO_INT.c */
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_INT, "float-to-int",
+ float, _FLOAT, s4, _INT)
+OP_END
+
+/* File: c/OP_FLOAT_TO_LONG.c */
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_LONG, "float-to-long",
+ float, _FLOAT, s8, _WIDE)
+OP_END
+
+/* File: c/OP_FLOAT_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_FLOAT_TO_DOUBLE, "float-to-double", _FLOAT, _DOUBLE)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_INT.c */
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_INT, "double-to-int",
+ double, _DOUBLE, s4, _INT)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_LONG.c */
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_LONG, "double-to-long",
+ double, _DOUBLE, s8, _WIDE)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_DOUBLE_TO_FLOAT, "double-to-float", _DOUBLE, _FLOAT)
+OP_END
+
+/* File: c/OP_INT_TO_BYTE.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_BYTE, "byte", s1)
+OP_END
+
+/* File: c/OP_INT_TO_CHAR.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_CHAR, "char", u2)
+OP_END
+
+/* File: c/OP_INT_TO_SHORT.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_SHORT, "short", s2) /* want sign bit */
+OP_END
+
+/* File: c/OP_ADD_INT.c */
+HANDLE_OP_X_INT(OP_ADD_INT, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_INT.c */
+HANDLE_OP_X_INT(OP_SUB_INT, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_INT.c */
+HANDLE_OP_X_INT(OP_MUL_INT, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT.c */
+HANDLE_OP_X_INT(OP_DIV_INT, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT.c */
+HANDLE_OP_X_INT(OP_REM_INT, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT.c */
+HANDLE_OP_X_INT(OP_AND_INT, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT.c */
+HANDLE_OP_X_INT(OP_OR_INT, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT.c */
+HANDLE_OP_X_INT(OP_XOR_INT, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT.c */
+HANDLE_OP_SHX_INT(OP_SHL_INT, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT.c */
+HANDLE_OP_SHX_INT(OP_SHR_INT, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT.c */
+HANDLE_OP_SHX_INT(OP_USHR_INT, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_ADD_LONG.c */
+HANDLE_OP_X_LONG(OP_ADD_LONG, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_LONG.c */
+HANDLE_OP_X_LONG(OP_SUB_LONG, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_LONG.c */
+HANDLE_OP_X_LONG(OP_MUL_LONG, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_LONG.c */
+HANDLE_OP_X_LONG(OP_DIV_LONG, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_LONG.c */
+HANDLE_OP_X_LONG(OP_REM_LONG, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_LONG.c */
+HANDLE_OP_X_LONG(OP_AND_LONG, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_LONG.c */
+HANDLE_OP_X_LONG(OP_OR_LONG, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_LONG.c */
+HANDLE_OP_X_LONG(OP_XOR_LONG, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_LONG.c */
+HANDLE_OP_SHX_LONG(OP_SHL_LONG, "shl", (s8), <<)
+OP_END
+
+/* File: c/OP_SHR_LONG.c */
+HANDLE_OP_SHX_LONG(OP_SHR_LONG, "shr", (s8), >>)
+OP_END
+
+/* File: c/OP_USHR_LONG.c */
+HANDLE_OP_SHX_LONG(OP_USHR_LONG, "ushr", (u8), >>)
+OP_END
+
+/* File: c/OP_ADD_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_ADD_FLOAT, "add", +)
+OP_END
+
+/* File: c/OP_SUB_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_SUB_FLOAT, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_MUL_FLOAT, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_DIV_FLOAT, "div", /)
+OP_END
+
+/* File: c/OP_REM_FLOAT.c */
+HANDLE_OPCODE(OP_REM_FLOAT /*vAA, vBB, vCC*/)
+ {
+ u2 srcRegs;
+ vdst = INST_AA(inst);
+ srcRegs = FETCH(1);
+ vsrc1 = srcRegs & 0xff;
+ vsrc2 = srcRegs >> 8;
+ ILOGV("|%s-float v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+ SET_REGISTER_FLOAT(vdst,
+ fmodf(GET_REGISTER_FLOAT(vsrc1), GET_REGISTER_FLOAT(vsrc2)));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ADD_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_ADD_DOUBLE, "add", +)
+OP_END
+
+/* File: c/OP_SUB_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_SUB_DOUBLE, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_MUL_DOUBLE, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_DIV_DOUBLE, "div", /)
+OP_END
+
+/* File: c/OP_REM_DOUBLE.c */
+HANDLE_OPCODE(OP_REM_DOUBLE /*vAA, vBB, vCC*/)
+ {
+ u2 srcRegs;
+ vdst = INST_AA(inst);
+ srcRegs = FETCH(1);
+ vsrc1 = srcRegs & 0xff;
+ vsrc2 = srcRegs >> 8;
+ ILOGV("|%s-double v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+ SET_REGISTER_DOUBLE(vdst,
+ fmod(GET_REGISTER_DOUBLE(vsrc1), GET_REGISTER_DOUBLE(vsrc2)));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ADD_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_ADD_INT_2ADDR, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_SUB_INT_2ADDR, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_MUL_INT_2ADDR, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_DIV_INT_2ADDR, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_REM_INT_2ADDR, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_AND_INT_2ADDR, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_OR_INT_2ADDR, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_XOR_INT_2ADDR, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_SHL_INT_2ADDR, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_SHR_INT_2ADDR, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_USHR_INT_2ADDR, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_ADD_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_ADD_LONG_2ADDR, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_SUB_LONG_2ADDR, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_MUL_LONG_2ADDR, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_DIV_LONG_2ADDR, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_REM_LONG_2ADDR, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_AND_LONG_2ADDR, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_OR_LONG_2ADDR, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_XOR_LONG_2ADDR, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHL_LONG_2ADDR, "shl", (s8), <<)
+OP_END
+
+/* File: c/OP_SHR_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHR_LONG_2ADDR, "shr", (s8), >>)
+OP_END
+
+/* File: c/OP_USHR_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_USHR_LONG_2ADDR, "ushr", (u8), >>)
+OP_END
+
+/* File: c/OP_ADD_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_ADD_FLOAT_2ADDR, "add", +)
+OP_END
+
+/* File: c/OP_SUB_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_SUB_FLOAT_2ADDR, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_MUL_FLOAT_2ADDR, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_DIV_FLOAT_2ADDR, "div", /)
+OP_END
+
+/* File: c/OP_REM_FLOAT_2ADDR.c */
+HANDLE_OPCODE(OP_REM_FLOAT_2ADDR /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|%s-float-2addr v%d,v%d", "mod", vdst, vsrc1);
+ SET_REGISTER_FLOAT(vdst,
+ fmodf(GET_REGISTER_FLOAT(vdst), GET_REGISTER_FLOAT(vsrc1)));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_ADD_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_ADD_DOUBLE_2ADDR, "add", +)
+OP_END
+
+/* File: c/OP_SUB_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_SUB_DOUBLE_2ADDR, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_MUL_DOUBLE_2ADDR, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_DIV_DOUBLE_2ADDR, "div", /)
+OP_END
+
+/* File: c/OP_REM_DOUBLE_2ADDR.c */
+HANDLE_OPCODE(OP_REM_DOUBLE_2ADDR /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|%s-double-2addr v%d,v%d", "mod", vdst, vsrc1);
+ SET_REGISTER_DOUBLE(vdst,
+ fmod(GET_REGISTER_DOUBLE(vdst), GET_REGISTER_DOUBLE(vsrc1)));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_ADD_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_ADD_INT_LIT16, "add", +, 0)
+OP_END
+
+/* File: c/OP_RSUB_INT.c */
+HANDLE_OPCODE(OP_RSUB_INT /*vA, vB, #+CCCC*/)
+ {
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ vsrc2 = FETCH(1);
+ ILOGV("|rsub-int v%d,v%d,#+0x%04x", vdst, vsrc1, vsrc2);
+ SET_REGISTER(vdst, (s2) vsrc2 - (s4) GET_REGISTER(vsrc1));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MUL_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_MUL_INT_LIT16, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_DIV_INT_LIT16, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_REM_INT_LIT16, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_AND_INT_LIT16, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_OR_INT_LIT16, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_XOR_INT_LIT16, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_ADD_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_ADD_INT_LIT8, "add", +, 0)
+OP_END
+
+/* File: c/OP_RSUB_INT_LIT8.c */
+HANDLE_OPCODE(OP_RSUB_INT_LIT8 /*vAA, vBB, #+CC*/)
+ {
+ u2 litInfo;
+ vdst = INST_AA(inst);
+ litInfo = FETCH(1);
+ vsrc1 = litInfo & 0xff;
+ vsrc2 = litInfo >> 8;
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", "rsub", vdst, vsrc1, vsrc2);
+ SET_REGISTER(vdst, (s1) vsrc2 - (s4) GET_REGISTER(vsrc1));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MUL_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_MUL_INT_LIT8, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_DIV_INT_LIT8, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_REM_INT_LIT8, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_AND_INT_LIT8, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_OR_INT_LIT8, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_XOR_INT_LIT8, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_SHL_INT_LIT8, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_SHR_INT_LIT8, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_USHR_INT_LIT8, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_UNUSED_E3.c */
+HANDLE_OPCODE(OP_UNUSED_E3)
+OP_END
+
+/* File: c/OP_UNUSED_E4.c */
+HANDLE_OPCODE(OP_UNUSED_E4)
+OP_END
+
+/* File: c/OP_UNUSED_E5.c */
+HANDLE_OPCODE(OP_UNUSED_E5)
+OP_END
+
+/* File: c/OP_UNUSED_E6.c */
+HANDLE_OPCODE(OP_UNUSED_E6)
+OP_END
+
+/* File: c/OP_UNUSED_E7.c */
+HANDLE_OPCODE(OP_UNUSED_E7)
+OP_END
+
+/* File: c/OP_UNUSED_E8.c */
+HANDLE_OPCODE(OP_UNUSED_E8)
+OP_END
+
+/* File: c/OP_UNUSED_E9.c */
+HANDLE_OPCODE(OP_UNUSED_E9)
+OP_END
+
+/* File: c/OP_UNUSED_EA.c */
+HANDLE_OPCODE(OP_UNUSED_EA)
+OP_END
+
+/* File: c/OP_UNUSED_EB.c */
+HANDLE_OPCODE(OP_UNUSED_EB)
+OP_END
+
+/* File: c/OP_UNUSED_EC.c */
+HANDLE_OPCODE(OP_UNUSED_EC)
+OP_END
+
+/* File: c/OP_UNUSED_ED.c */
+HANDLE_OPCODE(OP_UNUSED_ED)
+OP_END
+
+/* File: c/OP_EXECUTE_INLINE.c */
+HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
+ {
+ /*
+ * This has the same form as other method calls, but we ignore
+ * the 5th argument (vA). This is chiefly because the first four
+ * arguments to a function on ARM are in registers.
+ *
+ * We only set the arguments that are actually used, leaving
+ * the rest uninitialized. We're assuming that, if the method
+ * needs them, they'll be specified in the call.
+ *
+ * This annoys gcc when optimizations are enabled, causing a
+ * "may be used uninitialized" warning. We can quiet the warnings
+ * for a slight penalty (5%: 373ns vs. 393ns on empty method). Note
+ * that valgrind is perfectly happy with this arrangement, because
+     * the uninitialized values are never actually used.
+ */
+ u4 arg0, arg1, arg2, arg3;
+ //arg0 = arg1 = arg2 = arg3 = 0;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_B(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* 0-4 register indices */
+ ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+ vsrc1, ref, vdst);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 4);
+
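+        /*
+         * Example (hypothetical values): with vsrc1 == 3 and
+         * vdst == 0x0521, the fall-through cases below load arg2
+         * from v5, arg1 from v2, and arg0 from v1, leaving arg3
+         * uninitialized as described above.
+         */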
+ switch (vsrc1) {
+ case 4:
+ arg3 = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+#if INTERP_TYPE == INTERP_DBG
+ if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+#else
+ if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+#endif
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_UNUSED_EF.c */
+HANDLE_OPCODE(OP_UNUSED_EF)
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+#if INTERP_TYPE != INTERP_DBG
+ //LOGI("Ignoring empty\n");
+ FINISH(3);
+#else
+ if (!gDvm.debuggerActive) {
+ //LOGI("Skipping empty\n");
+ FINISH(3); // don't want it to show up in profiler output
+ } else {
+ //LOGI("Running empty\n");
+ /* fall through to OP_INVOKE_DIRECT */
+ GOTO_invoke(invokeDirect, false);
+ }
+#endif
+OP_END
+
+/* File: c/OP_UNUSED_F1.c */
+HANDLE_OPCODE(OP_UNUSED_F1)
+OP_END
+
+/* File: c/OP_IGET_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_QUICK, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_WIDE_QUICK, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_OBJECT_QUICK, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_QUICK, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_WIDE_QUICK, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_OBJECT_QUICK, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeVirtualQuick, false);
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeVirtualQuick, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_QUICK.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeSuperQuick, false);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeSuperQuick, true);
+OP_END
+
+/* File: c/OP_UNUSED_FC.c */
+HANDLE_OPCODE(OP_UNUSED_FC)
+OP_END
+
+/* File: c/OP_UNUSED_FD.c */
+HANDLE_OPCODE(OP_UNUSED_FD)
+OP_END
+
+/* File: c/OP_UNUSED_FE.c */
+HANDLE_OPCODE(OP_UNUSED_FE)
+OP_END
+
+/* File: c/OP_UNUSED_FF.c */
+HANDLE_OPCODE(OP_UNUSED_FF)
+ /*
+ * In portable interp, most unused opcodes will fall through to here.
+ */
+ LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+ dvmAbort();
+ FINISH(1);
+OP_END
+
+/* File: cstubs/entry.c */
+/*
+ * Handler function table, one entry per opcode.
+ */
+#undef H
+#define H(_op) dvmMterp_##_op
+DEFINE_GOTO_TABLE(gDvmMterpHandlers)
+
+#undef H
+#define H(_op) #_op
+DEFINE_GOTO_TABLE(gDvmMterpHandlerNames)
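+/*
+ * (The H macro is expanded twice through DEFINE_GOTO_TABLE: first to
+ * build the table of handler function pointers, then as a stringizer
+ * to build a parallel table of handler names for logging.)
+ */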
+
+#include <setjmp.h>
+
+/*
+ * C mterp entry point. This just calls the various C fallbacks, making
+ * this a slow but portable interpreter.
+ *
+ * This is only used for the "allstubs" variant.
+ */
+bool dvmMterpStdRun(MterpGlue* glue)
+{
+ jmp_buf jmpBuf;
+ int changeInterp;
+
+ glue->bailPtr = &jmpBuf;
+
+ /*
+ * We want to return "changeInterp" as a boolean, but we can't return
+ * zero through longjmp, so we return (boolean+1).
+ */
+    changeInterp = setjmp(jmpBuf) - 1;
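+    /*
+     * On the initial call setjmp() returns 0, leaving changeInterp at
+     * -1, so we fall through and start interpreting; when
+     * dvmMterpStdBail() longjmp()s back with (changeInterp+1), the
+     * test below succeeds and we return the result to our caller.
+     */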
+ if (changeInterp >= 0) {
+ Thread* threadSelf = dvmThreadSelf();
+ LOGVV("mterp threadid=%d returning %d\n",
+ threadSelf->threadId, changeInterp);
+ return changeInterp;
+ }
+
+ /*
+ * We may not be starting at a point where we're executing instructions.
+ * We need to pick up where the other interpreter left off.
+ *
+ * In some cases we need to call into a throw/return handler which
+ * will do some processing and then either return to us (updating "glue")
+ * or longjmp back out.
+ */
+ switch (glue->entryPoint) {
+ case kInterpEntryInstr:
+ /* just start at the start */
+ break;
+ case kInterpEntryReturn:
+ dvmMterp_returnFromMethod(glue);
+ break;
+ case kInterpEntryThrow:
+ dvmMterp_exceptionThrown(glue);
+ break;
+ default:
+ dvmAbort();
+ }
+
+ /* run until somebody longjmp()s out */
+ while (true) {
+ typedef void (*Handler)(MterpGlue* glue);
+
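+        /* "pc" here (like "retval", "fp", etc.) is remapped onto the
+         * glue struct by the stub macros -- see the #undef list at
+         * the end of this file */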
+ u2 inst = /*glue->*/pc[0];
+ Handler handler = (Handler) gDvmMterpHandlers[inst & 0xff];
+ LOGVV("handler %p %s\n",
+ handler, (const char*) gDvmMterpHandlerNames[inst & 0xff]);
+ (*handler)(glue);
+ }
+}
+
+/*
+ * C mterp exit point. Call here to bail out of the interpreter.
+ */
+void dvmMterpStdBail(MterpGlue* glue, bool changeInterp)
+{
+ jmp_buf* pJmpBuf = glue->bailPtr;
+ longjmp(*pJmpBuf, ((int)changeInterp)+1);
+}
+
+
+/* File: c/gotoTargets.c */
+/*
+ * C footer. This has some common code shared by the various targets.
+ */
+
+/*
+ * Everything from here on is a "goto target". In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction. Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ u4* contents;
+ char typeCh;
+ int i;
+ u4 arg5;
+
+ EXPORT_PC();
+
+ ref = FETCH(1); /* class ref */
+ vdst = FETCH(2); /* first 4 regs -or- range base */
+
+ if (methodCallRange) {
+ vsrc1 = INST_AA(inst); /* #of elements */
+ arg5 = -1; /* silence compiler warning */
+ ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ arg5 = INST_A(inst);
+ vsrc1 = INST_B(inst); /* #of elements */
+ ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1, ref, vdst, arg5);
+ }
+
+ /*
+ * Resolve the array class.
+ */
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /*
+ if (!dvmIsArrayClass(arrayClass)) {
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "filled-new-array needs array class");
+ GOTO_exceptionThrown();
+ }
+ */
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ /*
+ * Create an array of the specified type.
+ */
+ LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+ typeCh = arrayClass->descriptor[1];
+ if (typeCh == 'D' || typeCh == 'J') {
+ /* category 2 primitives not allowed */
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "bad filled array req");
+ GOTO_exceptionThrown();
+ } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
+ /* TODO: requires multiple "fill in" loops with different widths */
+ LOGE("non-int primitives not implemented\n");
+ dvmThrowException("Ljava/lang/InternalError;",
+ "filled-new-array not implemented for anything but 'int'");
+ GOTO_exceptionThrown();
+ }
+
+ newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+
+ /*
+ * Fill in the elements. It's legal for vsrc1 to be zero.
+ */
+ contents = (u4*) newArray->contents;
+ if (methodCallRange) {
+ for (i = 0; i < vsrc1; i++)
+ contents[i] = GET_REGISTER(vdst+i);
+ } else {
+ assert(vsrc1 <= 5);
+ if (vsrc1 == 5) {
+ contents[4] = GET_REGISTER(arg5);
+ vsrc1--;
+ }
+ for (i = 0; i < vsrc1; i++) {
+ contents[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+ }
+
+ retval.l = newArray;
+ }
+ FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+ {
+ Method* baseMethod;
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ /*
+ * This can happen if you create two classes, Base and Sub, where
+ * Sub is a sub-class of Base. Declare a protected abstract
+ * method foo() in Base, and invoke foo() from a method in Base.
+ * Base is an "abstract base class" and is never instantiated
+         * directly.  Now, override foo() in Sub, and use Sub.  This
+         * works fine unless Sub stops providing an implementation of
+ * the method.
+ */
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+#if 0
+ if (vsrc1 != methodToCall->insSize) {
+ LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ //dvmDumpClass(baseMethod->clazz);
+ //dvmDumpClass(methodToCall->clazz);
+ dvmDumpAllClasses(0);
+ }
+#endif
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+ {
+ Method* baseMethod;
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ * The first arg to dvmResolveMethod() is just the referring class
+ * (used for class loaders and such), so we don't want to pass
+ * the superclass into the resolution call.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in that class' superclass.
+ */
+ if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
+ /*
+ * Method does not exist in the superclass. Could happen if
+ * superclass gets updated.
+ */
+ dvmThrowException("Ljava/lang/NoSuchMethodError;",
+ baseMethod->name);
+ GOTO_exceptionThrown();
+ }
+ methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+ {
+ Object* thisPtr;
+ ClassObject* thisClass;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ thisClass = thisPtr->clazz;
+
+ /*
+ * Given a class and a method index, find the Method* with the
+ * actual code we want to execute.
+ */
+ methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
+ methodClassDex);
+ if (methodToCall == NULL) {
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange) {
+ ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref,
+ METHOD_DIRECT);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown direct method\n"); // should be impossible
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange)
+ ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ else
+ ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown method\n");
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+ {
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(ref < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+#if 0 /* impossible in optimized + verified code */
+ if (ref >= curMethod->clazz->super->vtableCount) {
+ dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(ref < curMethod->clazz->super->vtableCount);
+#endif
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in the method's class' superclass.
+ */
+ methodToCall = curMethod->clazz->super->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ super-virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+
+
+ /*
+ * General handling for return-void, return, and return-wide. Put the
+ * return value in "retval" before jumping here.
+ */
+GOTO_TARGET(returnFromMethod)
+ {
+ StackSaveArea* saveArea;
+
+ /*
+ * We must do this BEFORE we pop the previous stack frame off, so
+ * that the GC can see the return value (if any) in the local vars.
+ *
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+ ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+ retval.j, curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+ //DUMP_REGS(curMethod, fp);
+
+ saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+ debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, curMethod);
+#endif
+
+ /* back up to previous frame and see if we hit a break */
+ fp = saveArea->prevFrame;
+ assert(fp != NULL);
+ if (dvmIsBreakFrame(fp)) {
+ /* bail without popping the method frame from stack */
+ LOGVV("+++ returned into break frame\n");
+ GOTO_bail();
+ }
+
+ /* update thread FP, and reset local variables */
+ self->curFrame = fp;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = saveArea->savedPc;
+ ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+
+ /* use FINISH on the caller's invoke instruction */
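+        /* (every invoke variant is three 16-bit code units wide, so
+         * FINISH(3) steps past the caller's entire instruction) */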
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * Jump here when the code throws an exception.
+ *
+ * By the time we get here, the Throwable has been created and the stack
+ * trace has been saved off.
+ */
+GOTO_TARGET(exceptionThrown)
+ {
+ Object* exception;
+ int catchRelPc;
+
+ /*
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+ /*
+ * We save off the exception and clear the exception status. While
+ * processing the exception we might need to load some Throwable
+ * classes, and we don't want class loader exceptions to get
+ * confused with this one.
+ */
+ assert(dvmCheckException(self));
+ exception = dvmGetException(self);
+ dvmAddTrackedAlloc(exception, self);
+ dvmClearException(self);
+
+ LOGV("Handling exception %s at %s:%d\n",
+ exception->clazz->descriptor, curMethod->name,
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ /*
+ * Tell the debugger about it.
+ *
+ * TODO: if the exception was thrown by interpreted code, control
+ * fell through native, and then back to us, we will report the
+ * exception at the point of the throw and again here. We can avoid
+ * this by not reporting exceptions when we jump here directly from
+ * the native call code above, but then we won't report exceptions
+ * that were thrown *from* the JNI code (as opposed to *through* it).
+ *
+ * The correct solution is probably to ignore from-native exceptions
+ * here, and have the JNI exception code do the reporting to the
+ * debugger.
+ */
+ if (gDvm.debuggerActive) {
+ void* catchFrame;
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, true, &catchFrame);
+ dvmDbgPostException(fp, pc - curMethod->insns, catchFrame,
+ catchRelPc, exception);
+ }
+#endif
+
+ /*
+ * We need to unroll to the catch block or the nearest "break"
+ * frame.
+ *
+ * A break frame could indicate that we have reached an intermediate
+ * native call, or have gone off the top of the stack and the thread
+ * needs to exit. Either way, we return from here, leaving the
+ * exception raised.
+ *
+ * If we do find a catch block, we want to transfer execution to
+ * that point.
+ */
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, false, (void*)&fp);
+
+ /*
+ * Restore the stack bounds after an overflow. This isn't going to
+ * be correct in all circumstances, e.g. if JNI code devours the
+ * exception this won't happen until some other exception gets
+ * thrown. If the code keeps pushing the stack bounds we'll end
+ * up aborting the VM.
+ *
+ * Note we want to do this *after* the call to dvmFindCatchBlock,
+ * because that may need extra stack space to resolve exception
+ * classes (e.g. through a class loader).
+ */
+ if (self->stackOverflowed)
+ dvmCleanupStackOverflow(self);
+
+ if (catchRelPc < 0) {
+ /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+ LOGD("Exception %s from %s:%d not caught locally\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+#endif
+ dvmSetException(self, exception);
+ dvmReleaseTrackedAlloc(exception, self);
+ GOTO_bail();
+ }
+
+#if DVM_SHOW_EXCEPTION >= 3
+ {
+ const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+ LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns),
+ dvmGetMethodSourceFile(catchMethod),
+ dvmLineNumFromPC(catchMethod, catchRelPc));
+ }
+#endif
+
+ /*
+ * Adjust local variables to match self->curFrame and the
+ * updated PC.
+ */
+ //fp = (u4*) self->curFrame;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = curMethod->insns + catchRelPc;
+ ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, false); // show all regs
+
+ /*
+ * Restore the exception if the handler wants it.
+ *
+ * The Dalvik spec mandates that, if an exception handler wants to
+ * do something with the exception, the first instruction executed
+ * must be "move-exception". We can pass the exception along
+ * through the thread struct, and let the move-exception instruction
+ * clear it for us.
+ *
+ * If the handler doesn't call move-exception, we don't want to
+ * finish here with an exception still pending.
+ */
+ if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+ dvmSetException(self, exception);
+
+ dvmReleaseTrackedAlloc(exception, self);
+ FINISH(0);
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * General handling for invoke-{virtual,super,direct,static,interface},
+ * including "quick" variants.
+ *
+     * Set "methodToCall" to the Method we're calling, and set
+     * "methodCallRange" depending on whether this is a "/range"
+     * instruction.
+ *
+ * For a range call:
+ * "vsrc1" holds the argument count (8 bits)
+ * "vdst" holds the first argument in the range
+ * For a non-range call:
+ * "vsrc1" holds the argument count (4 bits) and the 5th argument index
+ * "vdst" holds four 4-bit register indices
+ *
+ * The caller must EXPORT_PC before jumping here, because any method
+ * call can throw a stack overflow exception.
+ */
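+    /*
+     * Worked example (hypothetical values): a non-range call with
+     * three arguments in v1, v2, v3 arrives with "vsrc1" == 0x30
+     * (count in the high nibble, unused 5th-arg index in the low
+     * nibble) and "vdst" == 0x0321 (one 4-bit register index per
+     * nibble, first argument in the lowest nibble).
+     */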
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+ u2 count, u2 regs)
+ {
+ STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
+
+ //printf("range=%d call=%p count=%d regs=0x%04x\n",
+ // methodCallRange, methodToCall, count, regs);
+ //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+ // methodToCall->name, methodToCall->signature);
+
+ u4* outs;
+ int i;
+
+ /*
+ * Copy args. This may corrupt vsrc1/vdst.
+ */
+ if (methodCallRange) {
+ // could use memcpy or a "Duff's device"; most functions have
+ // so few args it won't matter much
+ assert(vsrc1 <= curMethod->outsSize);
+ assert(vsrc1 == methodToCall->insSize);
+ outs = OUTS_FROM_FP(fp, vsrc1);
+ for (i = 0; i < vsrc1; i++)
+ outs[i] = GET_REGISTER(vdst+i);
+ } else {
+ u4 count = vsrc1 >> 4;
+
+ assert(count <= curMethod->outsSize);
+ assert(count == methodToCall->insSize);
+ assert(count <= 5);
+
+ outs = OUTS_FROM_FP(fp, count);
+#if 0
+ if (count == 5) {
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ count--;
+ }
+ for (i = 0; i < (int) count; i++) {
+ outs[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+#else
+ // This version executes fewer instructions but is larger
+ // overall. Seems to be a teensy bit faster.
+ assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
+        switch (count) {
+        case 5:
+            outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+            /* fall through */
+        case 4:
+            outs[3] = GET_REGISTER(vdst >> 12);
+            /* fall through */
+        case 3:
+            outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+            /* fall through */
+        case 2:
+            outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+            /* fall through */
+        case 1:
+            outs[0] = GET_REGISTER(vdst & 0x0f);
+            /* fall through */
+        default:
+            ;
+        }
+#endif
+ }
+ }
+
+ /*
+ * (This was originally a "goto" target; I've kept it separate from the
+ * stuff above in case we want to refactor things again.)
+ *
+ * At this point, we have the arguments stored in the "outs" area of
+ * the current method's stack frame, and the method to call in
+ * "methodToCall". Push a new stack frame.
+ */
+ {
+ StackSaveArea* newSaveArea;
+ u4* newFp;
+
+ ILOGV("> %s%s.%s %s",
+ dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ methodToCall->signature);
+
+ newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+ newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+ /* verify that we have enough space */
+ if (true) {
+ u1* bottom;
+ bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+ if (bottom < self->interpStackEnd) {
+ /* stack overflow */
+ LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
+ self->interpStackStart, self->interpStackEnd, bottom,
+ self->interpStackSize, methodToCall->name);
+ dvmHandleStackOverflow(self);
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+ //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+ // fp, newFp, newSaveArea, bottom);
+ }
+
+#ifdef LOG_INSTR
+ if (methodToCall->registersSize > methodToCall->insSize) {
+ /*
+ * This makes valgrind quiet when we print registers that
+ * haven't been initialized. Turn it off when the debug
+ * messages are disabled -- we want valgrind to report any
+ * used-before-initialized issues.
+ */
+ memset(newFp, 0xcc,
+ (methodToCall->registersSize - methodToCall->insSize) * 4);
+ }
+#endif
+
+#ifdef EASY_GDB
+ newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+ newSaveArea->prevFrame = fp;
+ newSaveArea->savedPc = pc;
+ newSaveArea->method = methodToCall;
+
+ if (!dvmIsNativeMethod(methodToCall)) {
+ /*
+ * "Call" interpreted code. Reposition the PC, update the
+ * frame pointer and other local state, and continue.
+ */
+ curMethod = methodToCall;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = methodToCall->insns;
+ fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+ debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+ debugIsMethodEntry = true; // profiling, debugging
+#endif
+ ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, true); // show input args
+ FINISH(0); // jump to method start
+ } else {
+ /* set this up for JNI locals, even if not a JNI native */
+ newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+ self->curFrame = newFp;
+
+ DUMP_REGS(methodToCall, newFp, true); // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+ ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+ methodToCall->name, methodToCall->signature);
+
+ /*
+ * Jump through native call bridge. Because we leave no
+ * space for locals on native calls, "newFp" points directly
+ * to the method arguments.
+ */
+ (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+ /* pop frame off */
+ dvmPopJniLocals(self, newSaveArea);
+ self->curFrame = fp;
+
+ /*
+ * If the native code threw an exception, or interpreted code
+ * invoked by the native call threw one and nobody has cleared
+ * it, jump to our local exception handling.
+ */
+ if (dvmCheckException(self)) {
+ LOGV("Exception thrown by/below native code\n");
+ GOTO_exceptionThrown();
+ }
+
+ ILOGD("> retval=0x%llx (leaving native)", retval.j);
+ ILOGD("> (return from native %s.%s to %s.%s %s)",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+ }
+ assert(false); // should not get here
+GOTO_TARGET_END
+
+
+/* File: cstubs/enddefs.c */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
diff --git a/vm/mterp/out/InterpC-armv4.c b/vm/mterp/out/InterpC-armv4.c
new file mode 100644
index 0000000..2fcdcab
--- /dev/null
+++ b/vm/mterp/out/InterpC-armv4.c
@@ -0,0 +1,1266 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv4'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_PROFILER
+ * WITH_DEBUGGER
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter. If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP /* threaded vs. while-loop interpreter */
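+/*
+ * Rough sketch of the two dispatch styles (not the actual handler
+ * macros used in this file):
+ *
+ *   // while/switch: a single dispatch point
+ *   while (true) { inst = *pc; switch (inst & 0xff) { ... } }
+ *
+ *   // threaded: every handler ends with its own dispatch
+ *   op_foo: ...; inst = *pc; goto *handlerTable[inst & 0xff];
+ */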
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types. We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union. The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy(). The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method. Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
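+/* (getLongFromArray() and friends below provide all three access
+ * strategies; exactly one variant is compiled in) */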
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields. Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#else
+ return *((s8*) &ptr[idx]);
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &val, 8);
+#else
+ *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#else
+ return *((double*) &ptr[idx]);
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &dval, 8);
+#else
+ *((double*) &ptr[idx]) = dval;
+#endif
+}
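+
+/*
+ * Usage sketch (illustrative): a wide value occupies the register pair
+ * (vN, vN+1), so handlers move it through these helpers, e.g.
+ *
+ *     s8 wide = getLongFromArray(fp, vsrc1);  // reads fp[vsrc1..vsrc1+1]
+ *     putLongToArray(fp, vdst, wide);         // writes fp[vdst..vdst+1]
+ *
+ * which keeps gcc from emitting ldrd/strd on slots that may only be
+ * 32-bit aligned.
+ */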
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than an 8-bit load and avoids the alignment problems that
+ * a 32-bit load might have.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" 8-bit register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
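+
+/*
+ * Worked example (illustrative): for the 16-bit code unit inst = 0x3212,
+ *   INST_INST(inst) = 0x12  (opcode byte)
+ *   INST_A(inst)    = 2     (vA)
+ *   INST_B(inst)    = 3     (vB)
+ *   INST_AA(inst)   = 0x32  (vAA, for opcodes that use the 8-bit form)
+ */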
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly. If we don't do this, the offset
+ * within the current method won't be shown correctly. See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Determine if we need to switch to a different interpreter. "_current"
+ * is either INTERP_STD or INTERP_DBG, and is a compile-time constant
+ * for a given interpreter generation file, which lets the compiler
+ * remove the outer conditional from the following.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) ( \
+ (_current == INTERP_STD) ? \
+ dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+#define ATOMIC_CACHE_CALC \
+ dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+ return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+ DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
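+
+/*
+ * Note (assumption about the cache mechanism, for illustration):
+ * ATOMIC_CACHE_LOOKUP expands ATOMIC_CACHE_CALC only on a cache miss,
+ * which is why the lookup expression is passed in through the
+ * define/undef pair rather than as a function argument.
+ */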
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
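+
+/*
+ * Usage sketch (illustrative): handlers that called EXPORT_PC() up top
+ * use the first form, everything else uses the second:
+ *
+ *     if (!checkForNull(obj))
+ *         GOTO_exceptionThrown();
+ *
+ *     if (!checkForNullExportPC(obj, fp, pc))
+ *         GOTO_exceptionThrown();
+ */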
+
+
+/* File: cstubs/stubdefs.c */
+/* this is a standard (no debug support) interpreter */
+#define INTERP_TYPE INTERP_STD
+#define CHECK_DEBUG_AND_PROF() ((void)0)
+#define CHECK_TRACKED_REFS() ((void)0)
+
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
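+
+/*
+ * Expansion sketch (illustrative): GOTO_TARGET(invokeVirtual, bool
+ * methodCallRange) opens the function
+ *
+ *     void dvmMterp_invokeVirtual(MterpGlue* glue, bool methodCallRange) {
+ *         u2 ref, vsrc1, vsrc2, vdst;
+ *         u2 inst = FETCH(0);
+ *         ...
+ *
+ * and GOTO_TARGET_END supplies the closing brace.
+ */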
+
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references. (These are undefined down in "footer.c".)
+ */
+#define retval glue->retval
+#define pc glue->pc
+#define fp glue->fp
+#define curMethod glue->method
+#define methodClassDex glue->methodClassDex
+#define self glue->self
+#define debugTrackedRefStart glue->debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
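+
+/*
+ * Example of the remapping (illustrative): with the defines above,
+ * handler source written as "pc += 2;" compiles here as
+ * "glue->pc += 2;", so the stubs can share source text with the
+ * portable interpreter.
+ */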
+
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "glue" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#define HANDLE_OPCODE(_op) \
+ void dvmMterp_##_op(MterpGlue* glue) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0);
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done.
+ */
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ CHECK_DEBUG_AND_PROF(); \
+ CHECK_TRACKED_REFS(); \
+ return; \
+ }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ dvmMterp_##_target(glue, _methodCallRange); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp. Use "bail_switch"
+ * if we need to switch to the other interpreter upon our return.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(glue, false);
+#define GOTO_bail_switch() \
+ dvmMterpStdBail(glue, true);
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started. If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
+ dvmCheckSuspendQuick(self); \
+ if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
+ ADJUST_PC(_pcadj); \
+ glue->entryPoint = _entryPoint; \
+ LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n", \
+ glue->self->threadId, (_entryPoint), (_pcadj)); \
+ GOTO_bail_switch(); \
+ } \
+ }
+
+
+/* File: c/opcommon.c */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text
+ * substitution.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 - 1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
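+
+/*
+ * Worked example (illustrative), for float-to-int (_tovtype = s4):
+ *   intMin = 0x80000000, intMax = 0x7fffffff
+ *   +inf -> 0x7fffffff, -inf -> 0x80000000, NaN -> 0,
+ *   and an ordinary value such as 2.5f truncates toward zero to 2.
+ */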
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d\n", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
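+
+/*
+ * Example (illustrative): cmpl-float instantiates this with _nanVal = -1
+ * and cmpg-float with _nanVal = 1, so a NaN operand biases the result
+ * toward "less than" or "greater than" respectively; ordered pairs yield
+ * -1, 0, or 1 as usual.
+ */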
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
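+
+/*
+ * Worked example (illustrative) of the overflow special case: for
+ * div-int, 0x80000000 / -1 cannot be represented (+2^31 overflows s4),
+ * so the quotient is pinned to 0x80000000; for rem-int the remainder is
+ * defined as 0. A zero divisor always throws ArithmeticException.
+ */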
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
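+
+/*
+ * Note (illustrative): long shifts mask the count with 0x3f while the
+ * int forms above mask with 0x1f, so e.g. shl-long by 65 actually
+ * shifts by 1.
+ */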
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ LOGV("Invalid array access: %p %d (len=%d)\n", \
+ arrayObj, GET_REGISTER(vsrc2), arrayObj->length); \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&sfield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&sfield->field); \
+ } \
+ FINISH(2);
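+
+/*
+ * Resolution sketch (illustrative): field references are resolved once
+ * and cached in the DEX structures, so the common path is a single
+ * dvmDexGetResolvedField() hit; only a miss pays for
+ * dvmResolveInstField()/dvmResolveStaticField(), which may throw.
+ */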
+
+
+/* File: cstubs/enddefs.c */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: armv5te/debug.c */
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose ARM registers, along with some other info.
+ *
+ * This function MUST be compiled in ARM mode -- THUMB will yield bogus
+ * results.
+ *
+ * This will NOT preserve r0-r3/ip.
+ */
+void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
+{
+ register uint32_t rPC asm("r4");
+ register uint32_t rFP asm("r5");
+ register uint32_t rGLUE asm("r6");
+ register uint32_t rIBASE asm("r7");
+ register uint32_t rINST asm("r8");
+ register uint32_t r9 asm("r9");
+ register uint32_t r10 asm("r10");
+
+ extern char dvmAsmInstructionStart[];
+
+ printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
+ printf(" : rPC=%08x rFP=%08x rGLUE=%08x rIBASE=%08x\n",
+ rPC, rFP, rGLUE, rIBASE);
+ printf(" : rINST=%08x r9=%08x r10=%08x\n", rINST, r9, r10);
+
+ MterpGlue* glue = (MterpGlue*) rGLUE;
+ const Method* method = glue->method;
+ printf(" + self is %p\n", dvmThreadSelf());
+ //printf(" + currently in %s.%s %s\n",
+ // method->clazz->descriptor, method->name, method->signature);
+ //printf(" + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+ //printf(" + next handler for 0x%02x = %p\n",
+ // rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+ StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+ printf(" prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+ saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc);
+#else
+ printf(" prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+ saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc,
+ *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+ /*
+ * It is a direct (non-virtual) method if it is static, private,
+ * or a constructor.
+ */
+ bool isDirect =
+ ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+ (method->name[0] == '<');
+
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+ printf("<%c:%s.%s %s> ",
+ isDirect ? 'D' : 'V',
+ method->clazz->descriptor,
+ method->name,
+ desc);
+
+ free(desc);
+}
+
diff --git a/vm/mterp/out/InterpC-armv5te.c b/vm/mterp/out/InterpC-armv5te.c
new file mode 100644
index 0000000..47c8709
--- /dev/null
+++ b/vm/mterp/out/InterpC-armv5te.c
@@ -0,0 +1,1266 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv5te'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_PROFILER
+ * WITH_DEBUGGER
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter. If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP /* threaded vs. while-loop interpreter */
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types. We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union. The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy(). The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method. Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields. Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* evaluate only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#else
+ return *((s8*) &ptr[idx]);
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &val, 8);
+#else
+ *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#else
+ return *((double*) &ptr[idx]);
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &dval, 8);
+#else
+ *((double*) &ptr[idx]) = dval;
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than an 8-bit load and avoids the alignment problems that
+ * a 32-bit load might have.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" 8-bit register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly. If we don't do this, the offset
+ * within the current method won't be shown correctly. See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Determine if we need to switch to a different interpreter. "_current"
+ * is either INTERP_STD or INTERP_DBG, and is a compile-time constant
+ * for a given interpreter generation file, which lets the compiler
+ * remove the outer conditional from the following.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) ( \
+ (_current == INTERP_STD) ? \
+ dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+#define ATOMIC_CACHE_CALC \
+ dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+ return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+ DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+
+/* File: cstubs/stubdefs.c */
+/* this is a standard (no debug support) interpreter */
+#define INTERP_TYPE INTERP_STD
+#define CHECK_DEBUG_AND_PROF() ((void)0)
+#define CHECK_TRACKED_REFS() ((void)0)
+
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references. (These are undefined down in "footer.c".)
+ */
+#define retval glue->retval
+#define pc glue->pc
+#define fp glue->fp
+#define curMethod glue->method
+#define methodClassDex glue->methodClassDex
+#define self glue->self
+#define debugTrackedRefStart glue->debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "glue" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#define HANDLE_OPCODE(_op) \
+ void dvmMterp_##_op(MterpGlue* glue) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0);
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done.
+ */
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ CHECK_DEBUG_AND_PROF(); \
+ CHECK_TRACKED_REFS(); \
+ return; \
+ }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ dvmMterp_##_target(glue, _methodCallRange); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp. Use "bail_switch"
+ * if we need to switch to the other interpreter upon our return.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(glue, false);
+#define GOTO_bail_switch() \
+ dvmMterpStdBail(glue, true);
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started. If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
+ dvmCheckSuspendQuick(self); \
+ if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
+ ADJUST_PC(_pcadj); \
+ glue->entryPoint = _entryPoint; \
+ LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n", \
+ glue->self->threadId, (_entryPoint), (_pcadj)); \
+ GOTO_bail_switch(); \
+ } \
+ }
+
+
+/* File: c/opcommon.c */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text
+ * substitution.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 - 1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d\n", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
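+
+/*
+ * Note the shift-distance masks match the Java semantics: int shifts
+ * use only the low 5 bits of the count (& 0x1f), long shifts the low
+ * 6 bits (& 0x3f). E.g. shl-long with a shift count of 65 shifts by
+ * 65 & 0x3f = 1, not by 65.
+ */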
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ LOGV("Invalid array access: %p %d (len=%d)\n", \
+ arrayObj, vsrc2, arrayObj->length); \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
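+
+/*
+ * A minimal sketch of the always-32-bits fix described above
+ * (hypothetical helper, illustrative only, never compiled): widen the
+ * value and store the whole word instead of issuing a 16-bit store.
+ */
+#if 0
+static void putShortField32(u4* fieldPtr, s2 newVal)
+{
+    /* sign-extend to 32 bits, then store all 32 so a later 32-bit
+       read ("iget-quick") sees a consistent value */
+    *fieldPtr = (u4)(s4) newVal;
+}
+#endif
+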
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&sfield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&sfield->field); \
+ } \
+ FINISH(2);
+
+
+/* File: cstubs/enddefs.c */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: armv5te/debug.c */
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose ARM registers, along with some other info.
+ *
+ * This function MUST be compiled in ARM mode -- THUMB will yield bogus
+ * results.
+ *
+ * This will NOT preserve r0-r3/ip.
+ */
+void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
+{
+ register uint32_t rPC asm("r4");
+ register uint32_t rFP asm("r5");
+ register uint32_t rGLUE asm("r6");
+ register uint32_t rIBASE asm("r7");
+ register uint32_t rINST asm("r8");
+ register uint32_t r9 asm("r9");
+ register uint32_t r10 asm("r10");
+
+ extern char dvmAsmInstructionStart[];
+
+ printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
+ printf(" : rPC=%08x rFP=%08x rGLUE=%08x rIBASE=%08x\n",
+ rPC, rFP, rGLUE, rIBASE);
+ printf(" : rINST=%08x r9=%08x r10=%08x\n", rINST, r9, r10);
+
+ MterpGlue* glue = (MterpGlue*) rGLUE;
+ const Method* method = glue->method;
+ printf(" + self is %p\n", dvmThreadSelf());
+ //printf(" + currently in %s.%s %s\n",
+ // method->clazz->descriptor, method->name, method->signature);
+ //printf(" + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+ //printf(" + next handler for 0x%02x = %p\n",
+ // rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+ StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+ printf(" prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+ saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc);
+#else
+ printf(" prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+ saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc,
+ *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+ /*
+ * It is a direct (non-virtual) method if it is static, private,
+ * or a constructor.
+ */
+ bool isDirect =
+ ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+ (method->name[0] == '<');
+
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+ printf("<%c:%s.%s %s> ",
+ isDirect ? 'D' : 'V',
+ method->clazz->descriptor,
+ method->name,
+ desc);
+
+ free(desc);
+}
+
diff --git a/vm/mterp/out/InterpC-portdbg.c b/vm/mterp/out/InterpC-portdbg.c
new file mode 100644
index 0000000..d527cc0
--- /dev/null
+++ b/vm/mterp/out/InterpC-portdbg.c
@@ -0,0 +1,4264 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'portdbg'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_PROFILER
+ * WITH_DEBUGGER
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter. If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP /* threaded vs. while-loop interpreter */
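+
+/*
+ * Sketch of the two dispatch styles (illustrative only, never
+ * compiled; the real macros are in the stubdefs section below).
+ * Threaded dispatch replaces the loop/switch with a computed goto at
+ * the end of every handler.
+ */
+#if 0
+    /* classic while/switch dispatch */
+    while (true) {
+        inst = FETCH(0);
+        switch (INST_INST(inst)) {
+        case OP_NOP:
+            /* ... handle nop ... */
+            break;
+        /* ... */
+        }
+    }
+
+    /* threaded dispatch: each handler jumps straight to the next */
+op_OP_NOP:
+    /* ... handle nop ... */
+    ADJUST_PC(1);
+    inst = FETCH(0);
+    goto *handlerTable[INST_INST(inst)];
+#endif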
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types. We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union. The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy(). The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method. Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields. Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
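+
+/*
+ * Example: a two-code-unit instruction finishes with ADJUST_PC(2),
+ * while a taken branch passes its signed offset straight through,
+ * e.g. ADJUST_PC(-3) to resume three code units back.
+ */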
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#else
+ return *((s8*) &ptr[idx]);
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &val, 8);
+#else
+ *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#else
+ return *((double*) &ptr[idx]);
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &dval, 8);
+#else
+ *((double*) &ptr[idx]) = dval;
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
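+
+/*
+ * Worked example: for inst = 0x3212,
+ *   INST_INST(inst) = 0x12   (opcode)
+ *   INST_A(inst)    = 0x2    (vA)
+ *   INST_B(inst)    = 0x3    (vB)
+ *   INST_AA(inst)   = 0x32   (vAA)
+ */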
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly. If we don't do this, the offset
+ * within the current method won't be shown correctly. See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Determine if we need to switch to a different interpreter. "_current"
+ * is either INTERP_STD or INTERP_DBG. Its value is fixed for any given
+ * interpreter generation file, which lets the compiler remove the outer
+ * conditional from the macro below.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) ( \
+ (_current == INTERP_STD) ? \
+ dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+#define ATOMIC_CACHE_CALC \
+ dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+ return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+ DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+
+/* File: portable/portdbg.c */
+#define INTERP_FUNC_NAME dvmInterpretDbg
+#define INTERP_TYPE INTERP_DBG
+
+#define CHECK_DEBUG_AND_PROF() \
+ checkDebugAndProf(pc, fp, self, curMethod, &debugIsMethodEntry)
+
+/* File: portable/stubdefs.c */
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...)
+
+#define GOTO_TARGET(_target, ...) _target:
+
+#define GOTO_TARGET_END
+
+/* ugh */
+#define STUB_HACK(x)
+
+/*
+ * Instruction framing. For a switch-oriented implementation this is
+ * case/break, for a threaded implementation it's a goto label and an
+ * instruction fetch/computed goto.
+ *
+ * Assumes the existence of "const u2* pc" and (for threaded operation)
+ * "u2 inst".
+ */
+#ifdef THREADED_INTERP
+# define H(_op) &&op_##_op
+# define HANDLE_OPCODE(_op) op_##_op:
+# define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ inst = FETCH(0); \
+ CHECK_DEBUG_AND_PROF(); \
+ CHECK_TRACKED_REFS(); \
+ goto *handlerTable[INST_INST(inst)]; \
+ }
+#else
+# define HANDLE_OPCODE(_op) case _op:
+# define FINISH(_offset) { ADJUST_PC(_offset); break; }
+#endif
+
+#define OP_END
+
+#if defined(WITH_TRACKREF_CHECKS)
+# define CHECK_TRACKED_REFS() \
+ dvmInterpCheckTrackedRefs(self, curMethod, debugTrackedRefStart)
+#else
+# define CHECK_TRACKED_REFS() ((void)0)
+#endif
+
+
+/*
+ * The "goto" targets just turn into goto statements. The "arguments" are
+ * passed through local variables.
+ */
+
+#define GOTO_exceptionThrown() goto exceptionThrown;
+
+#define GOTO_returnFromMethod() goto returnFromMethod;
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ methodCallRange = _methodCallRange; \
+ goto _target; \
+ } while(false)
+
+/* for this, the "args" are already in the locals */
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) goto invokeMethod;
+
+#define GOTO_bail() goto bail;
+#define GOTO_bail_switch() goto bail_switch;
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started. If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
+ dvmCheckSuspendQuick(self); \
+ if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
+ ADJUST_PC(_pcadj); \
+ interpState->entryPoint = _entryPoint; \
+ LOGVV("threadid=%d: switch to %s ep=%d adj=%d\n", \
+ self->threadId, \
+ (interpState->nextMode == INTERP_STD) ? "STD" : "DBG", \
+ (_entryPoint), (_pcadj)); \
+ GOTO_bail_switch(); \
+ } \
+ }
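+
+/*
+ * In these handlers, PERIODIC_CHECKS runs only for branches with
+ * negative offsets (see HANDLE_OP_IF_XX). That keeps loops
+ * interruptible: any loop must branch backward at least once, so a
+ * spinning thread still reaches a suspend check.
+ */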
+
+
+/* File: c/opcommon.c */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text subst.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
+ intMax = ~intMin; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d\n", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ LOGV("Invalid array access: %p %d (len=%d)\n", \
+ arrayObj, vsrc2, arrayObj->length); \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&sfield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&sfield->field); \
+ } \
+ FINISH(2);
+
+
+/* File: portable/debug.c */
+/* code in here is only included in portable-debug interpreter */
+
+/*
+ * Determine if an address is "interesting" to the debugger. This allows
+ * us to avoid scanning the entire event list before every instruction.
+ *
+ * The "debugBreakAddr" table is global and not synchronized.
+ */
+static bool isInterestingAddr(const u2* pc)
+{
+ const u2** ptr = gDvm.debugBreakAddr;
+ int i;
+
+ for (i = 0; i < MAX_BREAKPOINTS; i++, ptr++) {
+ if (*ptr == pc) {
+ LOGV("BKP: hit on %p\n", pc);
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Update the debugger on interesting events, such as hitting a breakpoint
+ * or a single-step point. This is called from the top of the interpreter
+ * loop, before the current instruction is processed.
+ *
+ * Set "methodEntry" if we've just entered the method. This detects
+ * method exit by checking to see if the next instruction is "return".
+ *
+ * This can't catch native method entry/exit, so we have to handle that
+ * at the point of invocation. We also need to catch it in dvmCallMethod
+ * if we want to capture native->native calls made through JNI.
+ *
+ * Notes to self:
+ * - Don't want to switch to VMWAIT while posting events to the debugger.
+ * Let the debugger code decide if we need to change state.
+ * - We may want to check for debugger-induced thread suspensions on
+ * every instruction. That would make a "suspend all" more responsive
+ * and reduce the chances of multiple simultaneous events occurring.
+ *   However, it could change the behavior somewhat.
+ *
+ * TODO: method entry/exit events are probably less common than location
+ * breakpoints. We may be able to speed things up a bit if we don't query
+ * the event list unless we know there's at least one lurking within.
+ */
+static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
+ bool methodEntry, Thread* self)
+{
+ int eventFlags = 0;
+
+ /*
+ * Update xtra.currentPc on every instruction. We need to do this if
+ * there's a chance that we could get suspended. This can happen if
+ * eventFlags != 0 here, or somebody manually requests a suspend
+     * (which gets handled at PERIODIC_CHECKS time). One place where this
+ * needs to be correct is in dvmAddSingleStep().
+ */
+ EXPORT_PC();
+
+ if (methodEntry)
+ eventFlags |= DBG_METHOD_ENTRY;
+
+ /*
+ * See if we have a breakpoint here.
+ *
+ * Depending on the "mods" associated with event(s) on this address,
+ * we may or may not actually send a message to the debugger.
+ *
+ * Checking method->debugBreakpointCount is slower on the device than
+ * just scanning the table (!). We could probably work something out
+ * where we just check it on method entry/exit and remember the result,
+ * but that's more fragile and requires passing more stuff around.
+ */
+#ifdef WITH_DEBUGGER
+ if (method->debugBreakpointCount > 0 && isInterestingAddr(pc)) {
+ eventFlags |= DBG_BREAKPOINT;
+ }
+#endif
+
+ /*
+ * If the debugger is single-stepping one of our threads, check to
+ * see if we're that thread and we've reached a step point.
+ */
+ const StepControl* pCtrl = &gDvm.stepControl;
+ if (pCtrl->active && pCtrl->thread == self) {
+ int line, frameDepth;
+ bool doStop = false;
+ const char* msg = NULL;
+
+ assert(!dvmIsNativeMethod(method));
+
+ if (pCtrl->depth == SD_INTO) {
+ /*
+ * Step into method calls. We break when the line number
+ * or method pointer changes. If we're in SS_MIN mode, we
+ * always stop.
+ */
+ if (pCtrl->method != method) {
+ doStop = true;
+ msg = "new method";
+ } else if (pCtrl->size == SS_MIN) {
+ doStop = true;
+ msg = "new instruction";
+ } else if (!dvmAddressSetGet(
+ pCtrl->pAddressSet, pc - method->insns)) {
+ doStop = true;
+ msg = "new line";
+ }
+ } else if (pCtrl->depth == SD_OVER) {
+ /*
+ * Step over method calls. We break when the line number is
+ * different and the frame depth is <= the original frame
+ * depth. (We can't just compare on the method, because we
+ * might get unrolled past it by an exception, and it's tricky
+ * to identify recursion.)
+ */
+ frameDepth = dvmComputeVagueFrameDepth(self, fp);
+ if (frameDepth < pCtrl->frameDepth) {
+ /* popped up one or more frames, always trigger */
+ doStop = true;
+ msg = "method pop";
+ } else if (frameDepth == pCtrl->frameDepth) {
+ /* same depth, see if we moved */
+ if (pCtrl->size == SS_MIN) {
+ doStop = true;
+ msg = "new instruction";
+ } else if (!dvmAddressSetGet(pCtrl->pAddressSet,
+ pc - method->insns)) {
+ doStop = true;
+ msg = "new line";
+ }
+ }
+ } else {
+ assert(pCtrl->depth == SD_OUT);
+ /*
+ * Return from the current method. We break when the frame
+ * depth pops up.
+ *
+ * This differs from the "method exit" break in that it stops
+ * with the PC at the next instruction in the returned-to
+ * function, rather than the end of the returning function.
+ */
+ frameDepth = dvmComputeVagueFrameDepth(self, fp);
+ if (frameDepth < pCtrl->frameDepth) {
+ doStop = true;
+ msg = "method pop";
+ }
+ }
+
+ if (doStop) {
+ LOGV("#####S %s\n", msg);
+ eventFlags |= DBG_SINGLE_STEP;
+ }
+ }
+
+ /*
+ * Check to see if this is a "return" instruction. JDWP says we should
+ * send the event *after* the code has been executed, but it also says
+ * the location we provide is the last instruction. Since the "return"
+ * instruction has no interesting side effects, we should be safe.
+ * (We can't just move this down to the returnFromMethod label because
+ * we potentially need to combine it with other events.)
+ *
+ * We're also not supposed to generate a method exit event if the method
+ * terminates "with a thrown exception".
+ */
+ u2 inst = INST_INST(FETCH(0));
+ if (inst == OP_RETURN_VOID || inst == OP_RETURN || inst == OP_RETURN_WIDE ||
+ inst == OP_RETURN_OBJECT)
+ {
+ eventFlags |= DBG_METHOD_EXIT;
+ }
+
+ /*
+ * If there's something interesting going on, see if it matches one
+ * of the debugger filters.
+ */
+ if (eventFlags != 0) {
+ Object* thisPtr = dvmGetThisPtr(method, fp);
+ if (thisPtr != NULL && !dvmIsValidObject(thisPtr)) {
+ /*
+ * TODO: remove this check if we're confident that the "this"
+ * pointer is where it should be -- slows us down, especially
+ * during single-step.
+ */
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+ LOGE("HEY: invalid 'this' ptr %p (%s.%s %s)\n", thisPtr,
+ method->clazz->descriptor, method->name, desc);
+ free(desc);
+ dvmAbort();
+ }
+ dvmDbgPostLocationEvent(method, pc - method->insns, thisPtr,
+ eventFlags);
+ }
+}
+
+/*
+ * Perform some operations at the "top" of the interpreter loop.
+ * This stuff is required to support debugging and profiling.
+ *
+ * Using" __attribute__((noinline))" seems to do more harm than good. This
+ * is best when inlined due to the large number of parameters, most of
+ * which are local vars in the main interp loop.
+ */
+static void checkDebugAndProf(const u2* pc, const u4* fp, Thread* self,
+ const Method* method, bool* pIsMethodEntry)
+{
+ /* check to see if we've run off end of method */
+ assert(pc >= method->insns && pc <
+ method->insns + dvmGetMethodInsnsSize(method));
+
+#if 0
+ /*
+ * When we hit a specific method, enable verbose instruction logging.
+ * Sometimes it's helpful to use the debugger attach as a trigger too.
+ */
+ if (*pIsMethodEntry) {
+ static const char* cd = "Landroid/test/Arithmetic;";
+ static const char* mn = "shiftTest2";
+ static const char* sg = "()V";
+
+ if (/*gDvm.debuggerActive &&*/
+ strcmp(method->clazz->descriptor, cd) == 0 &&
+ strcmp(method->name, mn) == 0 &&
+ strcmp(method->signature, sg) == 0)
+ {
+ LOGW("Reached %s.%s, enabling verbose mode\n",
+ method->clazz->descriptor, method->name);
+ android_setMinPriority(LOG_TAG"i", ANDROID_LOG_VERBOSE);
+ dumpRegs(method, fp, true);
+ }
+
+ if (!gDvm.debuggerActive)
+ *pIsMethodEntry = false;
+ }
+#endif
+
+ /*
+ * If the debugger is attached, check for events. If the profiler is
+ * enabled, update that too.
+ *
+ * This code is executed for every instruction we interpret, so for
+ * performance we use a couple of #ifdef blocks instead of runtime tests.
+ */
+#ifdef WITH_PROFILER
+ /* profiler and probably debugger */
+ bool isEntry = *pIsMethodEntry;
+ if (isEntry) {
+ *pIsMethodEntry = false;
+ TRACE_METHOD_ENTER(self, method);
+ }
+ if (gDvm.debuggerActive) {
+ updateDebugger(method, pc, fp, isEntry, self);
+ }
+ if (gDvm.instructionCountEnableCount != 0) {
+ /*
+ * Count up the #of executed instructions. This isn't synchronized
+ * for thread-safety; if we need that we should make this
+ * thread-local and merge counts into the global area when threads
+ * exit (perhaps suspending all other threads GC-style and pulling
+ * the data out of them).
+ */
+ int inst = *pc & 0xff;
+ gDvm.executedInstrCounts[inst]++;
+ }
+#else
+ /* debugger only */
+ if (gDvm.debuggerActive) {
+ bool isEntry = *pIsMethodEntry;
+ updateDebugger(method, pc, fp, isEntry, self);
+ if (isEntry)
+ *pIsMethodEntry = false;
+ }
+#endif
+}
+
+
+/* File: portable/entry.c */
+/*
+ * Main interpreter loop.
+ *
+ * This was written with an ARM implementation in mind.
+ */
+bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
+{
+#if defined(EASY_GDB)
+ StackSaveArea* debugSaveArea = SAVEAREA_FROM_FP(self->curFrame);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+ bool debugIsMethodEntry = interpState->debugIsMethodEntry;
+#endif
+#if defined(WITH_TRACKREF_CHECKS)
+ int debugTrackedRefStart = interpState->debugTrackedRefStart;
+#endif
+ DvmDex* methodClassDex; // curMethod->clazz->pDvmDex
+ JValue retval;
+
+ /* core state */
+ const Method* curMethod; // method we're interpreting
+ const u2* pc; // program counter
+ u4* fp; // frame pointer
+ u2 inst; // current instruction
+ /* instruction decoding */
+ u2 ref; // 16-bit quantity fetched directly
+ u2 vsrc1, vsrc2, vdst; // usually used for register indexes
+ /* method call setup */
+ const Method* methodToCall;
+ bool methodCallRange;
+
+#if defined(THREADED_INTERP)
+ /* static computed goto table */
+ DEFINE_GOTO_TABLE(handlerTable);
+#endif
+
+ /* copy state in */
+ curMethod = interpState->method;
+ pc = interpState->pc;
+ fp = interpState->fp;
+ retval = interpState->retval; /* only need for kInterpEntryReturn? */
+
+ methodClassDex = curMethod->clazz->pDvmDex;
+
+ LOGVV("threadid=%d: entry(%s) %s.%s pc=0x%x fp=%p ep=%d\n",
+ self->threadId, (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",
+ curMethod->clazz->descriptor, curMethod->name, pc - curMethod->insns,
+ fp, interpState->entryPoint);
+
+ /*
+ * DEBUG: scramble this to ensure we're not relying on it.
+ */
+ methodToCall = (const Method*) -1;
+
+#if INTERP_TYPE == INTERP_DBG
+ if (debugIsMethodEntry) {
+ ILOGD("|-- Now interpreting %s.%s", curMethod->clazz->descriptor,
+ curMethod->name);
+ DUMP_REGS(curMethod, interpState->fp, false);
+ }
+#endif
+
+ switch (interpState->entryPoint) {
+ case kInterpEntryInstr:
+ /* just fall through to instruction loop or threaded kickstart */
+ break;
+ case kInterpEntryReturn:
+ goto returnFromMethod;
+ case kInterpEntryThrow:
+ goto exceptionThrown;
+ default:
+ dvmAbort();
+ }
+
+#ifdef THREADED_INTERP
+ FINISH(0); /* fetch and execute first instruction */
+#else
+ while (1) {
+ CHECK_DEBUG_AND_PROF(); /* service debugger and profiling */
+ CHECK_TRACKED_REFS(); /* check local reference tracking */
+
+ /* fetch the next 16 bits from the instruction stream */
+ inst = FETCH(0);
+
+ switch (INST_INST(inst)) {
+#endif
+
+/*--- start of opcodes ---*/
+
+/* File: c/OP_NOP.c */
+HANDLE_OPCODE(OP_NOP)
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE.c */
+HANDLE_OPCODE(OP_MOVE /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_WIDE.c */
+HANDLE_OPCODE(OP_MOVE_WIDE /*vA, vB*/)
+ /* IMPORTANT: must correctly handle overlapping registers, e.g. both
+ * "move-wide v6, v7" and "move-wide v7, v6" */
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move-wide v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+ kSpacing+5, vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_WIDE_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move-wide/from16 v%d,v%d (v%d=0x%08llx)", vdst, vsrc1,
+ vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_16.c */
+HANDLE_OPCODE(OP_MOVE_WIDE_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move-wide/16 v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+ kSpacing+8, vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_OBJECT.c */
+/* File: c/OP_MOVE.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(1);
+OP_END
+
+
+/* File: c/OP_MOVE_OBJECT_FROM16.c */
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(2);
+OP_END
+
+
+/* File: c/OP_MOVE_OBJECT_16.c */
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(3);
+OP_END
+
+
+/* File: c/OP_MOVE_RESULT.c */
+HANDLE_OPCODE(OP_MOVE_RESULT /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+        vdst, kSpacing+4, vdst, retval.i);
+ SET_REGISTER(vdst, retval.i);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_WIDE.c */
+HANDLE_OPCODE(OP_MOVE_RESULT_WIDE /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result-wide v%d %s(0x%08llx)", vdst, kSpacing, retval.j);
+ SET_REGISTER_WIDE(vdst, retval.j);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_OBJECT.c */
+/* File: c/OP_MOVE_RESULT.c */
+HANDLE_OPCODE(OP_MOVE_RESULT_OBJECT /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+        vdst, kSpacing+4, vdst, retval.i);
+ SET_REGISTER(vdst, retval.i);
+ FINISH(1);
+OP_END
+
+
+/* File: c/OP_MOVE_EXCEPTION.c */
+HANDLE_OPCODE(OP_MOVE_EXCEPTION /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-exception v%d", vdst);
+ assert(self->exception != NULL);
+ SET_REGISTER(vdst, (u4)self->exception);
+ dvmClearException(self);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_RETURN_VOID.c */
+HANDLE_OPCODE(OP_RETURN_VOID /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; // placate valgrind
+#endif
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN.c */
+HANDLE_OPCODE(OP_RETURN /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return%s v%d",
+ (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+ retval.i = GET_REGISTER(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN_WIDE.c */
+HANDLE_OPCODE(OP_RETURN_WIDE /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return-wide v%d", vsrc1);
+ retval.j = GET_REGISTER_WIDE(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN_OBJECT.c */
+/* File: c/OP_RETURN.c */
+HANDLE_OPCODE(OP_RETURN_OBJECT /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return%s v%d",
+ (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+ retval.i = GET_REGISTER(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+
+/* File: c/OP_CONST_4.c */
+HANDLE_OPCODE(OP_CONST_4 /*vA, #+B*/)
+ {
+ s4 tmp;
+
+ vdst = INST_A(inst);
+ tmp = (s4) (INST_B(inst) << 28) >> 28; // sign extend 4-bit value
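+        /* e.g. B=0xf: 0xf<<28 = 0xf0000000, and the arithmetic shift back
+         * down yields 0xffffffff (-1), so 4-bit values map to [-8,7] */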
+ ILOGV("|const/4 v%d,#0x%02x", vdst, (s4)tmp);
+ SET_REGISTER(vdst, tmp);
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_CONST_16.c */
+HANDLE_OPCODE(OP_CONST_16 /*vAA, #+BBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+ SET_REGISTER(vdst, (s2) vsrc1);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST.c */
+HANDLE_OPCODE(OP_CONST /*vAA, #+BBBBBBBB*/)
+ {
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
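+        /* 32-bit literals span two code units, low half first: units
+         * 0x5678 then 0x1234 assemble to 0x12345678 */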
+ ILOGV("|const v%d,#0x%08x", vdst, tmp);
+ SET_REGISTER(vdst, tmp);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_HIGH16.c */
+HANDLE_OPCODE(OP_CONST_HIGH16 /*vAA, #+BBBB0000*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const/high16 v%d,#0x%04x0000", vdst, vsrc1);
+ SET_REGISTER(vdst, vsrc1 << 16);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_16.c */
+HANDLE_OPCODE(OP_CONST_WIDE_16 /*vAA, #+BBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const-wide/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+ SET_REGISTER_WIDE(vdst, (s2)vsrc1);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_32.c */
+HANDLE_OPCODE(OP_CONST_WIDE_32 /*vAA, #+BBBBBBBB*/)
+ {
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const-wide/32 v%d,#0x%08x", vdst, tmp);
+ SET_REGISTER_WIDE(vdst, (s4) tmp);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_WIDE.c */
+HANDLE_OPCODE(OP_CONST_WIDE /*vAA, #+BBBBBBBBBBBBBBBB*/)
+ {
+ u8 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u8)FETCH(2) << 16;
+ tmp |= (u8)FETCH(3) << 32;
+ tmp |= (u8)FETCH(4) << 48;
+ ILOGV("|const-wide v%d,#0x%08llx", vdst, tmp);
+ SET_REGISTER_WIDE(vdst, tmp);
+ }
+ FINISH(5);
+OP_END
+
+/* File: c/OP_CONST_WIDE_HIGH16.c */
+HANDLE_OPCODE(OP_CONST_WIDE_HIGH16 /*vAA, #+BBBB000000000000*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const-wide/high16 v%d,#0x%04x000000000000", vdst, vsrc1);
+ SET_REGISTER_WIDE(vdst, ((u8) vsrc1) << 48);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING.c */
+HANDLE_OPCODE(OP_CONST_STRING /*vAA, string@BBBB*/)
+ {
+ StringObject* strObj;
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|const-string v%d string@0x%04x", vdst, ref);
+ strObj = dvmDexGetResolvedString(methodClassDex, ref);
+ if (strObj == NULL) {
+ EXPORT_PC();
+ strObj = dvmResolveString(curMethod->clazz, ref);
+ if (strObj == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) strObj);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING_JUMBO.c */
+HANDLE_OPCODE(OP_CONST_STRING_JUMBO /*vAA, string@BBBBBBBB*/)
+ {
+ StringObject* strObj;
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const-string/jumbo v%d string@0x%08x", vdst, tmp);
+ strObj = dvmDexGetResolvedString(methodClassDex, tmp);
+ if (strObj == NULL) {
+ EXPORT_PC();
+ strObj = dvmResolveString(curMethod->clazz, tmp);
+ if (strObj == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) strObj);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_CLASS.c */
+HANDLE_OPCODE(OP_CONST_CLASS /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|const-class v%d class@0x%04x", vdst, ref);
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ EXPORT_PC();
+ clazz = dvmResolveClass(curMethod->clazz, ref, true);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) clazz);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MONITOR_ENTER.c */
+HANDLE_OPCODE(OP_MONITOR_ENTER /*vAA*/)
+ {
+ Object* obj;
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|monitor-enter v%d %s(0x%08x)",
+ vsrc1, kSpacing+6, GET_REGISTER(vsrc1));
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (!checkForNullExportPC(obj, fp, pc))
+ GOTO_exceptionThrown();
+ ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
+#ifdef WITH_MONITOR_TRACKING
+ EXPORT_PC(); /* need for stack trace */
+#endif
+ dvmLockObject(self, obj);
+#ifdef WITH_DEADLOCK_PREDICTION
+ if (dvmCheckException(self))
+ GOTO_exceptionThrown();
+#endif
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MONITOR_EXIT.c */
+HANDLE_OPCODE(OP_MONITOR_EXIT /*vAA*/)
+ {
+ Object* obj;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|monitor-exit v%d %s(0x%08x)",
+ vsrc1, kSpacing+5, GET_REGISTER(vsrc1));
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (!checkForNull(obj)) {
+ /*
+ * The exception needs to be processed at the *following*
+ * instruction, not the current instruction (see the Dalvik
+ * spec). Because we're jumping to an exception handler,
+ * we're not actually at risk of skipping an instruction
+ * by doing so.
+ */
+ ADJUST_PC(1); /* monitor-exit width is 1 */
+ GOTO_exceptionThrown();
+ }
+ ILOGV("+ unlocking %p %s\n", obj, obj->clazz->descriptor);
+ if (!dvmUnlockObject(self, obj)) {
+ assert(dvmCheckException(self));
+ ADJUST_PC(1);
+ GOTO_exceptionThrown();
+ }
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_CHECK_CAST.c */
+HANDLE_OPCODE(OP_CHECK_CAST /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+ Object* obj;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst);
+ ref = FETCH(1); /* class to check against */
+ ILOGV("|check-cast v%d,class@0x%04x", vsrc1, ref);
+
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+ if (!checkForNull(obj))
+ GOTO_exceptionThrown();
+#endif
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ clazz = dvmResolveClass(curMethod->clazz, ref, false);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ if (!dvmInstanceof(obj->clazz, clazz)) {
+ dvmThrowExceptionWithClassMessage(
+ "Ljava/lang/ClassCastException;", obj->clazz->descriptor);
+ GOTO_exceptionThrown();
+ }
+ }
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_INSTANCE_OF.c */
+HANDLE_OPCODE(OP_INSTANCE_OF /*vA, vB, class@CCCC*/)
+ {
+ ClassObject* clazz;
+ Object* obj;
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst); /* object to check */
+ ref = FETCH(1); /* class to check against */
+ ILOGV("|instance-of v%d,v%d,class@0x%04x", vdst, vsrc1, ref);
+
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (obj == NULL) {
+ SET_REGISTER(vdst, 0);
+ } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+ if (!checkForNullExportPC(obj, fp, pc))
+ GOTO_exceptionThrown();
+#endif
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ EXPORT_PC();
+ clazz = dvmResolveClass(curMethod->clazz, ref, true);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+ }
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ARRAY_LENGTH.c */
+HANDLE_OPCODE(OP_ARRAY_LENGTH /*vA, vB*/)
+ {
+ ArrayObject* arrayObj;
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ ILOGV("|array-length v%d,v%d (%p)", vdst, vsrc1, arrayObj);
+ if (!checkForNullExportPC((Object*) arrayObj, fp, pc))
+ GOTO_exceptionThrown();
+ /* verifier guarantees this is an array reference */
+ SET_REGISTER(vdst, arrayObj->length);
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE.c */
+HANDLE_OPCODE(OP_NEW_INSTANCE /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+ Object* newObj;
+
+ EXPORT_PC();
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|new-instance v%d,class@0x%04x", vdst, ref);
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ clazz = dvmResolveClass(curMethod->clazz, ref, false);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+
+ if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+ GOTO_exceptionThrown();
+
+ /*
+ * Note: the verifier can ensure that this never happens, allowing us
+ * to remove the check. However, the spec requires we throw the
+ * exception at runtime, not verify time, so the verifier would
+ * need to replace the new-instance call with a magic "throw
+ * InstantiationError" instruction.
+ *
+ * Since this relies on the verifier, which is optional, we would
+ * also need a "new-instance-quick" instruction to identify instances
+ * that don't require the check.
+ */
+ if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+ dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+ clazz->descriptor);
+ GOTO_exceptionThrown();
+ }
+ newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+ if (newObj == NULL)
+ GOTO_exceptionThrown();
+ SET_REGISTER(vdst, (u4) newObj);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_NEW_ARRAY.c */
+HANDLE_OPCODE(OP_NEW_ARRAY /*vA, vB, class@CCCC*/)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ s4 length;
+
+ EXPORT_PC();
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst); /* length reg */
+ ref = FETCH(1);
+ ILOGV("|new-array v%d,v%d,class@0x%04x (%d elements)",
+ vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+ length = (s4) GET_REGISTER(vsrc1);
+ if (length < 0) {
+ dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+ GOTO_exceptionThrown();
+ }
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+ SET_REGISTER(vdst, (u4) newArray);
+ }
+ FINISH(2);
+OP_END
+
+
+/* File: c/OP_FILLED_NEW_ARRAY.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
+ GOTO_invoke(filledNewArray, false);
+OP_END
+
+/* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
+ GOTO_invoke(filledNewArray, true);
+OP_END
+
+/* File: c/OP_FILL_ARRAY_DATA.c */
+HANDLE_OPCODE(OP_FILL_ARRAY_DATA /*vAA, +BBBBBBBB*/)
+ {
+ const u2* arrayData;
+ s4 offset;
+ ArrayObject* arrayObj;
+
+ EXPORT_PC();
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|fill-array-data v%d +0x%04x", vsrc1, offset);
+ arrayData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (arrayData < curMethod->insns ||
+ arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ dvmThrowException("Ljava/lang/InternalError;",
+ "bad fill array data");
+ GOTO_exceptionThrown();
+ }
+#endif
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ if (!dvmInterpHandleFillArrayData(arrayObj, arrayData)) {
+ GOTO_exceptionThrown();
+ }
+ FINISH(3);
+ }
+OP_END
+
+/* File: c/OP_THROW.c */
+HANDLE_OPCODE(OP_THROW /*vAA*/)
+ {
+ Object* obj;
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|throw v%d (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
+ obj = (Object*) GET_REGISTER(vsrc1);
+ if (!checkForNullExportPC(obj, fp, pc)) {
+ /* will throw a null pointer exception */
+ LOGVV("Bad exception\n");
+ } else {
+ /* use the requested exception */
+ dvmSetException(self, obj);
+ }
+ GOTO_exceptionThrown();
+ }
+OP_END
+
+/* File: c/OP_GOTO.c */
+HANDLE_OPCODE(OP_GOTO /*+AA*/)
+ vdst = INST_AA(inst);
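+    /* AA is a signed byte branch offset in 16-bit code units: e.g.
+     * 0xfe is -2, i.e. two code units backward */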
+ if ((s1)vdst < 0)
+ ILOGV("|goto -0x%02x", -((s1)vdst));
+ else
+ ILOGV("|goto +0x%02x", ((s1)vdst));
+ ILOGV("> branch taken");
+ if ((s1)vdst < 0)
+ PERIODIC_CHECKS(kInterpEntryInstr, (s1)vdst);
+ FINISH((s1)vdst);
+OP_END
+
+/* File: c/OP_GOTO_16.c */
+HANDLE_OPCODE(OP_GOTO_16 /*+AAAA*/)
+ {
+ s4 offset = (s2) FETCH(1); /* sign-extend next code unit */
+
+ if (offset < 0)
+ ILOGV("|goto/16 -0x%04x", -offset);
+ else
+ ILOGV("|goto/16 +0x%04x", offset);
+ ILOGV("> branch taken");
+ if (offset < 0)
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_GOTO_32.c */
+HANDLE_OPCODE(OP_GOTO_32 /*+AAAAAAAA*/)
+ {
+ s4 offset = FETCH(1); /* low-order 16 bits */
+ offset |= ((s4) FETCH(2)) << 16; /* high-order 16 bits */
+
+ if (offset < 0)
+ ILOGV("|goto/32 -0x%08x", -offset);
+ else
+ ILOGV("|goto/32 +0x%08x", offset);
+ ILOGV("> branch taken");
+ if (offset <= 0) /* allowed to branch to self */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_PACKED_SWITCH.c */
+HANDLE_OPCODE(OP_PACKED_SWITCH /*vAA, +BBBB*/)
+ {
+ const u2* switchData;
+ u4 testVal;
+ s4 offset;
+
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+ switchData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (switchData < curMethod->insns ||
+ switchData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+ GOTO_exceptionThrown();
+ }
+#endif
+ testVal = GET_REGISTER(vsrc1);
+
+ offset = dvmInterpHandlePackedSwitch(switchData, testVal);
+ ILOGV("> branch taken (0x%04x)\n", offset);
+ if (offset <= 0) /* uncommon */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_SPARSE_SWITCH.c */
+HANDLE_OPCODE(OP_SPARSE_SWITCH /*vAA, +BBBB*/)
+ {
+ const u2* switchData;
+ u4 testVal;
+ s4 offset;
+
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+ switchData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (switchData < curMethod->insns ||
+ switchData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+ GOTO_exceptionThrown();
+ }
+#endif
+ testVal = GET_REGISTER(vsrc1);
+
+ offset = dvmInterpHandleSparseSwitch(switchData, testVal);
+ ILOGV("> branch taken (0x%04x)\n", offset);
+ if (offset <= 0) /* uncommon */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_CMPL_FLOAT.c */
+HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
+OP_END
+
+/* File: c/OP_CMPG_FLOAT.c */
+HANDLE_OP_CMPX(OP_CMPG_FLOAT, "g-float", float, _FLOAT, 1)
+OP_END
+
+/* File: c/OP_CMPL_DOUBLE.c */
+HANDLE_OP_CMPX(OP_CMPL_DOUBLE, "l-double", double, _DOUBLE, -1)
+OP_END
+
+/* File: c/OP_CMPG_DOUBLE.c */
+HANDLE_OP_CMPX(OP_CMPG_DOUBLE, "g-double", double, _DOUBLE, 1)
+OP_END
+
+/* File: c/OP_CMP_LONG.c */
+HANDLE_OP_CMPX(OP_CMP_LONG, "-long", s8, _WIDE, 0)
+OP_END
+
+/* File: c/OP_IF_EQ.c */
+HANDLE_OP_IF_XX(OP_IF_EQ, "eq", ==)
+OP_END
+
+/* File: c/OP_IF_NE.c */
+HANDLE_OP_IF_XX(OP_IF_NE, "ne", !=)
+OP_END
+
+/* File: c/OP_IF_LT.c */
+HANDLE_OP_IF_XX(OP_IF_LT, "lt", <)
+OP_END
+
+/* File: c/OP_IF_GE.c */
+HANDLE_OP_IF_XX(OP_IF_GE, "ge", >=)
+OP_END
+
+/* File: c/OP_IF_GT.c */
+HANDLE_OP_IF_XX(OP_IF_GT, "gt", >)
+OP_END
+
+/* File: c/OP_IF_LE.c */
+HANDLE_OP_IF_XX(OP_IF_LE, "le", <=)
+OP_END
+
+/* File: c/OP_IF_EQZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_EQZ, "eqz", ==)
+OP_END
+
+/* File: c/OP_IF_NEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_NEZ, "nez", !=)
+OP_END
+
+/* File: c/OP_IF_LTZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_LTZ, "ltz", <)
+OP_END
+
+/* File: c/OP_IF_GEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_GEZ, "gez", >=)
+OP_END
+
+/* File: c/OP_IF_GTZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_GTZ, "gtz", >)
+OP_END
+
+/* File: c/OP_IF_LEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_LEZ, "lez", <=)
+OP_END
+
+/* File: c/OP_UNUSED_3E.c */
+HANDLE_OPCODE(OP_UNUSED_3E)
+OP_END
+
+/* File: c/OP_UNUSED_3F.c */
+HANDLE_OPCODE(OP_UNUSED_3F)
+OP_END
+
+/* File: c/OP_UNUSED_40.c */
+HANDLE_OPCODE(OP_UNUSED_40)
+OP_END
+
+/* File: c/OP_UNUSED_41.c */
+HANDLE_OPCODE(OP_UNUSED_41)
+OP_END
+
+/* File: c/OP_UNUSED_42.c */
+HANDLE_OPCODE(OP_UNUSED_42)
+OP_END
+
+/* File: c/OP_UNUSED_43.c */
+HANDLE_OPCODE(OP_UNUSED_43)
+OP_END
+
+/* File: c/OP_AGET.c */
+HANDLE_OP_AGET(OP_AGET, "", u4, )
+OP_END
+
+/* File: c/OP_AGET_WIDE.c */
+HANDLE_OP_AGET(OP_AGET_WIDE, "-wide", s8, _WIDE)
+OP_END
+
+/* File: c/OP_AGET_OBJECT.c */
+HANDLE_OP_AGET(OP_AGET_OBJECT, "-object", u4, )
+OP_END
+
+/* File: c/OP_AGET_BOOLEAN.c */
+HANDLE_OP_AGET(OP_AGET_BOOLEAN, "-boolean", u1, )
+OP_END
+
+/* File: c/OP_AGET_BYTE.c */
+HANDLE_OP_AGET(OP_AGET_BYTE, "-byte", s1, )
+OP_END
+
+/* File: c/OP_AGET_CHAR.c */
+HANDLE_OP_AGET(OP_AGET_CHAR, "-char", u2, )
+OP_END
+
+/* File: c/OP_AGET_SHORT.c */
+HANDLE_OP_AGET(OP_AGET_SHORT, "-short", s2, )
+OP_END
+
+/* File: c/OP_APUT.c */
+HANDLE_OP_APUT(OP_APUT, "", u4, )
+OP_END
+
+/* File: c/OP_APUT_WIDE.c */
+HANDLE_OP_APUT(OP_APUT_WIDE, "-wide", s8, _WIDE)
+OP_END
+
+/* File: c/OP_APUT_OBJECT.c */
+HANDLE_OPCODE(OP_APUT_OBJECT /*vAA, vBB, vCC*/)
+ {
+ ArrayObject* arrayObj;
+ Object* obj;
+ u2 arrayInfo;
+ EXPORT_PC();
+ vdst = INST_AA(inst); /* AA: source value */
+ arrayInfo = FETCH(1);
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */
+ vsrc2 = arrayInfo >> 8; /* CC: index */
+ ILOGV("|aput%s v%d,v%d,v%d", "-object", vdst, vsrc1, vsrc2);
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ if (!checkForNull((Object*) arrayObj))
+ GOTO_exceptionThrown();
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) {
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+ NULL);
+ GOTO_exceptionThrown();
+ }
+ obj = (Object*) GET_REGISTER(vdst);
+ if (obj != NULL) {
+ if (!checkForNull(obj))
+ GOTO_exceptionThrown();
+ if (!dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz)) {
+ LOGV("Can't put a '%s'(%p) into array type='%s'(%p)\n",
+ obj->clazz->descriptor, obj,
+ arrayObj->obj.clazz->descriptor, arrayObj);
+ //dvmDumpClass(obj->clazz);
+ //dvmDumpClass(arrayObj->obj.clazz);
+ dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+ GOTO_exceptionThrown();
+ }
+ }
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));
+ ((u4*) arrayObj->contents)[GET_REGISTER(vsrc2)] =
+ GET_REGISTER(vdst);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_APUT_BOOLEAN.c */
+HANDLE_OP_APUT(OP_APUT_BOOLEAN, "-boolean", u1, )
+OP_END
+
+/* File: c/OP_APUT_BYTE.c */
+HANDLE_OP_APUT(OP_APUT_BYTE, "-byte", s1, )
+OP_END
+
+/* File: c/OP_APUT_CHAR.c */
+HANDLE_OP_APUT(OP_APUT_CHAR, "-char", u2, )
+OP_END
+
+/* File: c/OP_APUT_SHORT.c */
+HANDLE_OP_APUT(OP_APUT_SHORT, "-short", s2, )
+OP_END
+
+/* File: c/OP_IGET.c */
+HANDLE_IGET_X(OP_IGET, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE.c */
+HANDLE_IGET_X(OP_IGET_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT.c */
+HANDLE_IGET_X(OP_IGET_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IGET_BOOLEAN.c */
+HANDLE_IGET_X(OP_IGET_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_BYTE.c */
+HANDLE_IGET_X(OP_IGET_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_CHAR.c */
+HANDLE_IGET_X(OP_IGET_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_SHORT.c */
+HANDLE_IGET_X(OP_IGET_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT.c */
+HANDLE_IPUT_X(OP_IPUT, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE.c */
+HANDLE_IPUT_X(OP_IPUT_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT.c */
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible. In practice, many popular VMs don't
+ * do this because it slows down a very common operation. It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
+HANDLE_IPUT_X(OP_IPUT_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_BOOLEAN.c */
+HANDLE_IPUT_X(OP_IPUT_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_BYTE.c */
+HANDLE_IPUT_X(OP_IPUT_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_CHAR.c */
+HANDLE_IPUT_X(OP_IPUT_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_SHORT.c */
+HANDLE_IPUT_X(OP_IPUT_SHORT, "", Int, )
+OP_END
+
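+/*
+ * Each HANDLE_SGET_X/HANDLE_SPUT_X line below expands to a complete
+ * opcode handler via token pasting: e.g. HANDLE_SGET_X(OP_SGET_WIDE,
+ * "-wide", Long, _WIDE) fetches the field with dvmGetStaticFieldLong()
+ * and stores it with SET_REGISTER_WIDE().
+ */
+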
+/* File: c/OP_SGET.c */
+HANDLE_SGET_X(OP_SGET, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_WIDE.c */
+HANDLE_SGET_X(OP_SGET_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT.c */
+HANDLE_SGET_X(OP_SGET_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_BOOLEAN.c */
+HANDLE_SGET_X(OP_SGET_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_BYTE.c */
+HANDLE_SGET_X(OP_SGET_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_CHAR.c */
+HANDLE_SGET_X(OP_SGET_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_SHORT.c */
+HANDLE_SGET_X(OP_SGET_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT.c */
+HANDLE_SPUT_X(OP_SPUT, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE.c */
+HANDLE_SPUT_X(OP_SPUT_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT.c */
+HANDLE_SPUT_X(OP_SPUT_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_BOOLEAN.c */
+HANDLE_SPUT_X(OP_SPUT_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_BYTE.c */
+HANDLE_SPUT_X(OP_SPUT_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_CHAR.c */
+HANDLE_SPUT_X(OP_SPUT_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_SHORT.c */
+HANDLE_SPUT_X(OP_SPUT_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeVirtual, false);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeSuper, false);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeDirect, false);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeStatic, false);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeInterface, false);
+OP_END
+
+/* File: c/OP_UNUSED_73.c */
+HANDLE_OPCODE(OP_UNUSED_73)
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeVirtual, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeSuper, true);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeDirect, true);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeStatic, true);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeInterface, true);
+OP_END
+
+/* File: c/OP_UNUSED_79.c */
+HANDLE_OPCODE(OP_UNUSED_79)
+OP_END
+
+/* File: c/OP_UNUSED_7A.c */
+HANDLE_OPCODE(OP_UNUSED_7A)
+OP_END
+
+/* File: c/OP_NEG_INT.c */
+HANDLE_UNOP(OP_NEG_INT, "neg-int", -, , )
+OP_END
+
+/* File: c/OP_NOT_INT.c */
+HANDLE_UNOP(OP_NOT_INT, "not-int", , ^ 0xffffffff, )
+OP_END
+
+/* File: c/OP_NEG_LONG.c */
+HANDLE_UNOP(OP_NEG_LONG, "neg-long", -, , _WIDE)
+OP_END
+
+/* File: c/OP_NOT_LONG.c */
+HANDLE_UNOP(OP_NOT_LONG, "not-long", , ^ 0xffffffffffffffffULL, _WIDE)
+OP_END
+
+/* File: c/OP_NEG_FLOAT.c */
+HANDLE_UNOP(OP_NEG_FLOAT, "neg-float", -, , _FLOAT)
+OP_END
+
+/* File: c/OP_NEG_DOUBLE.c */
+HANDLE_UNOP(OP_NEG_DOUBLE, "neg-double", -, , _DOUBLE)
+OP_END
+
+/* File: c/OP_INT_TO_LONG.c */
+HANDLE_NUMCONV(OP_INT_TO_LONG, "int-to-long", _INT, _WIDE)
+OP_END
+
+/* File: c/OP_INT_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_INT_TO_FLOAT, "int-to-float", _INT, _FLOAT)
+OP_END
+
+/* File: c/OP_INT_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_INT_TO_DOUBLE, "int-to-double", _INT, _DOUBLE)
+OP_END
+
+/* File: c/OP_LONG_TO_INT.c */
+HANDLE_NUMCONV(OP_LONG_TO_INT, "long-to-int", _WIDE, _INT)
+OP_END
+
+/* File: c/OP_LONG_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_LONG_TO_FLOAT, "long-to-float", _WIDE, _FLOAT)
+OP_END
+
+/* File: c/OP_LONG_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_LONG_TO_DOUBLE, "long-to-double", _WIDE, _DOUBLE)
+OP_END
+
+/* File: c/OP_FLOAT_TO_INT.c */
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_INT, "float-to-int",
+ float, _FLOAT, s4, _INT)
+OP_END
+
+/* File: c/OP_FLOAT_TO_LONG.c */
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_LONG, "float-to-long",
+ float, _FLOAT, s8, _WIDE)
+OP_END
+
+/* File: c/OP_FLOAT_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_FLOAT_TO_DOUBLE, "float-to-double", _FLOAT, _DOUBLE)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_INT.c */
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_INT, "double-to-int",
+ double, _DOUBLE, s4, _INT)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_LONG.c */
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_LONG, "double-to-long",
+ double, _DOUBLE, s8, _WIDE)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_DOUBLE_TO_FLOAT, "double-to-float", _DOUBLE, _FLOAT)
+OP_END
+
+/* File: c/OP_INT_TO_BYTE.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_BYTE, "byte", s1)
+OP_END
+
+/* File: c/OP_INT_TO_CHAR.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_CHAR, "char", u2)
+OP_END
+
+/* File: c/OP_INT_TO_SHORT.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_SHORT, "short", s2) /* want sign bit */
+OP_END
+
+/* File: c/OP_ADD_INT.c */
+HANDLE_OP_X_INT(OP_ADD_INT, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_INT.c */
+HANDLE_OP_X_INT(OP_SUB_INT, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_INT.c */
+HANDLE_OP_X_INT(OP_MUL_INT, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT.c */
+HANDLE_OP_X_INT(OP_DIV_INT, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT.c */
+HANDLE_OP_X_INT(OP_REM_INT, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT.c */
+HANDLE_OP_X_INT(OP_AND_INT, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT.c */
+HANDLE_OP_X_INT(OP_OR_INT, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT.c */
+HANDLE_OP_X_INT(OP_XOR_INT, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT.c */
+HANDLE_OP_SHX_INT(OP_SHL_INT, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT.c */
+HANDLE_OP_SHX_INT(OP_SHR_INT, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT.c */
+HANDLE_OP_SHX_INT(OP_USHR_INT, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_ADD_LONG.c */
+HANDLE_OP_X_LONG(OP_ADD_LONG, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_LONG.c */
+HANDLE_OP_X_LONG(OP_SUB_LONG, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_LONG.c */
+HANDLE_OP_X_LONG(OP_MUL_LONG, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_LONG.c */
+HANDLE_OP_X_LONG(OP_DIV_LONG, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_LONG.c */
+HANDLE_OP_X_LONG(OP_REM_LONG, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_LONG.c */
+HANDLE_OP_X_LONG(OP_AND_LONG, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_LONG.c */
+HANDLE_OP_X_LONG(OP_OR_LONG, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_LONG.c */
+HANDLE_OP_X_LONG(OP_XOR_LONG, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_LONG.c */
+HANDLE_OP_SHX_LONG(OP_SHL_LONG, "shl", (s8), <<)
+OP_END
+
+/* File: c/OP_SHR_LONG.c */
+HANDLE_OP_SHX_LONG(OP_SHR_LONG, "shr", (s8), >>)
+OP_END
+
+/* File: c/OP_USHR_LONG.c */
+HANDLE_OP_SHX_LONG(OP_USHR_LONG, "ushr", (u8), >>)
+OP_END
+
+/* File: c/OP_ADD_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_ADD_FLOAT, "add", +)
+OP_END
+
+/* File: c/OP_SUB_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_SUB_FLOAT, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_MUL_FLOAT, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_DIV_FLOAT, "div", /)
+OP_END
+
+/* File: c/OP_REM_FLOAT.c */
+HANDLE_OPCODE(OP_REM_FLOAT /*vAA, vBB, vCC*/)
+ {
+ u2 srcRegs;
+ vdst = INST_AA(inst);
+ srcRegs = FETCH(1);
+ vsrc1 = srcRegs & 0xff;
+ vsrc2 = srcRegs >> 8;
+ ILOGV("|%s-float v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+ SET_REGISTER_FLOAT(vdst,
+ fmodf(GET_REGISTER_FLOAT(vsrc1), GET_REGISTER_FLOAT(vsrc2)));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ADD_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_ADD_DOUBLE, "add", +)
+OP_END
+
+/* File: c/OP_SUB_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_SUB_DOUBLE, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_MUL_DOUBLE, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_DIV_DOUBLE, "div", /)
+OP_END
+
+/* File: c/OP_REM_DOUBLE.c */
+HANDLE_OPCODE(OP_REM_DOUBLE /*vAA, vBB, vCC*/)
+ {
+ u2 srcRegs;
+ vdst = INST_AA(inst);
+ srcRegs = FETCH(1);
+ vsrc1 = srcRegs & 0xff;
+ vsrc2 = srcRegs >> 8;
+ ILOGV("|%s-double v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+ SET_REGISTER_DOUBLE(vdst,
+ fmod(GET_REGISTER_DOUBLE(vsrc1), GET_REGISTER_DOUBLE(vsrc2)));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ADD_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_ADD_INT_2ADDR, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_SUB_INT_2ADDR, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_MUL_INT_2ADDR, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_DIV_INT_2ADDR, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_REM_INT_2ADDR, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_AND_INT_2ADDR, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_OR_INT_2ADDR, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_XOR_INT_2ADDR, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_SHL_INT_2ADDR, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_SHR_INT_2ADDR, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_USHR_INT_2ADDR, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_ADD_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_ADD_LONG_2ADDR, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_SUB_LONG_2ADDR, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_MUL_LONG_2ADDR, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_DIV_LONG_2ADDR, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_REM_LONG_2ADDR, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_AND_LONG_2ADDR, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_OR_LONG_2ADDR, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_XOR_LONG_2ADDR, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHL_LONG_2ADDR, "shl", (s8), <<)
+OP_END
+
+/* File: c/OP_SHR_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHR_LONG_2ADDR, "shr", (s8), >>)
+OP_END
+
+/* File: c/OP_USHR_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_USHR_LONG_2ADDR, "ushr", (u8), >>)
+OP_END
+
+/* File: c/OP_ADD_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_ADD_FLOAT_2ADDR, "add", +)
+OP_END
+
+/* File: c/OP_SUB_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_SUB_FLOAT_2ADDR, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_MUL_FLOAT_2ADDR, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_DIV_FLOAT_2ADDR, "div", /)
+OP_END
+
+/* File: c/OP_REM_FLOAT_2ADDR.c */
+HANDLE_OPCODE(OP_REM_FLOAT_2ADDR /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|%s-float-2addr v%d,v%d", "mod", vdst, vsrc1);
+ SET_REGISTER_FLOAT(vdst,
+ fmodf(GET_REGISTER_FLOAT(vdst), GET_REGISTER_FLOAT(vsrc1)));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_ADD_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_ADD_DOUBLE_2ADDR, "add", +)
+OP_END
+
+/* File: c/OP_SUB_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_SUB_DOUBLE_2ADDR, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_MUL_DOUBLE_2ADDR, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_DIV_DOUBLE_2ADDR, "div", /)
+OP_END
+
+/* File: c/OP_REM_DOUBLE_2ADDR.c */
+HANDLE_OPCODE(OP_REM_DOUBLE_2ADDR /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|%s-double-2addr v%d,v%d", "mod", vdst, vsrc1);
+ SET_REGISTER_DOUBLE(vdst,
+ fmod(GET_REGISTER_DOUBLE(vdst), GET_REGISTER_DOUBLE(vsrc1)));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_ADD_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_ADD_INT_LIT16, "add", +, 0)
+OP_END
+
+/* File: c/OP_RSUB_INT.c */
+HANDLE_OPCODE(OP_RSUB_INT /*vA, vB, #+CCCC*/)
+ {
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ vsrc2 = FETCH(1);
+ ILOGV("|rsub-int v%d,v%d,#+0x%04x", vdst, vsrc1, vsrc2);
+ SET_REGISTER(vdst, (s2) vsrc2 - (s4) GET_REGISTER(vsrc1));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MUL_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_MUL_INT_LIT16, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_DIV_INT_LIT16, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_REM_INT_LIT16, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_AND_INT_LIT16, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_OR_INT_LIT16, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_XOR_INT_LIT16, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_ADD_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_ADD_INT_LIT8, "add", +, 0)
+OP_END
+
+/* File: c/OP_RSUB_INT_LIT8.c */
+HANDLE_OPCODE(OP_RSUB_INT_LIT8 /*vAA, vBB, #+CC*/)
+ {
+ u2 litInfo;
+ vdst = INST_AA(inst);
+ litInfo = FETCH(1);
+ vsrc1 = litInfo & 0xff;
+ vsrc2 = litInfo >> 8;
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", "rsub", vdst, vsrc1, vsrc2);
+ SET_REGISTER(vdst, (s1) vsrc2 - (s4) GET_REGISTER(vsrc1));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MUL_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_MUL_INT_LIT8, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_DIV_INT_LIT8, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_REM_INT_LIT8, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_AND_INT_LIT8, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_OR_INT_LIT8, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_XOR_INT_LIT8, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_SHL_INT_LIT8, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_SHR_INT_LIT8, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_USHR_INT_LIT8, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_UNUSED_E3.c */
+HANDLE_OPCODE(OP_UNUSED_E3)
+OP_END
+
+/* File: c/OP_UNUSED_E4.c */
+HANDLE_OPCODE(OP_UNUSED_E4)
+OP_END
+
+/* File: c/OP_UNUSED_E5.c */
+HANDLE_OPCODE(OP_UNUSED_E5)
+OP_END
+
+/* File: c/OP_UNUSED_E6.c */
+HANDLE_OPCODE(OP_UNUSED_E6)
+OP_END
+
+/* File: c/OP_UNUSED_E7.c */
+HANDLE_OPCODE(OP_UNUSED_E7)
+OP_END
+
+/* File: c/OP_UNUSED_E8.c */
+HANDLE_OPCODE(OP_UNUSED_E8)
+OP_END
+
+/* File: c/OP_UNUSED_E9.c */
+HANDLE_OPCODE(OP_UNUSED_E9)
+OP_END
+
+/* File: c/OP_UNUSED_EA.c */
+HANDLE_OPCODE(OP_UNUSED_EA)
+OP_END
+
+/* File: c/OP_UNUSED_EB.c */
+HANDLE_OPCODE(OP_UNUSED_EB)
+OP_END
+
+/* File: c/OP_UNUSED_EC.c */
+HANDLE_OPCODE(OP_UNUSED_EC)
+OP_END
+
+/* File: c/OP_UNUSED_ED.c */
+HANDLE_OPCODE(OP_UNUSED_ED)
+OP_END
+
+/* File: c/OP_EXECUTE_INLINE.c */
+HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
+ {
+ /*
+ * This has the same form as other method calls, but we ignore
+ * the 5th argument (vA). This is chiefly because the first four
+ * arguments to a function on ARM are in registers.
+ *
+ * We only set the arguments that are actually used, leaving
+ * the rest uninitialized. We're assuming that, if the method
+ * needs them, they'll be specified in the call.
+ *
+ * This annoys gcc when optimizations are enabled, causing a
+ * "may be used uninitialized" warning. We can quiet the warnings
+ * for a slight penalty (5%: 373ns vs. 393ns on empty method). Note
+ * that valgrind is perfectly happy with this arrangement, because
+         * the uninitialized values are never actually used.
+ */
+ u4 arg0, arg1, arg2, arg3;
+ //arg0 = arg1 = arg2 = arg3 = 0;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_B(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* 0-4 register indices */
+ ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+ vsrc1, ref, vdst);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 4);
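+        /* vdst packs up to four 4-bit register indices, lowest nibble
+         * first: e.g. vdst=0x4321 with vsrc1=4 decodes to arg0=v1,
+         * arg1=v2, arg2=v3, arg3=v4 */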
+
+ switch (vsrc1) {
+ case 4:
+ arg3 = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+#if INTERP_TYPE == INTERP_DBG
+ if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+#else
+ if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+#endif
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_UNUSED_EF.c */
+HANDLE_OPCODE(OP_UNUSED_EF)
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+#if INTERP_TYPE != INTERP_DBG
+ //LOGI("Ignoring empty\n");
+ FINISH(3);
+#else
+ if (!gDvm.debuggerActive) {
+ //LOGI("Skipping empty\n");
+ FINISH(3); // don't want it to show up in profiler output
+ } else {
+ //LOGI("Running empty\n");
+ /* fall through to OP_INVOKE_DIRECT */
+ GOTO_invoke(invokeDirect, false);
+ }
+#endif
+OP_END
+
+/* File: c/OP_UNUSED_F1.c */
+HANDLE_OPCODE(OP_UNUSED_F1)
+OP_END
+
+/* File: c/OP_IGET_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_QUICK, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_WIDE_QUICK, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_OBJECT_QUICK, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_QUICK, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_WIDE_QUICK, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_OBJECT_QUICK, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeVirtualQuick, false);
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeVirtualQuick, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_QUICK.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeSuperQuick, false);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeSuperQuick, true);
+OP_END
+
+/* File: c/OP_UNUSED_FC.c */
+HANDLE_OPCODE(OP_UNUSED_FC)
+OP_END
+
+/* File: c/OP_UNUSED_FD.c */
+HANDLE_OPCODE(OP_UNUSED_FD)
+OP_END
+
+/* File: c/OP_UNUSED_FE.c */
+HANDLE_OPCODE(OP_UNUSED_FE)
+OP_END
+
+/* File: c/OP_UNUSED_FF.c */
+HANDLE_OPCODE(OP_UNUSED_FF)
+ /*
+ * In portable interp, most unused opcodes will fall through to here.
+ */
+ LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+ dvmAbort();
+ FINISH(1);
+OP_END
+
+/* File: c/gotoTargets.c */
+/*
+ * C footer. This has some common code shared by the various targets.
+ */
+
+/*
+ * Everything from here on is a "goto target". In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction. Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ u4* contents;
+ char typeCh;
+ int i;
+ u4 arg5;
+
+ EXPORT_PC();
+
+ ref = FETCH(1); /* class ref */
+ vdst = FETCH(2); /* first 4 regs -or- range base */
+
+ if (methodCallRange) {
+ vsrc1 = INST_AA(inst); /* #of elements */
+ arg5 = -1; /* silence compiler warning */
+ ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ arg5 = INST_A(inst);
+ vsrc1 = INST_B(inst); /* #of elements */
+ ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1, ref, vdst, arg5);
+ }
+
+ /*
+ * Resolve the array class.
+ */
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /*
+ if (!dvmIsArrayClass(arrayClass)) {
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "filled-new-array needs array class");
+ GOTO_exceptionThrown();
+ }
+ */
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ /*
+ * Create an array of the specified type.
+ */
+ LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+ typeCh = arrayClass->descriptor[1];
+ if (typeCh == 'D' || typeCh == 'J') {
+ /* category 2 primitives not allowed */
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "bad filled array req");
+ GOTO_exceptionThrown();
+ } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
+ /* TODO: requires multiple "fill in" loops with different widths */
+ LOGE("non-int primitives not implemented\n");
+ dvmThrowException("Ljava/lang/InternalError;",
+ "filled-new-array not implemented for anything but 'int'");
+ GOTO_exceptionThrown();
+ }
+
+ newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+
+ /*
+ * Fill in the elements. It's legal for vsrc1 to be zero.
+ */
+ contents = (u4*) newArray->contents;
+ if (methodCallRange) {
+ for (i = 0; i < vsrc1; i++)
+ contents[i] = GET_REGISTER(vdst+i);
+ } else {
+ assert(vsrc1 <= 5);
+ if (vsrc1 == 5) {
+ contents[4] = GET_REGISTER(arg5);
+ vsrc1--;
+ }
+ for (i = 0; i < vsrc1; i++) {
+ contents[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+ }
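+        /* e.g. a non-range {v1,v2,v3} arrives as vdst=0x321; each pass
+         * peels the low nibble, so contents[] ends up {v1,v2,v3} */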
+
+ retval.l = newArray;
+ }
+ FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+ {
+ Method* baseMethod;
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ /*
+ * This can happen if you create two classes, Base and Sub, where
+ * Sub is a sub-class of Base. Declare a protected abstract
+ * method foo() in Base, and invoke foo() from a method in Base.
+ * Base is an "abstract base class" and is never instantiated
+ * directly. Now, override foo() in Sub, and use Sub. This
+ * works fine unless Sub stops providing an implementation of
+ * the method.
+ */
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+#if 0
+ if (vsrc1 != methodToCall->insSize) {
+ LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ //dvmDumpClass(baseMethod->clazz);
+ //dvmDumpClass(methodToCall->clazz);
+ dvmDumpAllClasses(0);
+ }
+#endif
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+ {
+ Method* baseMethod;
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ * The first arg to dvmResolveMethod() is just the referring class
+ * (used for class loaders and such), so we don't want to pass
+ * the superclass into the resolution call.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in that class' superclass.
+ */
+ if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
+ /*
+ * Method does not exist in the superclass. Could happen if
+ * superclass gets updated.
+ */
+ dvmThrowException("Ljava/lang/NoSuchMethodError;",
+ baseMethod->name);
+ GOTO_exceptionThrown();
+ }
+ methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+ {
+ Object* thisPtr;
+ ClassObject* thisClass;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ thisClass = thisPtr->clazz;
+
+ /*
+ * Given a class and a method index, find the Method* with the
+ * actual code we want to execute.
+ */
+ methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
+ methodClassDex);
+ if (methodToCall == NULL) {
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange) {
+ ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref,
+ METHOD_DIRECT);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown direct method\n"); // should be impossible
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange)
+ ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ else
+ ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown method\n");
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+ {
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(ref < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+#if 0 /* impossible in optimized + verified code */
+ if (ref >= curMethod->clazz->super->vtableCount) {
+ dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(ref < curMethod->clazz->super->vtableCount);
+#endif
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in the method's class' superclass.
+ */
+ methodToCall = curMethod->clazz->super->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ super-virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+
+
+ /*
+ * General handling for return-void, return, and return-wide. Put the
+ * return value in "retval" before jumping here.
+ */
+GOTO_TARGET(returnFromMethod)
+ {
+ StackSaveArea* saveArea;
+
+ /*
+ * We must do this BEFORE we pop the previous stack frame off, so
+ * that the GC can see the return value (if any) in the local vars.
+ *
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+ ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+ retval.j, curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+ //DUMP_REGS(curMethod, fp);
+
+ saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+ debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, curMethod);
+#endif
+
+ /* back up to previous frame and see if we hit a break */
+ fp = saveArea->prevFrame;
+ assert(fp != NULL);
+ if (dvmIsBreakFrame(fp)) {
+ /* bail without popping the method frame from stack */
+ LOGVV("+++ returned into break frame\n");
+ GOTO_bail();
+ }
+
+ /* update thread FP, and reset local variables */
+ self->curFrame = fp;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = saveArea->savedPc;
+ ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+
+ /* use FINISH on the caller's invoke instruction */
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * Jump here when the code throws an exception.
+ *
+ * By the time we get here, the Throwable has been created and the stack
+ * trace has been saved off.
+ */
+GOTO_TARGET(exceptionThrown)
+ {
+ Object* exception;
+ int catchRelPc;
+
+ /*
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+ /*
+ * We save off the exception and clear the exception status. While
+ * processing the exception we might need to load some Throwable
+ * classes, and we don't want class loader exceptions to get
+ * confused with this one.
+ */
+ assert(dvmCheckException(self));
+ exception = dvmGetException(self);
+ dvmAddTrackedAlloc(exception, self);
+ dvmClearException(self);
+
+ LOGV("Handling exception %s at %s:%d\n",
+ exception->clazz->descriptor, curMethod->name,
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ /*
+ * Tell the debugger about it.
+ *
+ * TODO: if the exception was thrown by interpreted code, control
+ * fell through native, and then back to us, we will report the
+ * exception at the point of the throw and again here. We can avoid
+ * this by not reporting exceptions when we jump here directly from
+ * the native call code above, but then we won't report exceptions
+ * that were thrown *from* the JNI code (as opposed to *through* it).
+ *
+ * The correct solution is probably to ignore from-native exceptions
+ * here, and have the JNI exception code do the reporting to the
+ * debugger.
+ */
+ if (gDvm.debuggerActive) {
+ void* catchFrame;
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, true, &catchFrame);
+ dvmDbgPostException(fp, pc - curMethod->insns, catchFrame,
+ catchRelPc, exception);
+ }
+#endif
+
+ /*
+ * We need to unroll to the catch block or the nearest "break"
+ * frame.
+ *
+ * A break frame could indicate that we have reached an intermediate
+ * native call, or have gone off the top of the stack and the thread
+ * needs to exit. Either way, we return from here, leaving the
+ * exception raised.
+ *
+ * If we do find a catch block, we want to transfer execution to
+ * that point.
+ */
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, false, (void*)&fp);
+
+ /*
+ * Restore the stack bounds after an overflow. This isn't going to
+ * be correct in all circumstances, e.g. if JNI code devours the
+ * exception this won't happen until some other exception gets
+ * thrown. If the code keeps pushing the stack bounds we'll end
+ * up aborting the VM.
+ *
+ * Note we want to do this *after* the call to dvmFindCatchBlock,
+ * because that may need extra stack space to resolve exception
+ * classes (e.g. through a class loader).
+ */
+ if (self->stackOverflowed)
+ dvmCleanupStackOverflow(self);
+
+ if (catchRelPc < 0) {
+ /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+ LOGD("Exception %s from %s:%d not caught locally\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+#endif
+ dvmSetException(self, exception);
+ dvmReleaseTrackedAlloc(exception, self);
+ GOTO_bail();
+ }
+
+#if DVM_SHOW_EXCEPTION >= 3
+ {
+ const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+ LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns),
+ dvmGetMethodSourceFile(catchMethod),
+ dvmLineNumFromPC(catchMethod, catchRelPc));
+ }
+#endif
+
+ /*
+ * Adjust local variables to match self->curFrame and the
+ * updated PC.
+ */
+ //fp = (u4*) self->curFrame;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = curMethod->insns + catchRelPc;
+ ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, false); // show all regs
+
+ /*
+ * Restore the exception if the handler wants it.
+ *
+ * The Dalvik spec mandates that, if an exception handler wants to
+ * do something with the exception, the first instruction executed
+ * must be "move-exception". We can pass the exception along
+ * through the thread struct, and let the move-exception instruction
+ * clear it for us.
+ *
+ * If the handler doesn't call move-exception, we don't want to
+ * finish here with an exception still pending.
+ */
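+ /*
+ * Illustrative Dalvik snippet (not generated output): a handler
+ * that uses the exception begins with
+ *
+ *     move-exception v0
+ *
+ * which copies the pending exception into v0 and clears it.
+ */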
+ if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+ dvmSetException(self, exception);
+
+ dvmReleaseTrackedAlloc(exception, self);
+ FINISH(0);
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * General handling for invoke-{virtual,super,direct,static,interface},
+ * including "quick" variants.
+ *
+ * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+ * depending on whether this is a "/range" instruction.
+ *
+ * For a range call:
+ * "vsrc1" holds the argument count (8 bits)
+ * "vdst" holds the first argument in the range
+ * For a non-range call:
+ * "vsrc1" holds the argument count (4 bits) and the 5th argument index
+ * "vdst" holds four 4-bit register indices
+ *
+ * The caller must EXPORT_PC before jumping here, because any method
+ * call can throw a stack overflow exception.
+ */
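+/*
+ * Worked example (illustrative): for invoke-virtual {v2, v3, v4},
+ * "vsrc1" is 0x30 (count 3 in the high nibble, 5th-arg nibble unused)
+ * and "vdst" is 0x0432, so the non-range unpacking below reads v2, v3
+ * and v4 from the three low nibbles of "vdst".
+ */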
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+ u2 count, u2 regs)
+ {
+ STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
+
+ //printf("range=%d call=%p count=%d regs=0x%04x\n",
+ // methodCallRange, methodToCall, count, regs);
+ //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+ // methodToCall->name, methodToCall->signature);
+
+ u4* outs;
+ int i;
+
+ /*
+ * Copy args. This may corrupt vsrc1/vdst.
+ */
+ if (methodCallRange) {
+ // could use memcpy or a "Duff's device"; most functions have
+ // so few args it won't matter much
+ assert(vsrc1 <= curMethod->outsSize);
+ assert(vsrc1 == methodToCall->insSize);
+ outs = OUTS_FROM_FP(fp, vsrc1);
+ for (i = 0; i < vsrc1; i++)
+ outs[i] = GET_REGISTER(vdst+i);
+ } else {
+ u4 count = vsrc1 >> 4;
+
+ assert(count <= curMethod->outsSize);
+ assert(count == methodToCall->insSize);
+ assert(count <= 5);
+
+ outs = OUTS_FROM_FP(fp, count);
+#if 0
+ if (count == 5) {
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ count--;
+ }
+ for (i = 0; i < (int) count; i++) {
+ outs[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+#else
+ // This version executes fewer instructions but is larger
+ // overall. Seems to be a teensy bit faster.
+ assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
+ switch (count) {
+ case 5:
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ /* fall through */
+ case 4:
+ outs[3] = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ outs[0] = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default:
+ ;
+ }
+#endif
+ }
+ }
+
+ /*
+ * (This was originally a "goto" target; I've kept it separate from the
+ * stuff above in case we want to refactor things again.)
+ *
+ * At this point, we have the arguments stored in the "outs" area of
+ * the current method's stack frame, and the method to call in
+ * "methodToCall". Push a new stack frame.
+ */
+ {
+ StackSaveArea* newSaveArea;
+ u4* newFp;
+
+ ILOGV("> %s%s.%s %s",
+ dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ methodToCall->signature);
+
+ newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+ newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+ /* verify that we have enough space */
+ if (true) {
+ u1* bottom;
+ bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+ if (bottom < self->interpStackEnd) {
+ /* stack overflow */
+ LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
+ self->interpStackStart, self->interpStackEnd, bottom,
+ self->interpStackSize, methodToCall->name);
+ dvmHandleStackOverflow(self);
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+ //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+ // fp, newFp, newSaveArea, bottom);
+ }
+
+#ifdef LOG_INSTR
+ if (methodToCall->registersSize > methodToCall->insSize) {
+ /*
+ * This makes valgrind quiet when we print registers that
+ * haven't been initialized. Turn it off when the debug
+ * messages are disabled -- we want valgrind to report any
+ * used-before-initialized issues.
+ */
+ memset(newFp, 0xcc,
+ (methodToCall->registersSize - methodToCall->insSize) * 4);
+ }
+#endif
+
+#ifdef EASY_GDB
+ newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+ newSaveArea->prevFrame = fp;
+ newSaveArea->savedPc = pc;
+ newSaveArea->method = methodToCall;
+
+ if (!dvmIsNativeMethod(methodToCall)) {
+ /*
+ * "Call" interpreted code. Reposition the PC, update the
+ * frame pointer and other local state, and continue.
+ */
+ curMethod = methodToCall;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = methodToCall->insns;
+ fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+ debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+ debugIsMethodEntry = true; // profiling, debugging
+#endif
+ ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, true); // show input args
+ FINISH(0); // jump to method start
+ } else {
+ /* set this up for JNI locals, even if not a JNI native */
+ newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+ self->curFrame = newFp;
+
+ DUMP_REGS(methodToCall, newFp, true); // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+ ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+ methodToCall->name, methodToCall->signature);
+
+ /*
+ * Jump through native call bridge. Because we leave no
+ * space for locals on native calls, "newFp" points directly
+ * to the method arguments.
+ */
+ (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+ /* pop frame off */
+ dvmPopJniLocals(self, newSaveArea);
+ self->curFrame = fp;
+
+ /*
+ * If the native code threw an exception, or interpreted code
+ * invoked by the native call threw one and nobody has cleared
+ * it, jump to our local exception handling.
+ */
+ if (dvmCheckException(self)) {
+ LOGV("Exception thrown by/below native code\n");
+ GOTO_exceptionThrown();
+ }
+
+ ILOGD("> retval=0x%llx (leaving native)", retval.j);
+ ILOGD("> (return from native %s.%s to %s.%s %s)",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+ }
+ assert(false); // should not get here
+GOTO_TARGET_END
+
+
+/* File: portable/enddefs.c */
+/*--- end of opcodes ---*/
+
+#ifndef THREADED_INTERP
+ } // end of "switch"
+ } // end of "while"
+#endif
+
+bail:
+ ILOGD("|-- Leaving interpreter loop"); // note "curMethod" may be NULL
+
+ interpState->retval = retval;
+ return false;
+
+bail_switch:
+ /*
+ * The standard interpreter currently doesn't set or care about the
+ * "debugIsMethodEntry" value, so setting this is only of use if we're
+ * switching between two "debug" interpreters, which we never do.
+ *
+ * TODO: figure out if preserving this makes any sense.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# if INTERP_TYPE == INTERP_DBG
+ interpState->debugIsMethodEntry = debugIsMethodEntry;
+# else
+ interpState->debugIsMethodEntry = false;
+# endif
+#endif
+
+ /* export state changes */
+ interpState->method = curMethod;
+ interpState->pc = pc;
+ interpState->fp = fp;
+ /* debugTrackedRefStart doesn't change */
+ interpState->retval = retval; /* need for _entryPoint=ret */
+ interpState->nextMode =
+ (INTERP_TYPE == INTERP_STD) ? INTERP_DBG : INTERP_STD;
+ LOGVV(" meth='%s.%s' pc=0x%x fp=%p\n",
+ curMethod->clazz->descriptor, curMethod->name,
+ pc - curMethod->insns, fp);
+ return true;
+}
+
+
diff --git a/vm/mterp/out/InterpC-portstd.c b/vm/mterp/out/InterpC-portstd.c
new file mode 100644
index 0000000..64e5ccd
--- /dev/null
+++ b/vm/mterp/out/InterpC-portstd.c
@@ -0,0 +1,3984 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'portstd'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_PROFILER
+ * WITH_DEBUGGER
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter. If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP /* threaded vs. while-loop interpreter */
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types. We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union. The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy(). The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method. Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields. Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#else
+ return *((s8*) &ptr[idx]);
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &val, 8);
+#else
+ *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#else
+ return *((double*) &ptr[idx]);
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &dval, 8);
+#else
+ *((double*) &ptr[idx]) = dval;
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the "vAA" 8-bit register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
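+
+/*
+ * Worked example (illustrative): for inst = 0x2301, INST_INST(inst)
+ * yields the opcode byte 0x01, INST_A(inst) yields 3, INST_B(inst)
+ * yields 2, and INST_AA(inst) yields 0x23.
+ */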
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly. If we don't do this, the offset
+ * within the current method won't be shown correctly. See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
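+
+/*
+ * For example, checkForNullExportPC() below calls EXPORT_PC() before
+ * throwing NullPointerException, so the Throwable constructor sees the
+ * offset of the faulting instruction rather than a stale value.
+ */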
+
+/*
+ * Determine if we need to switch to a different interpreter. "_current"
+ * is either INTERP_STD or INTERP_DBG. It is a compile-time constant in
+ * a given generated interpreter file, which lets the compiler remove the
+ * outer conditional from the following.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) ( \
+ (_current == INTERP_STD) ? \
+ dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+#define ATOMIC_CACHE_CALC \
+ dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+ return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+ DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+
+/* File: portable/portstd.c */
+#define INTERP_FUNC_NAME dvmInterpretStd
+#define INTERP_TYPE INTERP_STD
+
+#define CHECK_DEBUG_AND_PROF() ((void)0)
+
+/* File: portable/stubdefs.c */
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...)
+
+#define GOTO_TARGET(_target, ...) _target:
+
+#define GOTO_TARGET_END
+
+/* ugh */
+#define STUB_HACK(x)
+
+/*
+ * Instruction framing. For a switch-oriented implementation this is
+ * case/break, for a threaded implementation it's a goto label and an
+ * instruction fetch/computed goto.
+ *
+ * Assumes the existence of "const u2* pc" and (for threaded operation)
+ * "u2 inst".
+ */
+#ifdef THREADED_INTERP
+# define H(_op) &&op_##_op
+# define HANDLE_OPCODE(_op) op_##_op:
+# define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ inst = FETCH(0); \
+ CHECK_DEBUG_AND_PROF(); \
+ CHECK_TRACKED_REFS(); \
+ goto *handlerTable[INST_INST(inst)]; \
+ }
+#else
+# define HANDLE_OPCODE(_op) case _op:
+# define FINISH(_offset) { ADJUST_PC(_offset); break; }
+#endif
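+
+/*
+ * Illustrative expansion (assuming THREADED_INTERP is defined): a handler
+ * written as
+ *
+ *     HANDLE_OPCODE(OP_NOP)
+ *         FINISH(1);
+ *     OP_END
+ *
+ * becomes the label "op_OP_NOP:" followed by a block that advances the
+ * PC, fetches the next 16-bit code unit, and jumps through
+ * handlerTable[INST_INST(inst)].
+ */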
+
+#define OP_END
+
+#if defined(WITH_TRACKREF_CHECKS)
+# define CHECK_TRACKED_REFS() \
+ dvmInterpCheckTrackedRefs(self, curMethod, debugTrackedRefStart)
+#else
+# define CHECK_TRACKED_REFS() ((void)0)
+#endif
+
+
+/*
+ * The "goto" targets just turn into goto statements. The "arguments" are
+ * passed through local variables.
+ */
+
+#define GOTO_exceptionThrown() goto exceptionThrown;
+
+#define GOTO_returnFromMethod() goto returnFromMethod;
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ methodCallRange = _methodCallRange; \
+ goto _target; \
+ } while(false)
+
+/* for this, the "args" are already in the locals */
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) goto invokeMethod;
+
+#define GOTO_bail() goto bail;
+#define GOTO_bail_switch() goto bail_switch;
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started. If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
+ dvmCheckSuspendQuick(self); \
+ if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
+ ADJUST_PC(_pcadj); \
+ interpState->entryPoint = _entryPoint; \
+ LOGVV("threadid=%d: switch to %s ep=%d adj=%d\n", \
+ self->threadId, \
+ (interpState->nextMode == INTERP_STD) ? "STD" : "DBG", \
+ (_entryPoint), (_pcadj)); \
+ GOTO_bail_switch(); \
+ } \
+ }
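+
+/*
+ * Illustrative use (see HANDLE_OP_IF_XX below): handlers invoke
+ * PERIODIC_CHECKS(kInterpEntryInstr, branchOffset) on backward branches,
+ * so a thread spinning in a loop can still be suspended or switched to
+ * the debug interpreter.
+ */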
+
+
+/* File: c/opcommon.c */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text subst.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
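+
+/*
+ * Illustrative instantiation (assumed; the per-opcode fragments supply
+ * the parameters): int-to-long would use
+ *     HANDLE_NUMCONV(OP_INT_TO_LONG, "int-to-long", _INT, _WIDE)
+ * which expands to SET_REGISTER_WIDE(vdst, GET_REGISTER_INT(vsrc1)).
+ */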
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 - 1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d\n", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
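+
+/*
+ * Illustrative instantiation (assumed; the per-opcode fragments supply
+ * the parameters): cmpl-float would use
+ *     HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
+ * so a NaN operand produces -1, while the "g" variant passes 1 as
+ * _nanVal.
+ */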
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
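+
+/*
+ * Illustrative instantiation (assumed; the per-opcode fragments supply
+ * the parameters): div-int would use
+ *     HANDLE_OP_X_INT(OP_DIV_INT, "div", /, 1)
+ * so the _chkdiv path above throws ArithmeticException on a zero divisor
+ * and pins the overflowing 0x80000000 / -1 case to 0x80000000.
+ */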
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
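+
+/*
+ * Worked example (illustrative): shift distances use only the low five
+ * bits, so shifting by a register that holds 33 shifts by 1
+ * (33 & 0x1f == 1), matching Dalvik's (and Java's) 32-bit shift
+ * semantics.
+ */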
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ LOGV("Invalid array access: %p %d (len=%d)\n", \
+ arrayObj, vsrc2, arrayObj->length); \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
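+/*
+ * A sketch of the interleaving above, as bit patterns (not live code):
+ *
+ *     field = 0xffffffff;                  // iput-quick stores all 32 bits
+ *     field = (field & ~0xffff) | 0x0001;  // iput-short touches only 16
+ *     // iget-quick now reads 0xffff0001, which is neither -1 nor 1
+ */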
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&sfield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&sfield->field); \
+ } \
+ FINISH(2);
+
+
+/* File: portable/entry.c */
+/*
+ * Main interpreter loop.
+ *
+ * This was written with an ARM implementation in mind.
+ */
+bool INTERP_FUNC_NAME(Thread* self, InterpState* interpState)
+{
+#if defined(EASY_GDB)
+ StackSaveArea* debugSaveArea = SAVEAREA_FROM_FP(self->curFrame);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+ bool debugIsMethodEntry = interpState->debugIsMethodEntry;
+#endif
+#if defined(WITH_TRACKREF_CHECKS)
+ int debugTrackedRefStart = interpState->debugTrackedRefStart;
+#endif
+ DvmDex* methodClassDex; // curMethod->clazz->pDvmDex
+ JValue retval;
+
+ /* core state */
+ const Method* curMethod; // method we're interpreting
+ const u2* pc; // program counter
+ u4* fp; // frame pointer
+ u2 inst; // current instruction
+ /* instruction decoding */
+ u2 ref; // 16-bit quantity fetched directly
+ u2 vsrc1, vsrc2, vdst; // usually used for register indexes
+ /* method call setup */
+ const Method* methodToCall;
+ bool methodCallRange;
+
+#if defined(THREADED_INTERP)
+ /* static computed goto table */
+ DEFINE_GOTO_TABLE(handlerTable);
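+    /* each entry is the address of an opcode handler label (gcc's
+     * labels-as-values extension); FINISH() dispatches through it */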
+#endif
+
+ /* copy state in */
+ curMethod = interpState->method;
+ pc = interpState->pc;
+ fp = interpState->fp;
+ retval = interpState->retval; /* only need for kInterpEntryReturn? */
+
+ methodClassDex = curMethod->clazz->pDvmDex;
+
+ LOGVV("threadid=%d: entry(%s) %s.%s pc=0x%x fp=%p ep=%d\n",
+ self->threadId, (interpState->nextMode == INTERP_STD) ? "STD" : "DBG",
+ curMethod->clazz->descriptor, curMethod->name, pc - curMethod->insns,
+ fp, interpState->entryPoint);
+
+ /*
+ * DEBUG: scramble this to ensure we're not relying on it.
+ */
+ methodToCall = (const Method*) -1;
+
+#if INTERP_TYPE == INTERP_DBG
+ if (debugIsMethodEntry) {
+ ILOGD("|-- Now interpreting %s.%s", curMethod->clazz->descriptor,
+ curMethod->name);
+ DUMP_REGS(curMethod, interpState->fp, false);
+ }
+#endif
+
+ switch (interpState->entryPoint) {
+ case kInterpEntryInstr:
+ /* just fall through to instruction loop or threaded kickstart */
+ break;
+ case kInterpEntryReturn:
+ goto returnFromMethod;
+ case kInterpEntryThrow:
+ goto exceptionThrown;
+ default:
+ dvmAbort();
+ }
+
+#ifdef THREADED_INTERP
+ FINISH(0); /* fetch and execute first instruction */
+#else
+ while (1) {
+ CHECK_DEBUG_AND_PROF(); /* service debugger and profiling */
+ CHECK_TRACKED_REFS(); /* check local reference tracking */
+
+ /* fetch the next 16 bits from the instruction stream */
+ inst = FETCH(0);
+
+ switch (INST_INST(inst)) {
+#endif
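+/*
+ * In the threaded case each FINISH() fetches the next instruction and
+ * jumps straight to its handler; in the switch-based case FINISH()
+ * breaks back out to the while loop above.
+ */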
+
+/*--- start of opcodes ---*/
+
+/* File: c/OP_NOP.c */
+HANDLE_OPCODE(OP_NOP)
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE.c */
+HANDLE_OPCODE(OP_MOVE /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_WIDE.c */
+HANDLE_OPCODE(OP_MOVE_WIDE /*vA, vB*/)
+ /* IMPORTANT: must correctly handle overlapping registers, e.g. both
+ * "move-wide v6, v7" and "move-wide v7, v6" */
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move-wide v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+ kSpacing+5, vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_WIDE_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move-wide/from16 v%d,v%d (v%d=0x%08llx)", vdst, vsrc1,
+ vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MOVE_WIDE_16.c */
+HANDLE_OPCODE(OP_MOVE_WIDE_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move-wide/16 v%d,v%d %s(v%d=0x%08llx)", vdst, vsrc1,
+ kSpacing+8, vdst, GET_REGISTER_WIDE(vsrc1));
+ SET_REGISTER_WIDE(vdst, GET_REGISTER_WIDE(vsrc1));
+ FINISH(3);
+OP_END
+
+/* File: c/OP_MOVE_OBJECT.c */
+/* File: c/OP_MOVE.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|move%s v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(1);
+OP_END
+
+
+/* File: c/OP_MOVE_OBJECT_FROM16.c */
+/* File: c/OP_MOVE_FROM16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_FROM16 /*vAA, vBBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|move%s/from16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_FROM16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(2);
+OP_END
+
+
+/* File: c/OP_MOVE_OBJECT_16.c */
+/* File: c/OP_MOVE_16.c */
+HANDLE_OPCODE(OP_MOVE_OBJECT_16 /*vAAAA, vBBBB*/)
+ vdst = FETCH(1);
+ vsrc1 = FETCH(2);
+ ILOGV("|move%s/16 v%d,v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_16) ? "" : "-object", vdst, vsrc1,
+ kSpacing, vdst, GET_REGISTER(vsrc1));
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1));
+ FINISH(3);
+OP_END
+
+
+/* File: c/OP_MOVE_RESULT.c */
+HANDLE_OPCODE(OP_MOVE_RESULT /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+        vdst, kSpacing+4, vdst, retval.i);
+ SET_REGISTER(vdst, retval.i);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_WIDE.c */
+HANDLE_OPCODE(OP_MOVE_RESULT_WIDE /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result-wide v%d %s(0x%08llx)", vdst, kSpacing, retval.j);
+ SET_REGISTER_WIDE(vdst, retval.j);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MOVE_RESULT_OBJECT.c */
+/* File: c/OP_MOVE_RESULT.c */
+HANDLE_OPCODE(OP_MOVE_RESULT_OBJECT /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-result%s v%d %s(v%d=0x%08x)",
+ (INST_INST(inst) == OP_MOVE_RESULT) ? "" : "-object",
+        vdst, kSpacing+4, vdst, retval.i);
+ SET_REGISTER(vdst, retval.i);
+ FINISH(1);
+OP_END
+
+
+/* File: c/OP_MOVE_EXCEPTION.c */
+HANDLE_OPCODE(OP_MOVE_EXCEPTION /*vAA*/)
+ vdst = INST_AA(inst);
+ ILOGV("|move-exception v%d", vdst);
+ assert(self->exception != NULL);
+ SET_REGISTER(vdst, (u4)self->exception);
+ dvmClearException(self);
+ FINISH(1);
+OP_END
+
+/* File: c/OP_RETURN_VOID.c */
+HANDLE_OPCODE(OP_RETURN_VOID /**/)
+ ILOGV("|return-void");
+#ifndef NDEBUG
+ retval.j = 0xababababULL; // placate valgrind
+#endif
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN.c */
+HANDLE_OPCODE(OP_RETURN /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return%s v%d",
+ (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+ retval.i = GET_REGISTER(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN_WIDE.c */
+HANDLE_OPCODE(OP_RETURN_WIDE /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return-wide v%d", vsrc1);
+ retval.j = GET_REGISTER_WIDE(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+/* File: c/OP_RETURN_OBJECT.c */
+/* File: c/OP_RETURN.c */
+HANDLE_OPCODE(OP_RETURN_OBJECT /*vAA*/)
+ vsrc1 = INST_AA(inst);
+ ILOGV("|return%s v%d",
+ (INST_INST(inst) == OP_RETURN) ? "" : "-object", vsrc1);
+ retval.i = GET_REGISTER(vsrc1);
+ GOTO_returnFromMethod();
+OP_END
+
+
+/* File: c/OP_CONST_4.c */
+HANDLE_OPCODE(OP_CONST_4 /*vA, #+B*/)
+ {
+ s4 tmp;
+
+ vdst = INST_A(inst);
+ tmp = (s4) (INST_B(inst) << 28) >> 28; // sign extend 4-bit value
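+        /* e.g. B=0xf: 0xf<<28 = 0xf0000000, and the arithmetic >>28
+         * yields 0xffffffff, i.e. -1 */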
+ ILOGV("|const/4 v%d,#0x%02x", vdst, (s4)tmp);
+ SET_REGISTER(vdst, tmp);
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_CONST_16.c */
+HANDLE_OPCODE(OP_CONST_16 /*vAA, #+BBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+ SET_REGISTER(vdst, (s2) vsrc1);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST.c */
+HANDLE_OPCODE(OP_CONST /*vAA, #+BBBBBBBB*/)
+ {
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
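+        /* 32-bit (and wider) literals sit least-significant code unit
+         * first in the instruction stream */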
+ ILOGV("|const v%d,#0x%08x", vdst, tmp);
+ SET_REGISTER(vdst, tmp);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_HIGH16.c */
+HANDLE_OPCODE(OP_CONST_HIGH16 /*vAA, #+BBBB0000*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const/high16 v%d,#0x%04x0000", vdst, vsrc1);
+ SET_REGISTER(vdst, vsrc1 << 16);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_16.c */
+HANDLE_OPCODE(OP_CONST_WIDE_16 /*vAA, #+BBBB*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const-wide/16 v%d,#0x%04x", vdst, (s2)vsrc1);
+ SET_REGISTER_WIDE(vdst, (s2)vsrc1);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_WIDE_32.c */
+HANDLE_OPCODE(OP_CONST_WIDE_32 /*vAA, #+BBBBBBBB*/)
+ {
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const-wide/32 v%d,#0x%08x", vdst, tmp);
+ SET_REGISTER_WIDE(vdst, (s4) tmp);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_WIDE.c */
+HANDLE_OPCODE(OP_CONST_WIDE /*vAA, #+BBBBBBBBBBBBBBBB*/)
+ {
+ u8 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u8)FETCH(2) << 16;
+ tmp |= (u8)FETCH(3) << 32;
+ tmp |= (u8)FETCH(4) << 48;
+ ILOGV("|const-wide v%d,#0x%08llx", vdst, tmp);
+ SET_REGISTER_WIDE(vdst, tmp);
+ }
+ FINISH(5);
+OP_END
+
+/* File: c/OP_CONST_WIDE_HIGH16.c */
+HANDLE_OPCODE(OP_CONST_WIDE_HIGH16 /*vAA, #+BBBB000000000000*/)
+ vdst = INST_AA(inst);
+ vsrc1 = FETCH(1);
+ ILOGV("|const-wide/high16 v%d,#0x%04x000000000000", vdst, vsrc1);
+ SET_REGISTER_WIDE(vdst, ((u8) vsrc1) << 48);
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING.c */
+HANDLE_OPCODE(OP_CONST_STRING /*vAA, string@BBBB*/)
+ {
+ StringObject* strObj;
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|const-string v%d string@0x%04x", vdst, ref);
+ strObj = dvmDexGetResolvedString(methodClassDex, ref);
+ if (strObj == NULL) {
+ EXPORT_PC();
+ strObj = dvmResolveString(curMethod->clazz, ref);
+ if (strObj == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) strObj);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_CONST_STRING_JUMBO.c */
+HANDLE_OPCODE(OP_CONST_STRING_JUMBO /*vAA, string@BBBBBBBB*/)
+ {
+ StringObject* strObj;
+ u4 tmp;
+
+ vdst = INST_AA(inst);
+ tmp = FETCH(1);
+ tmp |= (u4)FETCH(2) << 16;
+ ILOGV("|const-string/jumbo v%d string@0x%08x", vdst, tmp);
+ strObj = dvmDexGetResolvedString(methodClassDex, tmp);
+ if (strObj == NULL) {
+ EXPORT_PC();
+ strObj = dvmResolveString(curMethod->clazz, tmp);
+ if (strObj == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) strObj);
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_CONST_CLASS.c */
+HANDLE_OPCODE(OP_CONST_CLASS /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|const-class v%d class@0x%04x", vdst, ref);
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ EXPORT_PC();
+ clazz = dvmResolveClass(curMethod->clazz, ref, true);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, (u4) clazz);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MONITOR_ENTER.c */
+HANDLE_OPCODE(OP_MONITOR_ENTER /*vAA*/)
+ {
+ Object* obj;
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|monitor-enter v%d %s(0x%08x)",
+ vsrc1, kSpacing+6, GET_REGISTER(vsrc1));
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (!checkForNullExportPC(obj, fp, pc))
+ GOTO_exceptionThrown();
+ ILOGV("+ locking %p %s\n", obj, obj->clazz->descriptor);
+#ifdef WITH_MONITOR_TRACKING
+        EXPORT_PC();    /* needed for stack trace */
+#endif
+ dvmLockObject(self, obj);
+#ifdef WITH_DEADLOCK_PREDICTION
+ if (dvmCheckException(self))
+ GOTO_exceptionThrown();
+#endif
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_MONITOR_EXIT.c */
+HANDLE_OPCODE(OP_MONITOR_EXIT /*vAA*/)
+ {
+ Object* obj;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|monitor-exit v%d %s(0x%08x)",
+ vsrc1, kSpacing+5, GET_REGISTER(vsrc1));
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (!checkForNull(obj)) {
+ /*
+ * The exception needs to be processed at the *following*
+ * instruction, not the current instruction (see the Dalvik
+ * spec). Because we're jumping to an exception handler,
+ * we're not actually at risk of skipping an instruction
+ * by doing so.
+ */
+ ADJUST_PC(1); /* monitor-exit width is 1 */
+ GOTO_exceptionThrown();
+ }
+ ILOGV("+ unlocking %p %s\n", obj, obj->clazz->descriptor);
+ if (!dvmUnlockObject(self, obj)) {
+ assert(dvmCheckException(self));
+ ADJUST_PC(1);
+ GOTO_exceptionThrown();
+ }
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_CHECK_CAST.c */
+HANDLE_OPCODE(OP_CHECK_CAST /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+ Object* obj;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst);
+ ref = FETCH(1); /* class to check against */
+ ILOGV("|check-cast v%d,class@0x%04x", vsrc1, ref);
+
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (obj != NULL) {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+ if (!checkForNull(obj))
+ GOTO_exceptionThrown();
+#endif
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ clazz = dvmResolveClass(curMethod->clazz, ref, false);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ if (!dvmInstanceof(obj->clazz, clazz)) {
+ dvmThrowExceptionWithClassMessage(
+ "Ljava/lang/ClassCastException;", obj->clazz->descriptor);
+ GOTO_exceptionThrown();
+ }
+ }
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_INSTANCE_OF.c */
+HANDLE_OPCODE(OP_INSTANCE_OF /*vA, vB, class@CCCC*/)
+ {
+ ClassObject* clazz;
+ Object* obj;
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst); /* object to check */
+ ref = FETCH(1); /* class to check against */
+ ILOGV("|instance-of v%d,v%d,class@0x%04x", vdst, vsrc1, ref);
+
+ obj = (Object*)GET_REGISTER(vsrc1);
+ if (obj == NULL) {
+ SET_REGISTER(vdst, 0);
+ } else {
+#if defined(WITH_EXTRA_OBJECT_VALIDATION)
+ if (!checkForNullExportPC(obj, fp, pc))
+ GOTO_exceptionThrown();
+#endif
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ EXPORT_PC();
+ clazz = dvmResolveClass(curMethod->clazz, ref, true);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+ SET_REGISTER(vdst, dvmInstanceof(obj->clazz, clazz));
+ }
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ARRAY_LENGTH.c */
+HANDLE_OPCODE(OP_ARRAY_LENGTH /*vA, vB*/)
+ {
+ ArrayObject* arrayObj;
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ ILOGV("|array-length v%d,v%d (%p)", vdst, vsrc1, arrayObj);
+ if (!checkForNullExportPC((Object*) arrayObj, fp, pc))
+ GOTO_exceptionThrown();
+ /* verifier guarantees this is an array reference */
+ SET_REGISTER(vdst, arrayObj->length);
+ }
+ FINISH(1);
+OP_END
+
+/* File: c/OP_NEW_INSTANCE.c */
+HANDLE_OPCODE(OP_NEW_INSTANCE /*vAA, class@BBBB*/)
+ {
+ ClassObject* clazz;
+ Object* newObj;
+
+ EXPORT_PC();
+
+ vdst = INST_AA(inst);
+ ref = FETCH(1);
+ ILOGV("|new-instance v%d,class@0x%04x", vdst, ref);
+ clazz = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (clazz == NULL) {
+ clazz = dvmResolveClass(curMethod->clazz, ref, false);
+ if (clazz == NULL)
+ GOTO_exceptionThrown();
+ }
+
+ if (!dvmIsClassInitialized(clazz) && !dvmInitClass(clazz))
+ GOTO_exceptionThrown();
+
+ /*
+ * Note: the verifier can ensure that this never happens, allowing us
+ * to remove the check. However, the spec requires we throw the
+     * exception at runtime, not at verify time, so the verifier would
+ * need to replace the new-instance call with a magic "throw
+ * InstantiationError" instruction.
+ *
+ * Since this relies on the verifier, which is optional, we would
+ * also need a "new-instance-quick" instruction to identify instances
+ * that don't require the check.
+ */
+ if (dvmIsInterfaceClass(clazz) || dvmIsAbstractClass(clazz)) {
+ dvmThrowExceptionWithClassMessage("Ljava/lang/InstantiationError;",
+ clazz->descriptor);
+ GOTO_exceptionThrown();
+ }
+ newObj = dvmAllocObject(clazz, ALLOC_DONT_TRACK);
+ if (newObj == NULL)
+ GOTO_exceptionThrown();
+ SET_REGISTER(vdst, (u4) newObj);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_NEW_ARRAY.c */
+HANDLE_OPCODE(OP_NEW_ARRAY /*vA, vB, class@CCCC*/)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ s4 length;
+
+ EXPORT_PC();
+
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst); /* length reg */
+ ref = FETCH(1);
+ ILOGV("|new-array v%d,v%d,class@0x%04x (%d elements)",
+ vdst, vsrc1, ref, (s4) GET_REGISTER(vsrc1));
+ length = (s4) GET_REGISTER(vsrc1);
+ if (length < 0) {
+ dvmThrowException("Ljava/lang/NegativeArraySizeException;", NULL);
+ GOTO_exceptionThrown();
+ }
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ newArray = dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+ SET_REGISTER(vdst, (u4) newArray);
+ }
+ FINISH(2);
+OP_END
+
+
+/* File: c/OP_FILLED_NEW_ARRAY.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY /*vB, {vD, vE, vF, vG, vA}, class@CCCC*/)
+ GOTO_invoke(filledNewArray, false);
+OP_END
+
+/* File: c/OP_FILLED_NEW_ARRAY_RANGE.c */
+HANDLE_OPCODE(OP_FILLED_NEW_ARRAY_RANGE /*{vCCCC..v(CCCC+AA-1)}, class@BBBB*/)
+ GOTO_invoke(filledNewArray, true);
+OP_END
+
+/* File: c/OP_FILL_ARRAY_DATA.c */
+HANDLE_OPCODE(OP_FILL_ARRAY_DATA) /*vAA, +BBBBBBBB*/
+ {
+ const u2* arrayData;
+ s4 offset;
+ ArrayObject* arrayObj;
+
+ EXPORT_PC();
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|fill-array-data v%d +0x%04x", vsrc1, offset);
+ arrayData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (arrayData < curMethod->insns ||
+ arrayData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ dvmThrowException("Ljava/lang/InternalError;",
+ "bad fill array data");
+ GOTO_exceptionThrown();
+ }
+#endif
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ if (!dvmInterpHandleFillArrayData(arrayObj, arrayData)) {
+ GOTO_exceptionThrown();
+ }
+ FINISH(3);
+ }
+OP_END
+
+/* File: c/OP_THROW.c */
+HANDLE_OPCODE(OP_THROW /*vAA*/)
+ {
+ Object* obj;
+
+ vsrc1 = INST_AA(inst);
+ ILOGV("|throw v%d (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
+ obj = (Object*) GET_REGISTER(vsrc1);
+ if (!checkForNullExportPC(obj, fp, pc)) {
+ /* will throw a null pointer exception */
+ LOGVV("Bad exception\n");
+ } else {
+ /* use the requested exception */
+ dvmSetException(self, obj);
+ }
+ GOTO_exceptionThrown();
+ }
+OP_END
+
+/* File: c/OP_GOTO.c */
+HANDLE_OPCODE(OP_GOTO /*+AA*/)
+ vdst = INST_AA(inst);
+ if ((s1)vdst < 0)
+ ILOGV("|goto -0x%02x", -((s1)vdst));
+ else
+ ILOGV("|goto +0x%02x", ((s1)vdst));
+ ILOGV("> branch taken");
+ if ((s1)vdst < 0)
+ PERIODIC_CHECKS(kInterpEntryInstr, (s1)vdst);
+ FINISH((s1)vdst);
+OP_END
+
+/* File: c/OP_GOTO_16.c */
+HANDLE_OPCODE(OP_GOTO_16 /*+AAAA*/)
+ {
+ s4 offset = (s2) FETCH(1); /* sign-extend next code unit */
+
+ if (offset < 0)
+ ILOGV("|goto/16 -0x%04x", -offset);
+ else
+ ILOGV("|goto/16 +0x%04x", offset);
+ ILOGV("> branch taken");
+ if (offset < 0)
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_GOTO_32.c */
+HANDLE_OPCODE(OP_GOTO_32 /*+AAAAAAAA*/)
+ {
+ s4 offset = FETCH(1); /* low-order 16 bits */
+ offset |= ((s4) FETCH(2)) << 16; /* high-order 16 bits */
+
+ if (offset < 0)
+ ILOGV("|goto/32 -0x%08x", -offset);
+ else
+ ILOGV("|goto/32 +0x%08x", offset);
+ ILOGV("> branch taken");
+ if (offset <= 0) /* allowed to branch to self */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_PACKED_SWITCH.c */
+HANDLE_OPCODE(OP_PACKED_SWITCH /*vAA, +BBBB*/)
+ {
+ const u2* switchData;
+ u4 testVal;
+ s4 offset;
+
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|packed-switch v%d +0x%04x", vsrc1, vsrc2);
+ switchData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (switchData < curMethod->insns ||
+ switchData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/InternalError;", "bad packed switch");
+ GOTO_exceptionThrown();
+ }
+#endif
+ testVal = GET_REGISTER(vsrc1);
+
+ offset = dvmInterpHandlePackedSwitch(switchData, testVal);
+ ILOGV("> branch taken (0x%04x)\n", offset);
+ if (offset <= 0) /* uncommon */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_SPARSE_SWITCH.c */
+HANDLE_OPCODE(OP_SPARSE_SWITCH /*vAA, +BBBB*/)
+ {
+ const u2* switchData;
+ u4 testVal;
+ s4 offset;
+
+ vsrc1 = INST_AA(inst);
+ offset = FETCH(1) | (((s4) FETCH(2)) << 16);
+ ILOGV("|sparse-switch v%d +0x%04x", vsrc1, vsrc2);
+ switchData = pc + offset; // offset in 16-bit units
+#ifndef NDEBUG
+ if (switchData < curMethod->insns ||
+ switchData >= curMethod->insns + dvmGetMethodInsnsSize(curMethod))
+ {
+ /* should have been caught in verifier */
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/InternalError;", "bad sparse switch");
+ GOTO_exceptionThrown();
+ }
+#endif
+ testVal = GET_REGISTER(vsrc1);
+
+ offset = dvmInterpHandleSparseSwitch(switchData, testVal);
+ ILOGV("> branch taken (0x%04x)\n", offset);
+ if (offset <= 0) /* uncommon */
+ PERIODIC_CHECKS(kInterpEntryInstr, offset);
+ FINISH(offset);
+ }
+OP_END
+
+/* File: c/OP_CMPL_FLOAT.c */
+HANDLE_OP_CMPX(OP_CMPL_FLOAT, "l-float", float, _FLOAT, -1)
+OP_END
+
+/* File: c/OP_CMPG_FLOAT.c */
+HANDLE_OP_CMPX(OP_CMPG_FLOAT, "g-float", float, _FLOAT, 1)
+OP_END
+
+/* File: c/OP_CMPL_DOUBLE.c */
+HANDLE_OP_CMPX(OP_CMPL_DOUBLE, "l-double", double, _DOUBLE, -1)
+OP_END
+
+/* File: c/OP_CMPG_DOUBLE.c */
+HANDLE_OP_CMPX(OP_CMPG_DOUBLE, "g-double", double, _DOUBLE, 1)
+OP_END
+
+/* File: c/OP_CMP_LONG.c */
+HANDLE_OP_CMPX(OP_CMP_LONG, "-long", s8, _WIDE, 0)
+OP_END
+
+/* File: c/OP_IF_EQ.c */
+HANDLE_OP_IF_XX(OP_IF_EQ, "eq", ==)
+OP_END
+
+/* File: c/OP_IF_NE.c */
+HANDLE_OP_IF_XX(OP_IF_NE, "ne", !=)
+OP_END
+
+/* File: c/OP_IF_LT.c */
+HANDLE_OP_IF_XX(OP_IF_LT, "lt", <)
+OP_END
+
+/* File: c/OP_IF_GE.c */
+HANDLE_OP_IF_XX(OP_IF_GE, "ge", >=)
+OP_END
+
+/* File: c/OP_IF_GT.c */
+HANDLE_OP_IF_XX(OP_IF_GT, "gt", >)
+OP_END
+
+/* File: c/OP_IF_LE.c */
+HANDLE_OP_IF_XX(OP_IF_LE, "le", <=)
+OP_END
+
+/* File: c/OP_IF_EQZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_EQZ, "eqz", ==)
+OP_END
+
+/* File: c/OP_IF_NEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_NEZ, "nez", !=)
+OP_END
+
+/* File: c/OP_IF_LTZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_LTZ, "ltz", <)
+OP_END
+
+/* File: c/OP_IF_GEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_GEZ, "gez", >=)
+OP_END
+
+/* File: c/OP_IF_GTZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_GTZ, "gtz", >)
+OP_END
+
+/* File: c/OP_IF_LEZ.c */
+HANDLE_OP_IF_XXZ(OP_IF_LEZ, "lez", <=)
+OP_END
+
+/* File: c/OP_UNUSED_3E.c */
+HANDLE_OPCODE(OP_UNUSED_3E)
+OP_END
+
+/* File: c/OP_UNUSED_3F.c */
+HANDLE_OPCODE(OP_UNUSED_3F)
+OP_END
+
+/* File: c/OP_UNUSED_40.c */
+HANDLE_OPCODE(OP_UNUSED_40)
+OP_END
+
+/* File: c/OP_UNUSED_41.c */
+HANDLE_OPCODE(OP_UNUSED_41)
+OP_END
+
+/* File: c/OP_UNUSED_42.c */
+HANDLE_OPCODE(OP_UNUSED_42)
+OP_END
+
+/* File: c/OP_UNUSED_43.c */
+HANDLE_OPCODE(OP_UNUSED_43)
+OP_END
+
+/* File: c/OP_AGET.c */
+HANDLE_OP_AGET(OP_AGET, "", u4, )
+OP_END
+
+/* File: c/OP_AGET_WIDE.c */
+HANDLE_OP_AGET(OP_AGET_WIDE, "-wide", s8, _WIDE)
+OP_END
+
+/* File: c/OP_AGET_OBJECT.c */
+HANDLE_OP_AGET(OP_AGET_OBJECT, "-object", u4, )
+OP_END
+
+/* File: c/OP_AGET_BOOLEAN.c */
+HANDLE_OP_AGET(OP_AGET_BOOLEAN, "-boolean", u1, )
+OP_END
+
+/* File: c/OP_AGET_BYTE.c */
+HANDLE_OP_AGET(OP_AGET_BYTE, "-byte", s1, )
+OP_END
+
+/* File: c/OP_AGET_CHAR.c */
+HANDLE_OP_AGET(OP_AGET_CHAR, "-char", u2, )
+OP_END
+
+/* File: c/OP_AGET_SHORT.c */
+HANDLE_OP_AGET(OP_AGET_SHORT, "-short", s2, )
+OP_END
+
+/* File: c/OP_APUT.c */
+HANDLE_OP_APUT(OP_APUT, "", u4, )
+OP_END
+
+/* File: c/OP_APUT_WIDE.c */
+HANDLE_OP_APUT(OP_APUT_WIDE, "-wide", s8, _WIDE)
+OP_END
+
+/* File: c/OP_APUT_OBJECT.c */
+HANDLE_OPCODE(OP_APUT_OBJECT /*vAA, vBB, vCC*/)
+ {
+ ArrayObject* arrayObj;
+ Object* obj;
+ u2 arrayInfo;
+ EXPORT_PC();
+ vdst = INST_AA(inst); /* AA: source value */
+ arrayInfo = FETCH(1);
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */
+ vsrc2 = arrayInfo >> 8; /* CC: index */
+ ILOGV("|aput%s v%d,v%d,v%d", "-object", vdst, vsrc1, vsrc2);
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1);
+ if (!checkForNull((Object*) arrayObj))
+ GOTO_exceptionThrown();
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) {
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;",
+ NULL);
+ GOTO_exceptionThrown();
+ }
+ obj = (Object*) GET_REGISTER(vdst);
+ if (obj != NULL) {
+ if (!checkForNull(obj))
+ GOTO_exceptionThrown();
+ if (!dvmCanPutArrayElement(obj->clazz, arrayObj->obj.clazz)) {
+ LOGV("Can't put a '%s'(%p) into array type='%s'(%p)\n",
+ obj->clazz->descriptor, obj,
+ arrayObj->obj.clazz->descriptor, arrayObj);
+ //dvmDumpClass(obj->clazz);
+ //dvmDumpClass(arrayObj->obj.clazz);
+ dvmThrowException("Ljava/lang/ArrayStoreException;", NULL);
+ GOTO_exceptionThrown();
+ }
+ }
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));
+ ((u4*) arrayObj->contents)[GET_REGISTER(vsrc2)] =
+ GET_REGISTER(vdst);
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_APUT_BOOLEAN.c */
+HANDLE_OP_APUT(OP_APUT_BOOLEAN, "-boolean", u1, )
+OP_END
+
+/* File: c/OP_APUT_BYTE.c */
+HANDLE_OP_APUT(OP_APUT_BYTE, "-byte", s1, )
+OP_END
+
+/* File: c/OP_APUT_CHAR.c */
+HANDLE_OP_APUT(OP_APUT_CHAR, "-char", u2, )
+OP_END
+
+/* File: c/OP_APUT_SHORT.c */
+HANDLE_OP_APUT(OP_APUT_SHORT, "-short", s2, )
+OP_END
+
+/* File: c/OP_IGET.c */
+HANDLE_IGET_X(OP_IGET, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE.c */
+HANDLE_IGET_X(OP_IGET_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT.c */
+HANDLE_IGET_X(OP_IGET_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IGET_BOOLEAN.c */
+HANDLE_IGET_X(OP_IGET_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_BYTE.c */
+HANDLE_IGET_X(OP_IGET_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_CHAR.c */
+HANDLE_IGET_X(OP_IGET_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_SHORT.c */
+HANDLE_IGET_X(OP_IGET_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT.c */
+HANDLE_IPUT_X(OP_IPUT, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE.c */
+HANDLE_IPUT_X(OP_IPUT_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT.c */
+/*
+ * The VM spec says we should verify that the reference being stored into
+ * the field is assignment compatible. In practice, many popular VMs don't
+ * do this because it slows down a very common operation. It's not so bad
+ * for us, since "dexopt" quickens it whenever possible, but it's still an
+ * issue.
+ *
+ * To make this spec-compliant, we'd need to add a ClassObject pointer to
+ * the Field struct, resolve the field's type descriptor at link or class
+ * init time, and then verify the type here.
+ */
+HANDLE_IPUT_X(OP_IPUT_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_BOOLEAN.c */
+HANDLE_IPUT_X(OP_IPUT_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_BYTE.c */
+HANDLE_IPUT_X(OP_IPUT_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_CHAR.c */
+HANDLE_IPUT_X(OP_IPUT_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_SHORT.c */
+HANDLE_IPUT_X(OP_IPUT_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_SGET.c */
+HANDLE_SGET_X(OP_SGET, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_WIDE.c */
+HANDLE_SGET_X(OP_SGET_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SGET_OBJECT.c */
+HANDLE_SGET_X(OP_SGET_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SGET_BOOLEAN.c */
+HANDLE_SGET_X(OP_SGET_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_BYTE.c */
+HANDLE_SGET_X(OP_SGET_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_CHAR.c */
+HANDLE_SGET_X(OP_SGET_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_SGET_SHORT.c */
+HANDLE_SGET_X(OP_SGET_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT.c */
+HANDLE_SPUT_X(OP_SPUT, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_WIDE.c */
+HANDLE_SPUT_X(OP_SPUT_WIDE, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_SPUT_OBJECT.c */
+HANDLE_SPUT_X(OP_SPUT_OBJECT, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_SPUT_BOOLEAN.c */
+HANDLE_SPUT_X(OP_SPUT_BOOLEAN, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_BYTE.c */
+HANDLE_SPUT_X(OP_SPUT_BYTE, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_CHAR.c */
+HANDLE_SPUT_X(OP_SPUT_CHAR, "", Int, )
+OP_END
+
+/* File: c/OP_SPUT_SHORT.c */
+HANDLE_SPUT_X(OP_SPUT_SHORT, "", Int, )
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeVirtual, false);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeSuper, false);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeDirect, false);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeStatic, false);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeInterface, false);
+OP_END
+
+/* File: c/OP_UNUSED_73.c */
+HANDLE_OPCODE(OP_UNUSED_73)
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeVirtual, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeSuper, true);
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeDirect, true);
+OP_END
+
+/* File: c/OP_INVOKE_STATIC_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_STATIC_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeStatic, true);
+OP_END
+
+/* File: c/OP_INVOKE_INTERFACE_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_INTERFACE_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeInterface, true);
+OP_END
+
+/* File: c/OP_UNUSED_79.c */
+HANDLE_OPCODE(OP_UNUSED_79)
+OP_END
+
+/* File: c/OP_UNUSED_7A.c */
+HANDLE_OPCODE(OP_UNUSED_7A)
+OP_END
+
+/* File: c/OP_NEG_INT.c */
+HANDLE_UNOP(OP_NEG_INT, "neg-int", -, , )
+OP_END
+
+/* File: c/OP_NOT_INT.c */
+HANDLE_UNOP(OP_NOT_INT, "not-int", , ^ 0xffffffff, )
+OP_END
+
+/* File: c/OP_NEG_LONG.c */
+HANDLE_UNOP(OP_NEG_LONG, "neg-long", -, , _WIDE)
+OP_END
+
+/* File: c/OP_NOT_LONG.c */
+HANDLE_UNOP(OP_NOT_LONG, "not-long", , ^ 0xffffffffffffffffULL, _WIDE)
+OP_END
+
+/* File: c/OP_NEG_FLOAT.c */
+HANDLE_UNOP(OP_NEG_FLOAT, "neg-float", -, , _FLOAT)
+OP_END
+
+/* File: c/OP_NEG_DOUBLE.c */
+HANDLE_UNOP(OP_NEG_DOUBLE, "neg-double", -, , _DOUBLE)
+OP_END
+
+/* File: c/OP_INT_TO_LONG.c */
+HANDLE_NUMCONV(OP_INT_TO_LONG, "int-to-long", _INT, _WIDE)
+OP_END
+
+/* File: c/OP_INT_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_INT_TO_FLOAT, "int-to-float", _INT, _FLOAT)
+OP_END
+
+/* File: c/OP_INT_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_INT_TO_DOUBLE, "int-to-double", _INT, _DOUBLE)
+OP_END
+
+/* File: c/OP_LONG_TO_INT.c */
+HANDLE_NUMCONV(OP_LONG_TO_INT, "long-to-int", _WIDE, _INT)
+OP_END
+
+/* File: c/OP_LONG_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_LONG_TO_FLOAT, "long-to-float", _WIDE, _FLOAT)
+OP_END
+
+/* File: c/OP_LONG_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_LONG_TO_DOUBLE, "long-to-double", _WIDE, _DOUBLE)
+OP_END
+
+/* File: c/OP_FLOAT_TO_INT.c */
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_INT, "float-to-int",
+ float, _FLOAT, s4, _INT)
+OP_END
+
+/* File: c/OP_FLOAT_TO_LONG.c */
+HANDLE_FLOAT_TO_INT(OP_FLOAT_TO_LONG, "float-to-long",
+ float, _FLOAT, s8, _WIDE)
+OP_END
+
+/* File: c/OP_FLOAT_TO_DOUBLE.c */
+HANDLE_NUMCONV(OP_FLOAT_TO_DOUBLE, "float-to-double", _FLOAT, _DOUBLE)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_INT.c */
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_INT, "double-to-int",
+ double, _DOUBLE, s4, _INT)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_LONG.c */
+HANDLE_FLOAT_TO_INT(OP_DOUBLE_TO_LONG, "double-to-long",
+ double, _DOUBLE, s8, _WIDE)
+OP_END
+
+/* File: c/OP_DOUBLE_TO_FLOAT.c */
+HANDLE_NUMCONV(OP_DOUBLE_TO_FLOAT, "double-to-float", _DOUBLE, _FLOAT)
+OP_END
+
+/* File: c/OP_INT_TO_BYTE.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_BYTE, "byte", s1)
+OP_END
+
+/* File: c/OP_INT_TO_CHAR.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_CHAR, "char", u2)
+OP_END
+
+/* File: c/OP_INT_TO_SHORT.c */
+HANDLE_INT_TO_SMALL(OP_INT_TO_SHORT, "short", s2) /* want sign bit */
+OP_END
+
+/* File: c/OP_ADD_INT.c */
+HANDLE_OP_X_INT(OP_ADD_INT, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_INT.c */
+HANDLE_OP_X_INT(OP_SUB_INT, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_INT.c */
+HANDLE_OP_X_INT(OP_MUL_INT, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT.c */
+HANDLE_OP_X_INT(OP_DIV_INT, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT.c */
+HANDLE_OP_X_INT(OP_REM_INT, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT.c */
+HANDLE_OP_X_INT(OP_AND_INT, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT.c */
+HANDLE_OP_X_INT(OP_OR_INT, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT.c */
+HANDLE_OP_X_INT(OP_XOR_INT, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT.c */
+HANDLE_OP_SHX_INT(OP_SHL_INT, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT.c */
+HANDLE_OP_SHX_INT(OP_SHR_INT, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT.c */
+HANDLE_OP_SHX_INT(OP_USHR_INT, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_ADD_LONG.c */
+HANDLE_OP_X_LONG(OP_ADD_LONG, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_LONG.c */
+HANDLE_OP_X_LONG(OP_SUB_LONG, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_LONG.c */
+HANDLE_OP_X_LONG(OP_MUL_LONG, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_LONG.c */
+HANDLE_OP_X_LONG(OP_DIV_LONG, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_LONG.c */
+HANDLE_OP_X_LONG(OP_REM_LONG, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_LONG.c */
+HANDLE_OP_X_LONG(OP_AND_LONG, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_LONG.c */
+HANDLE_OP_X_LONG(OP_OR_LONG, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_LONG.c */
+HANDLE_OP_X_LONG(OP_XOR_LONG, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_LONG.c */
+HANDLE_OP_SHX_LONG(OP_SHL_LONG, "shl", (s8), <<)
+OP_END
+
+/* File: c/OP_SHR_LONG.c */
+HANDLE_OP_SHX_LONG(OP_SHR_LONG, "shr", (s8), >>)
+OP_END
+
+/* File: c/OP_USHR_LONG.c */
+HANDLE_OP_SHX_LONG(OP_USHR_LONG, "ushr", (u8), >>)
+OP_END
+
+/* File: c/OP_ADD_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_ADD_FLOAT, "add", +)
+OP_END
+
+/* File: c/OP_SUB_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_SUB_FLOAT, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_MUL_FLOAT, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_FLOAT.c */
+HANDLE_OP_X_FLOAT(OP_DIV_FLOAT, "div", /)
+OP_END
+
+/* File: c/OP_REM_FLOAT.c */
+HANDLE_OPCODE(OP_REM_FLOAT /*vAA, vBB, vCC*/)
+ {
+ u2 srcRegs;
+ vdst = INST_AA(inst);
+ srcRegs = FETCH(1);
+ vsrc1 = srcRegs & 0xff;
+ vsrc2 = srcRegs >> 8;
+ ILOGV("|%s-float v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+ SET_REGISTER_FLOAT(vdst,
+ fmodf(GET_REGISTER_FLOAT(vsrc1), GET_REGISTER_FLOAT(vsrc2)));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ADD_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_ADD_DOUBLE, "add", +)
+OP_END
+
+/* File: c/OP_SUB_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_SUB_DOUBLE, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_MUL_DOUBLE, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_DOUBLE.c */
+HANDLE_OP_X_DOUBLE(OP_DIV_DOUBLE, "div", /)
+OP_END
+
+/* File: c/OP_REM_DOUBLE.c */
+HANDLE_OPCODE(OP_REM_DOUBLE /*vAA, vBB, vCC*/)
+ {
+ u2 srcRegs;
+ vdst = INST_AA(inst);
+ srcRegs = FETCH(1);
+ vsrc1 = srcRegs & 0xff;
+ vsrc2 = srcRegs >> 8;
+ ILOGV("|%s-double v%d,v%d,v%d", "mod", vdst, vsrc1, vsrc2);
+ SET_REGISTER_DOUBLE(vdst,
+ fmod(GET_REGISTER_DOUBLE(vsrc1), GET_REGISTER_DOUBLE(vsrc2)));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_ADD_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_ADD_INT_2ADDR, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_SUB_INT_2ADDR, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_MUL_INT_2ADDR, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_DIV_INT_2ADDR, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_REM_INT_2ADDR, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_AND_INT_2ADDR, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_OR_INT_2ADDR, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_2ADDR.c */
+HANDLE_OP_X_INT_2ADDR(OP_XOR_INT_2ADDR, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_SHL_INT_2ADDR, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_SHR_INT_2ADDR, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT_2ADDR.c */
+HANDLE_OP_SHX_INT_2ADDR(OP_USHR_INT_2ADDR, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_ADD_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_ADD_LONG_2ADDR, "add", +, 0)
+OP_END
+
+/* File: c/OP_SUB_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_SUB_LONG_2ADDR, "sub", -, 0)
+OP_END
+
+/* File: c/OP_MUL_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_MUL_LONG_2ADDR, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_DIV_LONG_2ADDR, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_REM_LONG_2ADDR, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_AND_LONG_2ADDR, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_OR_LONG_2ADDR, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_LONG_2ADDR.c */
+HANDLE_OP_X_LONG_2ADDR(OP_XOR_LONG_2ADDR, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHL_LONG_2ADDR, "shl", (s8), <<)
+OP_END
+
+/* File: c/OP_SHR_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_SHR_LONG_2ADDR, "shr", (s8), >>)
+OP_END
+
+/* File: c/OP_USHR_LONG_2ADDR.c */
+HANDLE_OP_SHX_LONG_2ADDR(OP_USHR_LONG_2ADDR, "ushr", (u8), >>)
+OP_END
+
+/* File: c/OP_ADD_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_ADD_FLOAT_2ADDR, "add", +)
+OP_END
+
+/* File: c/OP_SUB_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_SUB_FLOAT_2ADDR, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_MUL_FLOAT_2ADDR, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_FLOAT_2ADDR.c */
+HANDLE_OP_X_FLOAT_2ADDR(OP_DIV_FLOAT_2ADDR, "div", /)
+OP_END
+
+/* File: c/OP_REM_FLOAT_2ADDR.c */
+HANDLE_OPCODE(OP_REM_FLOAT_2ADDR /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|%s-float-2addr v%d,v%d", "mod", vdst, vsrc1);
+ SET_REGISTER_FLOAT(vdst,
+ fmodf(GET_REGISTER_FLOAT(vdst), GET_REGISTER_FLOAT(vsrc1)));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_ADD_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_ADD_DOUBLE_2ADDR, "add", +)
+OP_END
+
+/* File: c/OP_SUB_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_SUB_DOUBLE_2ADDR, "sub", -)
+OP_END
+
+/* File: c/OP_MUL_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_MUL_DOUBLE_2ADDR, "mul", *)
+OP_END
+
+/* File: c/OP_DIV_DOUBLE_2ADDR.c */
+HANDLE_OP_X_DOUBLE_2ADDR(OP_DIV_DOUBLE_2ADDR, "div", /)
+OP_END
+
+/* File: c/OP_REM_DOUBLE_2ADDR.c */
+HANDLE_OPCODE(OP_REM_DOUBLE_2ADDR /*vA, vB*/)
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ ILOGV("|%s-double-2addr v%d,v%d", "mod", vdst, vsrc1);
+ SET_REGISTER_DOUBLE(vdst,
+ fmod(GET_REGISTER_DOUBLE(vdst), GET_REGISTER_DOUBLE(vsrc1)));
+ FINISH(1);
+OP_END
+
+/* File: c/OP_ADD_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_ADD_INT_LIT16, "add", +, 0)
+OP_END
+
+/* File: c/OP_RSUB_INT.c */
+HANDLE_OPCODE(OP_RSUB_INT /*vA, vB, #+CCCC*/)
+ {
+ vdst = INST_A(inst);
+ vsrc1 = INST_B(inst);
+ vsrc2 = FETCH(1);
+ ILOGV("|rsub-int v%d,v%d,#+0x%04x", vdst, vsrc1, vsrc2);
+ SET_REGISTER(vdst, (s2) vsrc2 - (s4) GET_REGISTER(vsrc1));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MUL_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_MUL_INT_LIT16, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_DIV_INT_LIT16, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_REM_INT_LIT16, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_AND_INT_LIT16, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_OR_INT_LIT16, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_LIT16.c */
+HANDLE_OP_X_INT_LIT16(OP_XOR_INT_LIT16, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_ADD_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_ADD_INT_LIT8, "add", +, 0)
+OP_END
+
+/* File: c/OP_RSUB_INT_LIT8.c */
+HANDLE_OPCODE(OP_RSUB_INT_LIT8 /*vAA, vBB, #+CC*/)
+ {
+ u2 litInfo;
+ vdst = INST_AA(inst);
+ litInfo = FETCH(1);
+ vsrc1 = litInfo & 0xff;
+ vsrc2 = litInfo >> 8;
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", "rsub", vdst, vsrc1, vsrc2);
+ SET_REGISTER(vdst, (s1) vsrc2 - (s4) GET_REGISTER(vsrc1));
+ }
+ FINISH(2);
+OP_END
+
+/* File: c/OP_MUL_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_MUL_INT_LIT8, "mul", *, 0)
+OP_END
+
+/* File: c/OP_DIV_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_DIV_INT_LIT8, "div", /, 1)
+OP_END
+
+/* File: c/OP_REM_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_REM_INT_LIT8, "rem", %, 2)
+OP_END
+
+/* File: c/OP_AND_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_AND_INT_LIT8, "and", &, 0)
+OP_END
+
+/* File: c/OP_OR_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_OR_INT_LIT8, "or", |, 0)
+OP_END
+
+/* File: c/OP_XOR_INT_LIT8.c */
+HANDLE_OP_X_INT_LIT8(OP_XOR_INT_LIT8, "xor", ^, 0)
+OP_END
+
+/* File: c/OP_SHL_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_SHL_INT_LIT8, "shl", (s4), <<)
+OP_END
+
+/* File: c/OP_SHR_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_SHR_INT_LIT8, "shr", (s4), >>)
+OP_END
+
+/* File: c/OP_USHR_INT_LIT8.c */
+HANDLE_OP_SHX_INT_LIT8(OP_USHR_INT_LIT8, "ushr", (u4), >>)
+OP_END
+
+/* File: c/OP_UNUSED_E3.c */
+HANDLE_OPCODE(OP_UNUSED_E3)
+OP_END
+
+/* File: c/OP_UNUSED_E4.c */
+HANDLE_OPCODE(OP_UNUSED_E4)
+OP_END
+
+/* File: c/OP_UNUSED_E5.c */
+HANDLE_OPCODE(OP_UNUSED_E5)
+OP_END
+
+/* File: c/OP_UNUSED_E6.c */
+HANDLE_OPCODE(OP_UNUSED_E6)
+OP_END
+
+/* File: c/OP_UNUSED_E7.c */
+HANDLE_OPCODE(OP_UNUSED_E7)
+OP_END
+
+/* File: c/OP_UNUSED_E8.c */
+HANDLE_OPCODE(OP_UNUSED_E8)
+OP_END
+
+/* File: c/OP_UNUSED_E9.c */
+HANDLE_OPCODE(OP_UNUSED_E9)
+OP_END
+
+/* File: c/OP_UNUSED_EA.c */
+HANDLE_OPCODE(OP_UNUSED_EA)
+OP_END
+
+/* File: c/OP_UNUSED_EB.c */
+HANDLE_OPCODE(OP_UNUSED_EB)
+OP_END
+
+/* File: c/OP_UNUSED_EC.c */
+HANDLE_OPCODE(OP_UNUSED_EC)
+OP_END
+
+/* File: c/OP_UNUSED_ED.c */
+HANDLE_OPCODE(OP_UNUSED_ED)
+OP_END
+
+/* File: c/OP_EXECUTE_INLINE.c */
+HANDLE_OPCODE(OP_EXECUTE_INLINE /*vB, {vD, vE, vF, vG}, inline@CCCC*/)
+ {
+ /*
+ * This has the same form as other method calls, but we ignore
+ * the 5th argument (vA). This is chiefly because the first four
+ * arguments to a function on ARM are in registers.
+ *
+ * We only set the arguments that are actually used, leaving
+ * the rest uninitialized. We're assuming that, if the method
+ * needs them, they'll be specified in the call.
+ *
+ * This annoys gcc when optimizations are enabled, causing a
+ * "may be used uninitialized" warning. We can quiet the warnings
+ * for a slight penalty (5%: 373ns vs. 393ns on empty method). Note
+ * that valgrind is perfectly happy with this arrangement, because
+         * the uninitialized values are never actually used.
+ */
+ u4 arg0, arg1, arg2, arg3;
+ //arg0 = arg1 = arg2 = arg3 = 0;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_B(inst); /* #of args */
+ ref = FETCH(1); /* inline call "ref" */
+ vdst = FETCH(2); /* 0-4 register indices */
+ ILOGV("|execute-inline args=%d @%d {regs=0x%04x}",
+ vsrc1, ref, vdst);
+
+ assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
+ assert(vsrc1 <= 4);
+
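+        /* vdst packs up to four 4-bit register indices, arg0 in the
+         * lowest nibble and arg3 in the highest of the low 16 bits */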
+ switch (vsrc1) {
+ case 4:
+ arg3 = GET_REGISTER(vdst >> 12);
+ /* fall through */
+ case 3:
+ arg2 = GET_REGISTER((vdst & 0x0f00) >> 8);
+ /* fall through */
+ case 2:
+ arg1 = GET_REGISTER((vdst & 0x00f0) >> 4);
+ /* fall through */
+ case 1:
+ arg0 = GET_REGISTER(vdst & 0x0f);
+ /* fall through */
+ default: // case 0
+ ;
+ }
+
+#if INTERP_TYPE == INTERP_DBG
+ if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+#else
+ if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
+ GOTO_exceptionThrown();
+#endif
+ }
+ FINISH(3);
+OP_END
+
+/* File: c/OP_UNUSED_EF.c */
+HANDLE_OPCODE(OP_UNUSED_EF)
+OP_END
+
+/* File: c/OP_INVOKE_DIRECT_EMPTY.c */
+HANDLE_OPCODE(OP_INVOKE_DIRECT_EMPTY /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+#if INTERP_TYPE != INTERP_DBG
+ //LOGI("Ignoring empty\n");
+ FINISH(3);
+#else
+ if (!gDvm.debuggerActive) {
+ //LOGI("Skipping empty\n");
+ FINISH(3); // don't want it to show up in profiler output
+ } else {
+ //LOGI("Running empty\n");
+ /* fall through to OP_INVOKE_DIRECT */
+ GOTO_invoke(invokeDirect, false);
+ }
+#endif
+OP_END
+
+/* File: c/OP_UNUSED_F1.c */
+HANDLE_OPCODE(OP_UNUSED_F1)
+OP_END
+
+/* File: c/OP_IGET_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_QUICK, "", Int, )
+OP_END
+
+/* File: c/OP_IGET_WIDE_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_WIDE_QUICK, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IGET_OBJECT_QUICK.c */
+HANDLE_IGET_X_QUICK(OP_IGET_OBJECT_QUICK, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_IPUT_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_QUICK, "", Int, )
+OP_END
+
+/* File: c/OP_IPUT_WIDE_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_WIDE_QUICK, "-wide", Long, _WIDE)
+OP_END
+
+/* File: c/OP_IPUT_OBJECT_QUICK.c */
+HANDLE_IPUT_X_QUICK(OP_IPUT_OBJECT_QUICK, "-object", Object, _AS_OBJECT)
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_QUICK.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeVirtualQuick, false);
+OP_END
+
+/* File: c/OP_INVOKE_VIRTUAL_QUICK_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_VIRTUAL_QUICK_RANGE/*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeVirtualQuick, true);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_QUICK.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK /*vB, {vD, vE, vF, vG, vA}, meth@CCCC*/)
+ GOTO_invoke(invokeSuperQuick, false);
+OP_END
+
+/* File: c/OP_INVOKE_SUPER_QUICK_RANGE.c */
+HANDLE_OPCODE(OP_INVOKE_SUPER_QUICK_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
+ GOTO_invoke(invokeSuperQuick, true);
+OP_END
+
+/* File: c/OP_UNUSED_FC.c */
+HANDLE_OPCODE(OP_UNUSED_FC)
+OP_END
+
+/* File: c/OP_UNUSED_FD.c */
+HANDLE_OPCODE(OP_UNUSED_FD)
+OP_END
+
+/* File: c/OP_UNUSED_FE.c */
+HANDLE_OPCODE(OP_UNUSED_FE)
+OP_END
+
+/* File: c/OP_UNUSED_FF.c */
+HANDLE_OPCODE(OP_UNUSED_FF)
+ /*
+     * In the portable interp, most unused opcodes will fall through to here.
+ */
+ LOGE("unknown opcode 0x%02x\n", INST_INST(inst));
+ dvmAbort();
+ FINISH(1);
+OP_END
+
+/* File: c/gotoTargets.c */
+/*
+ * C footer. This has some common code shared by the various targets.
+ */
+
+/*
+ * Everything from here on is a "goto target". In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction. Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ u4* contents;
+ char typeCh;
+ int i;
+ u4 arg5;
+
+ EXPORT_PC();
+
+ ref = FETCH(1); /* class ref */
+ vdst = FETCH(2); /* first 4 regs -or- range base */
+
+ if (methodCallRange) {
+ vsrc1 = INST_AA(inst); /* #of elements */
+ arg5 = -1; /* silence compiler warning */
+ ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ arg5 = INST_A(inst);
+ vsrc1 = INST_B(inst); /* #of elements */
+ ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1, ref, vdst, arg5);
+ }
+
+ /*
+ * Resolve the array class.
+ */
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /*
+ if (!dvmIsArrayClass(arrayClass)) {
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "filled-new-array needs array class");
+ GOTO_exceptionThrown();
+ }
+ */
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ /*
+ * Create an array of the specified type.
+ */
+ LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+ typeCh = arrayClass->descriptor[1];
+ if (typeCh == 'D' || typeCh == 'J') {
+ /* category 2 primitives not allowed */
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "bad filled array req");
+ GOTO_exceptionThrown();
+ } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
+ /* TODO: requires multiple "fill in" loops with different widths */
+ LOGE("non-int primitives not implemented\n");
+ dvmThrowException("Ljava/lang/InternalError;",
+ "filled-new-array not implemented for anything but 'int'");
+ GOTO_exceptionThrown();
+ }
+
+ newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+
+ /*
+ * Fill in the elements. It's legal for vsrc1 to be zero.
+ */
+ contents = (u4*) newArray->contents;
+ if (methodCallRange) {
+ for (i = 0; i < vsrc1; i++)
+ contents[i] = GET_REGISTER(vdst+i);
+ } else {
+ assert(vsrc1 <= 5);
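+            /* the fifth argument register, if present, comes from the A
+             * nibble of the opcode word (arg5); the other four are packed
+             * as nibbles in vdst */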
+ if (vsrc1 == 5) {
+ contents[4] = GET_REGISTER(arg5);
+ vsrc1--;
+ }
+ for (i = 0; i < vsrc1; i++) {
+ contents[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+ }
+
+ retval.l = newArray;
+ }
+ FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+ {
+ Method* baseMethod;
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
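+        /* methodIndex is a vtable slot number; the receiver class's
+         * vtable holds the most-derived override in that same slot */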
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ /*
+ * This can happen if you create two classes, Base and Sub, where
+ * Sub is a sub-class of Base. Declare a protected abstract
+ * method foo() in Base, and invoke foo() from a method in Base.
+ * Base is an "abstract base class" and is never instantiated
+ * directly. Now, override foo() in Sub, and use Sub. This
+ * works fine unless Sub stops providing an implementation of
+ * the method.
+ */
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+#if 0
+ if (vsrc1 != methodToCall->insSize) {
+ LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ //dvmDumpClass(baseMethod->clazz);
+ //dvmDumpClass(methodToCall->clazz);
+ dvmDumpAllClasses(0);
+ }
+#endif
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+ {
+ Method* baseMethod;
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ * The first arg to dvmResolveMethod() is just the referring class
+ * (used for class loaders and such), so we don't want to pass
+ * the superclass into the resolution call.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in that class' superclass.
+ */
+ if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
+ /*
+ * Method does not exist in the superclass. Could happen if
+ * superclass gets updated.
+ */
+ dvmThrowException("Ljava/lang/NoSuchMethodError;",
+ baseMethod->name);
+ GOTO_exceptionThrown();
+ }
+ methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+ {
+ Object* thisPtr;
+ ClassObject* thisClass;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ thisClass = thisPtr->clazz;
+
+ /*
+ * Given a class and a method index, find the Method* with the
+ * actual code we want to execute.
+ */
+ methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
+ methodClassDex);
+ if (methodToCall == NULL) {
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange) {
+ ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref,
+ METHOD_DIRECT);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown direct method\n"); // should be impossible
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange)
+ ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ else
+ ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown method\n");
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+ {
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(ref < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+#if 0 /* impossible in optimized + verified code */
+ if (ref >= curMethod->clazz->super->vtableCount) {
+ dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(ref < curMethod->clazz->super->vtableCount);
+#endif
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in the method's class' superclass.
+ */
+ methodToCall = curMethod->clazz->super->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ super-virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+
+
+ /*
+ * General handling for return-void, return, and return-wide. Put the
+ * return value in "retval" before jumping here.
+ */
+GOTO_TARGET(returnFromMethod)
+ {
+ StackSaveArea* saveArea;
+
+ /*
+ * We must do this BEFORE we pop the previous stack frame off, so
+ * that the GC can see the return value (if any) in the local vars.
+ *
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+ ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+ retval.j, curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+ //DUMP_REGS(curMethod, fp);
+
+ saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+ debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, curMethod);
+#endif
+
+ /* back up to previous frame and see if we hit a break */
+ fp = saveArea->prevFrame;
+ assert(fp != NULL);
+ if (dvmIsBreakFrame(fp)) {
+ /* bail without popping the method frame from stack */
+ LOGVV("+++ returned into break frame\n");
+ GOTO_bail();
+ }
+
+ /* update thread FP, and reset local variables */
+ self->curFrame = fp;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = saveArea->savedPc;
+ ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+
+ /* use FINISH on the caller's invoke instruction */
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * Jump here when the code throws an exception.
+ *
+ * By the time we get here, the Throwable has been created and the stack
+ * trace has been saved off.
+ */
+GOTO_TARGET(exceptionThrown)
+ {
+ Object* exception;
+ int catchRelPc;
+
+ /*
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+ /*
+ * We save off the exception and clear the exception status. While
+ * processing the exception we might need to load some Throwable
+ * classes, and we don't want class loader exceptions to get
+ * confused with this one.
+ */
+ assert(dvmCheckException(self));
+ exception = dvmGetException(self);
+ dvmAddTrackedAlloc(exception, self);
+ dvmClearException(self);
+
+ LOGV("Handling exception %s at %s:%d\n",
+ exception->clazz->descriptor, curMethod->name,
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ /*
+ * Tell the debugger about it.
+ *
+ * TODO: if the exception was thrown by interpreted code, control
+ * fell through native, and then back to us, we will report the
+ * exception at the point of the throw and again here. We can avoid
+ * this by not reporting exceptions when we jump here directly from
+ * the native call code above, but then we won't report exceptions
+ * that were thrown *from* the JNI code (as opposed to *through* it).
+ *
+ * The correct solution is probably to ignore from-native exceptions
+ * here, and have the JNI exception code do the reporting to the
+ * debugger.
+ */
+ if (gDvm.debuggerActive) {
+ void* catchFrame;
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, true, &catchFrame);
+ dvmDbgPostException(fp, pc - curMethod->insns, catchFrame,
+ catchRelPc, exception);
+ }
+#endif
+
+ /*
+ * We need to unroll to the catch block or the nearest "break"
+ * frame.
+ *
+ * A break frame could indicate that we have reached an intermediate
+ * native call, or have gone off the top of the stack and the thread
+ * needs to exit. Either way, we return from here, leaving the
+ * exception raised.
+ *
+ * If we do find a catch block, we want to transfer execution to
+ * that point.
+ */
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, false, (void*)&fp);
+
+ /*
+ * Restore the stack bounds after an overflow. This isn't going to
+ * be correct in all circumstances, e.g. if JNI code devours the
+ * exception this won't happen until some other exception gets
+ * thrown. If the code keeps pushing the stack bounds we'll end
+ * up aborting the VM.
+ *
+ * Note we want to do this *after* the call to dvmFindCatchBlock,
+ * because that may need extra stack space to resolve exception
+ * classes (e.g. through a class loader).
+ */
+ if (self->stackOverflowed)
+ dvmCleanupStackOverflow(self);
+
+ if (catchRelPc < 0) {
+ /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+ LOGD("Exception %s from %s:%d not caught locally\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+#endif
+ dvmSetException(self, exception);
+ dvmReleaseTrackedAlloc(exception, self);
+ GOTO_bail();
+ }
+
+#if DVM_SHOW_EXCEPTION >= 3
+ {
+ const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+ LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns),
+ dvmGetMethodSourceFile(catchMethod),
+ dvmLineNumFromPC(catchMethod, catchRelPc));
+ }
+#endif
+
+ /*
+ * Adjust local variables to match self->curFrame and the
+ * updated PC.
+ */
+ //fp = (u4*) self->curFrame;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = curMethod->insns + catchRelPc;
+ ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, false); // show all regs
+
+ /*
+ * Restore the exception if the handler wants it.
+ *
+ * The Dalvik spec mandates that, if an exception handler wants to
+ * do something with the exception, the first instruction executed
+ * must be "move-exception". We can pass the exception along
+ * through the thread struct, and let the move-exception instruction
+ * clear it for us.
+ *
+ * If the handler doesn't call move-exception, we don't want to
+ * finish here with an exception still pending.
+ */
+ if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+ dvmSetException(self, exception);
+
+ dvmReleaseTrackedAlloc(exception, self);
+ FINISH(0);
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * General handling for invoke-{virtual,super,direct,static,interface},
+ * including "quick" variants.
+ *
+ * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+ * depending on whether this is a "/range" instruction.
+ *
+ * For a range call:
+ * "vsrc1" holds the argument count (8 bits)
+ * "vdst" holds the first argument in the range
+ * For a non-range call:
+ * "vsrc1" holds the argument count (4 bits) and the 5th argument index
+ * "vdst" holds four 4-bit register indices
+ *
+ * The caller must EXPORT_PC before jumping here, because any method
+ * call can throw a stack overflow exception.
+ */
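+/*
+ * For illustration (hypothetical operands): a non-range call with three
+ * arguments, e.g. {v2, v5, v1}, arrives here with vsrc1 == 0x30 -- count
+ * in the high nibble, 5th-arg index in the low nibble (unused for
+ * count < 5) -- and vdst == 0x0152, one register index per nibble with
+ * the first argument lowest. The fall-through switch below unpacks it as:
+ *
+ *   outs[2] = GET_REGISTER((0x0152 & 0x0f00) >> 8);   // v1
+ *   outs[1] = GET_REGISTER((0x0152 & 0x00f0) >> 4);   // v5
+ *   outs[0] = GET_REGISTER( 0x0152 & 0x000f);         // v2
+ */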
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+ u2 count, u2 regs)
+ {
+ STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
+
+ //printf("range=%d call=%p count=%d regs=0x%04x\n",
+ // methodCallRange, methodToCall, count, regs);
+ //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+ // methodToCall->name, methodToCall->signature);
+
+ u4* outs;
+ int i;
+
+ /*
+ * Copy args. This may corrupt vsrc1/vdst.
+ */
+ if (methodCallRange) {
+ // could use memcpy or a "Duff's device"; most functions have
+ // so few args it won't matter much
+ assert(vsrc1 <= curMethod->outsSize);
+ assert(vsrc1 == methodToCall->insSize);
+ outs = OUTS_FROM_FP(fp, vsrc1);
+ for (i = 0; i < vsrc1; i++)
+ outs[i] = GET_REGISTER(vdst+i);
+ } else {
+ u4 count = vsrc1 >> 4;
+
+ assert(count <= curMethod->outsSize);
+ assert(count == methodToCall->insSize);
+ assert(count <= 5);
+
+ outs = OUTS_FROM_FP(fp, count);
+#if 0
+ if (count == 5) {
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ count--;
+ }
+ for (i = 0; i < (int) count; i++) {
+ outs[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+#else
+ // This version executes fewer instructions but is larger
+ // overall. Seems to be a teensy bit faster.
+ assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
+ switch (count) {
+ case 5:
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ case 4:
+ outs[3] = GET_REGISTER(vdst >> 12);
+ case 3:
+ outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+ case 2:
+ outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+ case 1:
+ outs[0] = GET_REGISTER(vdst & 0x0f);
+ default:
+ ;
+ }
+#endif
+ }
+ }
+
+ /*
+ * (This was originally a "goto" target; I've kept it separate from the
+ * stuff above in case we want to refactor things again.)
+ *
+ * At this point, we have the arguments stored in the "outs" area of
+ * the current method's stack frame, and the method to call in
+ * "methodToCall". Push a new stack frame.
+ */
+ {
+ StackSaveArea* newSaveArea;
+ u4* newFp;
+
+ ILOGV("> %s%s.%s %s",
+ dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ methodToCall->signature);
+
+ newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+ newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+ /* verify that we have enough space */
+ if (true) {
+ u1* bottom;
+ bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+ if (bottom < self->interpStackEnd) {
+ /* stack overflow */
+ LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
+ self->interpStackStart, self->interpStackEnd, bottom,
+ self->interpStackSize, methodToCall->name);
+ dvmHandleStackOverflow(self);
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+ //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+ // fp, newFp, newSaveArea, bottom);
+ }
+
+#ifdef LOG_INSTR
+ if (methodToCall->registersSize > methodToCall->insSize) {
+ /*
+ * This makes valgrind quiet when we print registers that
+ * haven't been initialized. Turn it off when the debug
+ * messages are disabled -- we want valgrind to report any
+ * used-before-initialized issues.
+ */
+ memset(newFp, 0xcc,
+ (methodToCall->registersSize - methodToCall->insSize) * 4);
+ }
+#endif
+
+#ifdef EASY_GDB
+ newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+ newSaveArea->prevFrame = fp;
+ newSaveArea->savedPc = pc;
+ newSaveArea->method = methodToCall;
+
+ if (!dvmIsNativeMethod(methodToCall)) {
+ /*
+ * "Call" interpreted code. Reposition the PC, update the
+ * frame pointer and other local state, and continue.
+ */
+ curMethod = methodToCall;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = methodToCall->insns;
+ fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+ debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+ debugIsMethodEntry = true; // profiling, debugging
+#endif
+ ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, true); // show input args
+ FINISH(0); // jump to method start
+ } else {
+ /* set this up for JNI locals, even if not a JNI native */
+ newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+ self->curFrame = newFp;
+
+ DUMP_REGS(methodToCall, newFp, true); // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+ ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+ methodToCall->name, methodToCall->signature);
+
+ /*
+ * Jump through native call bridge. Because we leave no
+ * space for locals on native calls, "newFp" points directly
+ * to the method arguments.
+ */
+ (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+ /* pop frame off */
+ dvmPopJniLocals(self, newSaveArea);
+ self->curFrame = fp;
+
+ /*
+ * If the native code threw an exception, or interpreted code
+ * invoked by the native call threw one and nobody has cleared
+ * it, jump to our local exception handling.
+ */
+ if (dvmCheckException(self)) {
+ LOGV("Exception thrown by/below native code\n");
+ GOTO_exceptionThrown();
+ }
+
+ ILOGD("> retval=0x%llx (leaving native)", retval.j);
+ ILOGD("> (return from native %s.%s to %s.%s %s)",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+ }
+ assert(false); // should not get here
+GOTO_TARGET_END
+
+
+/* File: portable/enddefs.c */
+/*--- end of opcodes ---*/
+
+#ifndef THREADED_INTERP
+ } // end of "switch"
+ } // end of "while"
+#endif
+
+bail:
+ ILOGD("|-- Leaving interpreter loop"); // note "curMethod" may be NULL
+
+ interpState->retval = retval;
+ return false;
+
+bail_switch:
+ /*
+ * The standard interpreter currently doesn't set or care about the
+ * "debugIsMethodEntry" value, so setting this is only of use if we're
+ * switching between two "debug" interpreters, which we never do.
+ *
+ * TODO: figure out if preserving this makes any sense.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# if INTERP_TYPE == INTERP_DBG
+ interpState->debugIsMethodEntry = debugIsMethodEntry;
+# else
+ interpState->debugIsMethodEntry = false;
+# endif
+#endif
+
+ /* export state changes */
+ interpState->method = curMethod;
+ interpState->pc = pc;
+ interpState->fp = fp;
+ /* debugTrackedRefStart doesn't change */
+ interpState->retval = retval; /* need for _entryPoint=ret */
+ interpState->nextMode =
+ (INTERP_TYPE == INTERP_STD) ? INTERP_DBG : INTERP_STD;
+ LOGVV(" meth='%s.%s' pc=0x%x fp=%p\n",
+ curMethod->clazz->descriptor, curMethod->name,
+ pc - curMethod->insns, fp);
+ return true;
+}
+
+
diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c
new file mode 100644
index 0000000..cd5fe95
--- /dev/null
+++ b/vm/mterp/out/InterpC-x86.c
@@ -0,0 +1,2119 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'x86'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.c */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_PROFILER
+ * WITH_DEBUGGER
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ *
+ * If THREADED_INTERP is not defined, we use a classic "while true / switch"
+ * interpreter. If it is defined, then the tail end of each instruction
+ * handler fetches the next instruction and jumps directly to the handler.
+ * This increases the size of the "Std" interpreter by about 10%, but
+ * provides a speedup of about the same magnitude.
+ *
+ * There's a "hybrid" approach that uses a goto table instead of a switch
+ * statement, avoiding the "is the opcode in range" tests required for switch.
+ * The performance is close to the threaded version, and without the 10%
+ * size increase, but the benchmark results are off enough that it's not
+ * worth adding as a third option.
+ */
+#define THREADED_INTERP /* threaded vs. while-loop interpreter */
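+
+/*
+ * A minimal sketch of the two dispatch styles (handler names and table
+ * are illustrative, not the real identifiers):
+ *
+ *   // classic while/switch
+ *   while (true) {
+ *       u2 inst = FETCH(0);
+ *       switch (INST_INST(inst)) {
+ *       case OP_NOP: ADJUST_PC(1); break;
+ *       ...
+ *       }
+ *   }
+ *
+ *   // threaded: each handler tail-dispatches through a goto table
+ *   op_nop: ADJUST_PC(1); inst = FETCH(0);
+ *           goto *handlerTable[INST_INST(inst)];
+ */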
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * ARM EABI requires 64-bit alignment for access to 64-bit data types. We
+ * can't just use pointers to copy 64-bit values out of our interpreted
+ * register set, because gcc will generate ldrd/strd.
+ *
+ * The __UNION version copies data in and out of a union. The __MEMCPY
+ * version uses a memcpy() call to do the transfer; gcc is smart enough to
+ * not actually call memcpy(). The __UNION version is very bad on ARM;
+ * it only uses one more instruction than __MEMCPY, but for some reason
+ * gcc thinks it needs separate storage for every instance of the union.
+ * On top of that, it feels the need to zero them out at the start of the
+ * method. Net result is we zero out ~700 bytes of stack space at the top
+ * of the interpreter using ARM STM instructions.
+ */
+#if defined(__ARM_EABI__)
+//# define NO_UNALIGN_64__UNION
+# define NO_UNALIGN_64__MEMCPY
+#endif
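+
+/*
+ * Concretely (sketch of the failure mode): with "u4* fp", &fp[1] is only
+ * guaranteed 4-byte alignment, so
+ *
+ *   s8 val = *(s8*) &fp[1];        // gcc may emit ldrd, which needs
+ *                                  // doubleword alignment on ARMv5TE
+ *
+ * can fault, while memcpy(&val, &fp[1], 8) compiles to two safe 32-bit
+ * loads. Hence the getLongFromArray()/putLongToArray() helpers below.
+ */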
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Keep a tally of accesses to fields. Currently only works if full DEX
+ * optimization is disabled.
+ */
+#ifdef PROFILE_FIELD_ACCESS
+# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
+# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
+#else
+# define UPDATE_FIELD_GET(_field) ((void)0)
+# define UPDATE_FIELD_PUT(_field) ((void)0)
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) (pc += _offset)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s\n", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#else
+ return *((s8*) &ptr[idx]);
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &val, 8);
+#else
+ *((s8*) &ptr[idx]) = val;
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#else
+ return *((double*) &ptr[idx]);
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#elif defined(NO_UNALIGN_64__MEMCPY)
+ memcpy(&ptr[idx], &dval, 8);
+#else
+ *((double*) &ptr[idx]) = dval;
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
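+
+/*
+ * Note on the wide variants: a long or double occupies a register pair,
+ * so a value "in v2/v3" spans fp[2] and fp[3]. GET_REGISTER_WIDE(2)
+ * reassembles 8 bytes starting at &fp[2], and the checked versions above
+ * test "_idx < registersSize-1" because the pair needs two slots.
+ */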
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" 8-bit register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
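+
+/*
+ * Worked example (hypothetical code unit): for _inst == 0x3512,
+ *   INST_INST(0x3512) == 0x12   (opcode)
+ *   INST_A(0x3512)    == 0x5    (vA)
+ *   INST_B(0x3512)    == 0x3    (vB)
+ *   INST_AA(0x3512)   == 0x35   (vAA)
+ */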
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by dvmThrowException(), so that the exception stack
+ * trace can be generated correctly. If we don't do this, the offset
+ * within the current method won't be shown correctly. See the notes
+ * in Exception.c.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Determine if we need to switch to a different interpreter. "_current"
+ * is either INTERP_STD or INTERP_DBG. It should be fixed for a given
+ * interpreter generation file, which should remove the outer conditional
+ * from the following.
+ *
+ * If we're building without debug and profiling support, we never switch.
+ */
+#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
+# define NEED_INTERP_SWITCH(_current) ( \
+ (_current == INTERP_STD) ? \
+ dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
+#else
+# define NEED_INTERP_SWITCH(_current) (false)
+#endif
+
+/*
+ * Look up an interface on a class using the cache.
+ */
+INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
+ u4 methodIdx, const Method* method, DvmDex* methodClassDex)
+{
+#define ATOMIC_CACHE_CALC \
+ dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)
+
+ return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
+ DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);
+
+#undef ATOMIC_CACHE_CALC
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowException("Ljava/lang/NullPointerException;", NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsValidObject(obj)) {
+ LOGE("Invalid object %p\n", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+
+/* File: cstubs/stubdefs.c */
+/* this is a standard (no debug support) interpreter */
+#define INTERP_TYPE INTERP_STD
+#define CHECK_DEBUG_AND_PROF() ((void)0)
+#define CHECK_TRACKED_REFS() ((void)0)
+
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+/*
+ * Redefine what used to be local variable accesses into MterpGlue struct
+ * references. (These are undefined down in "footer.c".)
+ */
+#define retval glue->retval
+#define pc glue->pc
+#define fp glue->fp
+#define curMethod glue->method
+#define methodClassDex glue->methodClassDex
+#define self glue->self
+#define debugTrackedRefStart glue->debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "glue" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ */
+#define HANDLE_OPCODE(_op) \
+ void dvmMterp_##_op(MterpGlue* glue) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0);
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done.
+ */
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ CHECK_DEBUG_AND_PROF(); \
+ CHECK_TRACKED_REFS(); \
+ return; \
+ }
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(glue); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ dvmMterp_##_target(glue, _methodCallRange); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp. Use "bail_switch"
+ * if we need to switch to the other interpreter upon our return.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(glue, false);
+#define GOTO_bail_switch() \
+ dvmMterpStdBail(glue, true);
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started. If so, switch to a different "goto" table.
+ */
+#define PERIODIC_CHECKS(_entryPoint, _pcadj) { \
+ dvmCheckSuspendQuick(self); \
+ if (NEED_INTERP_SWITCH(INTERP_TYPE)) { \
+ ADJUST_PC(_pcadj); \
+ glue->entryPoint = _entryPoint; \
+ LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n", \
+ glue->self->threadId, (_entryPoint), (_pcadj)); \
+ GOTO_bail_switch(); \
+ } \
+ }
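+
+/*
+ * Handlers invoke PERIODIC_CHECKS on backward branches (see
+ * HANDLE_OP_IF_XX below, which only checks when branchOffset < 0) and at
+ * return/throw points, so even a tight loop can be suspended or handed
+ * off to the debug interpreter.
+ */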
+
+
+/* File: c/opcommon.c */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text subst.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 - 1); \
+ intMax = ~intMin; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
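+
+/*
+ * For example, with float-to-int (_tovtype == s4) the clamping above
+ * yields:
+ *   NaN       -> 0
+ *   +infinity -> 0x7fffffff  (intMax)
+ *   -infinity -> 0x80000000  (intMin)
+ */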
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d\n", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
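+
+/*
+ * The _nanVal argument is what distinguishes the "l" and "g" flavors:
+ * cmpl-float/cmpl-double expand with _nanVal == -1, cmpg-float/cmpg-double
+ * with _nanVal == 1, so cmpl-float(NaN, 0.0f) stores -1 where
+ * cmpg-float(NaN, 0.0f) stores 1.
+ */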
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
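+
+/*
+ * The "& 0x1f" mask above gives Java shift semantics: only the low five
+ * bits of the shift count are used for 32-bit values, so e.g. shl-int
+ * with a count of 33 behaves like a shift by 1.  The long variants
+ * below mask with 0x3f for the same reason.
+ */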
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowException("Ljava/lang/ArithmeticException;", \
+ "divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ LOGV("Invalid array access: %p %d (len=%d)\n", \
+ arrayObj, vsrc2, arrayObj->length); \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \
+ NULL); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short bar (writes the low 16 bits to the same field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
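+/*
+ * A standalone sketch of that hazard (illustrative only; assumes a
+ * little-endian host so the narrow store lands in the low half of the
+ * 32-bit slot):
+ */
+#if 0
+#include <string.h>
+static u4 subWordHazardSketch(void)
+{
+    u4 field;
+    u2 narrow;
+
+    field = 0xffffffff;                      /* iput-quick of foo == -1 */
+    narrow = 0x0001;                         /* short bar = 1 */
+    memcpy(&field, &narrow, sizeof(narrow)); /* iput-short: 16 bits only */
+    return field;                            /* iget-quick sees 0xffff0001 */
+}
+#endif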
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&ifield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
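+/*
+ * Context for the -quick handlers above: the install-time optimizer
+ * rewrites a resolved iget/iput into its -quick form, replacing the
+ * field index with the field's byte offset so the handler can skip
+ * resolution.  Illustrative encodings (spellings assumed, not taken
+ * from this file):
+ *
+ *     iget v0, v1, SomeClass.count    // field@0012, resolved via ref
+ *     iget-quick v0, v1, [obj+8]      // byte offset baked in
+ */
+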
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_GET(&sfield->field); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ UPDATE_FIELD_PUT(&sfield->field); \
+ } \
+ FINISH(2);
+
+
+/* File: c/gotoTargets.c */
+/*
+ * C footer. This has some common code shared by the various targets.
+ */
+
+/*
+ * Everything from here on is a "goto target". In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * the next instruction. Here, these are subroutines that return to the
+ * caller.
+ */
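+
+/*
+ * A rough sketch of the two dispatch styles (illustrative only, not the
+ * real macro expansions).  The threaded interpreter chains handlers with
+ * gcc's labels-as-values; this stub build compiles each target as a
+ * routine that simply returns.
+ */
+#if 0
+static void dispatchSketch(void)
+{
+    static void* handlers[] = { &&op_nop /* , ... one per opcode */ };
+    goto *handlers[0];          /* computed goto (gcc extension) */
+op_nop:
+    return;                     /* stub build: a plain return instead */
+}
+#endif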
+
+GOTO_TARGET(filledNewArray, bool methodCallRange)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ u4* contents;
+ char typeCh;
+ int i;
+ u4 arg5;
+
+ EXPORT_PC();
+
+ ref = FETCH(1); /* class ref */
+ vdst = FETCH(2); /* first 4 regs -or- range base */
+
+ if (methodCallRange) {
+ vsrc1 = INST_AA(inst); /* #of elements */
+ arg5 = -1; /* silence compiler warning */
+ ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ arg5 = INST_A(inst);
+ vsrc1 = INST_B(inst); /* #of elements */
+ ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1, ref, vdst, arg5);
+ }
+
+ /*
+ * Resolve the array class.
+ */
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /*
+ if (!dvmIsArrayClass(arrayClass)) {
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "filled-new-array needs array class");
+ GOTO_exceptionThrown();
+ }
+ */
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ /*
+ * Create an array of the specified type.
+ */
+ LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor);
+ typeCh = arrayClass->descriptor[1];
+ if (typeCh == 'D' || typeCh == 'J') {
+ /* category 2 primitives not allowed */
+ dvmThrowException("Ljava/lang/RuntimeError;",
+ "bad filled array req");
+ GOTO_exceptionThrown();
+ } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
+ /* TODO: requires multiple "fill in" loops with different widths */
+ LOGE("non-int primitives not implemented\n");
+ dvmThrowException("Ljava/lang/InternalError;",
+ "filled-new-array not implemented for anything but 'int'");
+ GOTO_exceptionThrown();
+ }
+
+ newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+
+ /*
+ * Fill in the elements. It's legal for vsrc1 to be zero.
+ */
+ contents = (u4*) newArray->contents;
+ if (methodCallRange) {
+ for (i = 0; i < vsrc1; i++)
+ contents[i] = GET_REGISTER(vdst+i);
+ } else {
+ assert(vsrc1 <= 5);
+ if (vsrc1 == 5) {
+ contents[4] = GET_REGISTER(arg5);
+ vsrc1--;
+ }
+ for (i = 0; i < vsrc1; i++) {
+ contents[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+ }
+
+ retval.l = newArray;
+ }
+ FINISH(3);
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange)
+ {
+ Method* baseMethod;
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ /*
+ * This can happen if you create two classes, Base and Sub, where
+ * Sub is a sub-class of Base. Declare a protected abstract
+ * method foo() in Base, and invoke foo() from a method in Base.
+ * Base is an "abstract base class" and is never instantiated
+ * directly. Now, override foo() in Sub, and use Sub. This
+ * works fine unless Sub stops providing an implementation of
+ * the method.
+ */
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+#if 0
+ if (vsrc1 != methodToCall->insSize) {
+ LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ //dvmDumpClass(baseMethod->clazz);
+ //dvmDumpClass(methodToCall->clazz);
+ dvmDumpAllClasses(0);
+ }
+#endif
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange)
+ {
+ Method* baseMethod;
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ * The first arg to dvmResolveMethod() is just the referring class
+ * (used for class loaders and such), so we don't want to pass
+ * the superclass into the resolution call.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied\n");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in that class' superclass.
+ */
+ if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
+ /*
+ * Method does not exist in the superclass. Could happen if
+ * superclass gets updated.
+ */
+ dvmThrowException("Ljava/lang/NoSuchMethodError;",
+ baseMethod->name);
+ GOTO_exceptionThrown();
+ }
+ methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ base=%s.%s super-virtual=%s.%s\n",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange)
+ {
+ Object* thisPtr;
+ ClassObject* thisClass;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ thisClass = thisPtr->clazz;
+
+ /*
+ * Given a class and a method index, find the Method* with the
+ * actual code we want to execute.
+ */
+ methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
+ methodClassDex);
+ if (methodToCall == NULL) {
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange) {
+ ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref,
+ METHOD_DIRECT);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown direct method\n"); // should be impossible
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange)
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ EXPORT_PC();
+
+ if (methodCallRange)
+ ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ else
+ ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown method\n");
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
+ {
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(ref < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
+ {
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+#if 0 /* impossible in optimized + verified code */
+ if (ref >= curMethod->clazz->super->vtableCount) {
+ dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL);
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(ref < curMethod->clazz->super->vtableCount);
+#endif
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in the method's class' superclass.
+ */
+ methodToCall = curMethod->clazz->super->vtable[ref];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowException("Ljava/lang/AbstractMethodError;",
+ "abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ super-virtual[%d]=%s.%s\n",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+
+
+ /*
+ * General handling for return-void, return, and return-wide. Put the
+ * return value in "retval" before jumping here.
+ */
+GOTO_TARGET(returnFromMethod)
+ {
+ StackSaveArea* saveArea;
+
+ /*
+ * We must do this BEFORE we pop the previous stack frame off, so
+ * that the GC can see the return value (if any) in the local vars.
+ *
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryReturn, 0);
+
+ ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+ retval.j, curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+ //DUMP_REGS(curMethod, fp);
+
+ saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+ debugSaveArea = saveArea;
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, curMethod);
+#endif
+
+ /* back up to previous frame and see if we hit a break */
+ fp = saveArea->prevFrame;
+ assert(fp != NULL);
+ if (dvmIsBreakFrame(fp)) {
+ /* bail without popping the method frame from stack */
+ LOGVV("+++ returned into break frame\n");
+ GOTO_bail();
+ }
+
+ /* update thread FP, and reset local variables */
+ self->curFrame = fp;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = saveArea->savedPc;
+ ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+
+ /* use FINISH on the caller's invoke instruction */
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * Jump here when the code throws an exception.
+ *
+ * By the time we get here, the Throwable has been created and the stack
+ * trace has been saved off.
+ */
+GOTO_TARGET(exceptionThrown)
+ {
+ Object* exception;
+ int catchRelPc;
+
+ /*
+ * Since this is now an interpreter switch point, we must do it before
+ * we do anything at all.
+ */
+ PERIODIC_CHECKS(kInterpEntryThrow, 0);
+
+ /*
+ * We save off the exception and clear the exception status. While
+ * processing the exception we might need to load some Throwable
+ * classes, and we don't want class loader exceptions to get
+ * confused with this one.
+ */
+ assert(dvmCheckException(self));
+ exception = dvmGetException(self);
+ dvmAddTrackedAlloc(exception, self);
+ dvmClearException(self);
+
+ LOGV("Handling exception %s at %s:%d\n",
+ exception->clazz->descriptor, curMethod->name,
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ /*
+ * Tell the debugger about it.
+ *
+ * TODO: if the exception was thrown by interpreted code, control
+ * fell through native, and then back to us, we will report the
+ * exception at the point of the throw and again here. We can avoid
+ * this by not reporting exceptions when we jump here directly from
+ * the native call code above, but then we won't report exceptions
+ * that were thrown *from* the JNI code (as opposed to *through* it).
+ *
+ * The correct solution is probably to ignore from-native exceptions
+ * here, and have the JNI exception code do the reporting to the
+ * debugger.
+ */
+ if (gDvm.debuggerActive) {
+ void* catchFrame;
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, true, &catchFrame);
+ dvmDbgPostException(fp, pc - curMethod->insns, catchFrame,
+ catchRelPc, exception);
+ }
+#endif
+
+ /*
+ * We need to unroll to the catch block or the nearest "break"
+ * frame.
+ *
+ * A break frame could indicate that we have reached an intermediate
+ * native call, or have gone off the top of the stack and the thread
+ * needs to exit. Either way, we return from here, leaving the
+ * exception raised.
+ *
+ * If we do find a catch block, we want to transfer execution to
+ * that point.
+ */
+ catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+ exception, false, (void*)&fp);
+
+ /*
+ * Restore the stack bounds after an overflow. This isn't going to
+ * be correct in all circumstances, e.g. if JNI code devours the
+ * exception this won't happen until some other exception gets
+ * thrown. If the code keeps pushing the stack bounds we'll end
+ * up aborting the VM.
+ *
+ * Note we want to do this *after* the call to dvmFindCatchBlock,
+ * because that may need extra stack space to resolve exception
+ * classes (e.g. through a class loader).
+ */
+ if (self->stackOverflowed)
+ dvmCleanupStackOverflow(self);
+
+ if (catchRelPc < 0) {
+ /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+ LOGD("Exception %s from %s:%d not caught locally\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+#endif
+ dvmSetException(self, exception);
+ dvmReleaseTrackedAlloc(exception, self);
+ GOTO_bail();
+ }
+
+#if DVM_SHOW_EXCEPTION >= 3
+ {
+ const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+ LOGD("Exception %s thrown from %s:%d to %s:%d\n",
+ exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+ dvmLineNumFromPC(curMethod, pc - curMethod->insns),
+ dvmGetMethodSourceFile(catchMethod),
+ dvmLineNumFromPC(catchMethod, catchRelPc));
+ }
+#endif
+
+ /*
+ * Adjust local variables to match self->curFrame and the
+ * updated PC.
+ */
+ //fp = (u4*) self->curFrame;
+ curMethod = SAVEAREA_FROM_FP(fp)->method;
+ //methodClass = curMethod->clazz;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = curMethod->insns + catchRelPc;
+ ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, false); // show all regs
+
+ /*
+ * Restore the exception if the handler wants it.
+ *
+ * The Dalvik spec mandates that, if an exception handler wants to
+ * do something with the exception, the first instruction executed
+ * must be "move-exception". We can pass the exception along
+ * through the thread struct, and let the move-exception instruction
+ * clear it for us.
+ *
+ * If the handler doesn't call move-exception, we don't want to
+ * finish here with an exception still pending.
+ */
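+ /*
+ * For illustration (hypothetical Dalvik source, not taken from this
+ * file), a handler that consumes the exception typically begins:
+ *
+ *     :catch_0
+ *         move-exception v0
+ *         invoke-virtual {v0}, Ljava/lang/Throwable;->getMessage()Ljava/lang/String;
+ */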
+ if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+ dvmSetException(self, exception);
+
+ dvmReleaseTrackedAlloc(exception, self);
+ FINISH(0);
+ }
+GOTO_TARGET_END
+
+
+ /*
+ * General handling for invoke-{virtual,super,direct,static,interface},
+ * including "quick" variants.
+ *
+ * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+ * depending on whether this is a "/range" instruction.
+ *
+ * For a range call:
+ * "vsrc1" holds the argument count (8 bits)
+ * "vdst" holds the first argument in the range
+ * For a non-range call:
+ * "vsrc1" holds the argument count (4 bits) and the 5th argument index
+ * "vdst" holds four 4-bit register indices
+ *
+ * The caller must EXPORT_PC before jumping here, because any method
+ * call can throw a stack overflow exception.
+ */
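+/*
+ * Worked example of the non-range packing (illustrative): for
+ * invoke-virtual {v1, v2, v3, v4, v5}, vsrc1 == 0x55 and vdst == 0x4321.
+ * A minimal decoder, mirroring the switch in the handler below:
+ */
+#if 0
+static void unpackNonRangeSketch(u2 vsrc1, u2 vdst, u4 regNums[5])
+{
+    u4 count = vsrc1 >> 4;          /* 5 */
+    u4 i;
+
+    if (count == 5)
+        regNums[4] = vsrc1 & 0x0f;  /* fifth arg from the low nibble */
+    for (i = 0; i < count && i < 4; i++) {
+        regNums[i] = vdst & 0x0f;   /* v1..v4, lowest nibble first */
+        vdst >>= 4;
+    }
+}
+#endif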
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+ u2 count, u2 regs)
+ {
+ STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
+
+ //printf("range=%d call=%p count=%d regs=0x%04x\n",
+ // methodCallRange, methodToCall, count, regs);
+ //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+ // methodToCall->name, methodToCall->signature);
+
+ u4* outs;
+ int i;
+
+ /*
+ * Copy args. This may corrupt vsrc1/vdst.
+ */
+ if (methodCallRange) {
+ // could use memcpy or a "Duff's device"; most functions have
+ // so few args it won't matter much
+ assert(vsrc1 <= curMethod->outsSize);
+ assert(vsrc1 == methodToCall->insSize);
+ outs = OUTS_FROM_FP(fp, vsrc1);
+ for (i = 0; i < vsrc1; i++)
+ outs[i] = GET_REGISTER(vdst+i);
+ } else {
+ u4 count = vsrc1 >> 4;
+
+ assert(count <= curMethod->outsSize);
+ assert(count == methodToCall->insSize);
+ assert(count <= 5);
+
+ outs = OUTS_FROM_FP(fp, count);
+#if 0
+ if (count == 5) {
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ count--;
+ }
+ for (i = 0; i < (int) count; i++) {
+ outs[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+#else
+ // This version executes fewer instructions but is larger
+ // overall. Seems to be a teensy bit faster.
+ assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
+ switch (count) {
+ case 5:
+ outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+ case 4:
+ outs[3] = GET_REGISTER(vdst >> 12);
+ case 3:
+ outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
+ case 2:
+ outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
+ case 1:
+ outs[0] = GET_REGISTER(vdst & 0x0f);
+ default:
+ ;
+ }
+#endif
+ }
+ }
+
+ /*
+ * (This was originally a "goto" target; I've kept it separate from the
+ * stuff above in case we want to refactor things again.)
+ *
+ * At this point, we have the arguments stored in the "outs" area of
+ * the current method's stack frame, and the method to call in
+ * "methodToCall". Push a new stack frame.
+ */
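+    /*
+     * (Illustrative note: with this layout the callee's "ins" occupy the
+     * same slots as the caller's "outs", so the copy above already put
+     * the arguments where the callee expects them.)
+     */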
+ {
+ StackSaveArea* newSaveArea;
+ u4* newFp;
+
+ ILOGV("> %s%s.%s %s",
+ dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ methodToCall->signature);
+
+ newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+ newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+ /* verify that we have enough space */
+ if (true) {
+ u1* bottom;
+ bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+ if (bottom < self->interpStackEnd) {
+ /* stack overflow */
+ LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
+ self->interpStackStart, self->interpStackEnd, bottom,
+ self->interpStackSize, methodToCall->name);
+ dvmHandleStackOverflow(self);
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+ //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
+ // fp, newFp, newSaveArea, bottom);
+ }
+
+#ifdef LOG_INSTR
+ if (methodToCall->registersSize > methodToCall->insSize) {
+ /*
+ * This makes valgrind quiet when we print registers that
+ * haven't been initialized. Turn it off when the debug
+ * messages are disabled -- we want valgrind to report any
+ * used-before-initialized issues.
+ */
+ memset(newFp, 0xcc,
+ (methodToCall->registersSize - methodToCall->insSize) * 4);
+ }
+#endif
+
+#ifdef EASY_GDB
+ newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+ newSaveArea->prevFrame = fp;
+ newSaveArea->savedPc = pc;
+ newSaveArea->method = methodToCall;
+
+ if (!dvmIsNativeMethod(methodToCall)) {
+ /*
+ * "Call" interpreted code. Reposition the PC, update the
+ * frame pointer and other local state, and continue.
+ */
+ curMethod = methodToCall;
+ methodClassDex = curMethod->clazz->pDvmDex;
+ pc = methodToCall->insns;
+ fp = self->curFrame = newFp;
+#ifdef EASY_GDB
+ debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+#if INTERP_TYPE == INTERP_DBG
+ debugIsMethodEntry = true; // profiling, debugging
+#endif
+ ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+ curMethod->name, curMethod->signature);
+ DUMP_REGS(curMethod, fp, true); // show input args
+ FINISH(0); // jump to method start
+ } else {
+ /* set this up for JNI locals, even if not a JNI native */
+ newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;
+
+ self->curFrame = newFp;
+
+ DUMP_REGS(methodToCall, newFp, true); // show input args
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_ENTER(self, methodToCall);
+#endif
+
+ ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+ methodToCall->name, methodToCall->signature);
+
+ /*
+ * Jump through native call bridge. Because we leave no
+ * space for locals on native calls, "newFp" points directly
+ * to the method arguments.
+ */
+ (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
+ if (gDvm.debuggerActive) {
+ dvmDbgPostLocationEvent(methodToCall, -1,
+ dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
+ }
+#endif
+#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
+ TRACE_METHOD_EXIT(self, methodToCall);
+#endif
+
+ /* pop frame off */
+ dvmPopJniLocals(self, newSaveArea);
+ self->curFrame = fp;
+
+ /*
+ * If the native code threw an exception, or interpreted code
+ * invoked by the native call threw one and nobody has cleared
+ * it, jump to our local exception handling.
+ */
+ if (dvmCheckException(self)) {
+ LOGV("Exception thrown by/below native code\n");
+ GOTO_exceptionThrown();
+ }
+
+ ILOGD("> retval=0x%llx (leaving native)", retval.j);
+ ILOGD("> (return from native %s.%s to %s.%s %s)",
+ methodToCall->clazz->descriptor, methodToCall->name,
+ curMethod->clazz->descriptor, curMethod->name,
+ curMethod->signature);
+
+ //u2 invokeInstr = INST_INST(FETCH(0));
+ if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+ invokeInstr <= OP_INVOKE_INTERFACE*/)
+ {
+ FINISH(3);
+ } else {
+ //LOGE("Unknown invoke instr %02x at %d\n",
+ // invokeInstr, (int) (pc - curMethod->insns));
+ assert(false);
+ }
+ }
+ }
+ assert(false); // should not get here
+GOTO_TARGET_END
+
+
+/* File: cstubs/enddefs.c */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+