/*
 * This file was generated automatically by gen-mterp.py for 'armv7-a'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/
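
/*
 * Illustrative sketch (added annotation, not generated code): a call such as
 * foo(a, b, c, d, e) under this EABI passes a..d in r0-r3 and the fifth
 * argument in the first stacked slot, with a 32-bit result back in r0
 * (64-bit results in r0/r1, low word in r0).  Assuming the caller has
 * already reserved the outgoing-argument space at the top of the stack:
 *
 *     mov     r0, #1              @ arg 1
 *     mov     r1, #2              @ arg 2
 *     mov     r2, #3              @ arg 3
 *     mov     r3, #4              @ arg 4
 *     mov     ip, #5
 *     str     ip, [sp]            @ arg 5, first stacked argument
 *     bl      foo                 @ 32-bit result returned in r0
 */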

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!

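/*
 * Illustrative expansion (added annotation, not generated code): a handler
 * for a two-code-unit instruction uses FETCH_ADVANCE_INST(2), which expands
 * to
 *
 *     ldrh    rINST, [rPC, #4]!
 *
 * The pre-indexed writeback ("!") is what advances rPC: the halfword at
 * rPC+4 is loaded and rPC+4 is written back to rPC in the same instruction.
 */
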
/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

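/*
 * Illustrative use (added annotation): for an "op vAA, vBB, vCC" instruction
 * the second code unit holds CC|BB, so a handler can fetch the halfword and
 * split it, or pull out the individual bytes:
 *
 *     FETCH(r0, 1)                @ r0<- CCBB
 *     and     r2, r0, #255        @ r2<- BB
 *     mov     r3, r0, lsr #8      @ r3<- CC
 *
 * or, equivalently:
 *
 *     FETCH_B(r2, 1, 0)           @ r2<- BB (low byte)
 *     FETCH_B(r3, 1, 1)           @ r3<- CC (high byte)
 */
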
/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

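/*
 * Example (added annotation): each opcode handler below is aligned on a
 * 64-byte boundary (".balign 64"), so the handler for opcode N starts at
 * rIBASE + N*64.  With ip holding 0x0f, GOTO_OPCODE(ip) emits
 *
 *     add     pc, rIBASE, ip, lsl #6      @ pc<- rIBASE + 0x3c0
 *
 * which jumps straight to the .L_OP_RETURN handler.
 */
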
/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

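/*
 * Illustrative expansion (added annotation): Dalvik registers are 32-bit
 * slots indexed off the frame pointer, so GET_VREG(r2, r1) expands to
 *
 *     ldr     r2, [rFP, r1, lsl #2]       @ r2<- fp[r1]
 *
 * and SET_VREG(r2, r0) stores r2 back to fp[r0] the same way.
 */
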
#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

#if defined(WITH_JIT)
#include "../common/jit-config.h"
#endif

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm

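/*
 * Illustrative expansion (added annotation; the operand is hypothetical):
 * LDR_PC_LR "[r0, #4]" emits
 *
 *     mov     lr, pc
 *     ldr     pc, [r0, #4]
 *
 * so the loaded address is entered as a subroutine with LR pointing at the
 * instruction after the ldr.
 */
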
/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.  From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldrb    r1, [r0, #offGlue_entryPoint]   @ InterpEntry enum is char
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.Lno_singleStep:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov     r1, #0                      @ prepare the value for the new state
    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r0,[rGLUE, #offGlue_jitResume]
    ldr     r2,[rGLUE, #offGlue_jitResumePC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .Lno_singleStep             @ must have branched, don't resume
    mov     r1, #kInterpEntryInstr
    strb    r1, [rGLUE, #offGlue_entryPoint]
    ldr     rINST, .LdvmCompilerTemplate
    bx      r0                          @ re-enter the translation
.LdvmCompilerTemplate:
    .word   dvmCompilerTemplateStart
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
    mov     r0, r1                          @ return the changeInterp value
    add     sp, sp, #4                      @ un-align 64
    LDMFD_PC "r4-r10,fp"                    @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint



    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

402/* ------------------------------ */
403 .balign 64
404.L_OP_NOP: /* 0x00 */
405/* File: armv5te/OP_NOP.S */
406 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST
407 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
408 GOTO_OPCODE(ip) @ execute it
409
410#ifdef ASSIST_DEBUGGER
411 /* insert fake function header to help gdb find the stack frame */
412 .type dalvik_inst, %function
413dalvik_inst:
414 .fnstart
415 MTERP_ENTRY1
416 MTERP_ENTRY2
417 .fnend
418#endif
419
420
421/* ------------------------------ */
422 .balign 64
423.L_OP_MOVE: /* 0x01 */
424/* File: armv6t2/OP_MOVE.S */
425 /* for move, move-object, long-to-int */
426 /* op vA, vB */
427 mov r1, rINST, lsr #12 @ r1<- B from 15:12
428 ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
429 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
430 GET_VREG(r2, r1) @ r2<- fp[B]
431 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
432 SET_VREG(r2, r0) @ fp[A]<- r2
433 GOTO_OPCODE(ip) @ execute next instruction
434
435
436/* ------------------------------ */
437 .balign 64
438.L_OP_MOVE_FROM16: /* 0x02 */
439/* File: armv5te/OP_MOVE_FROM16.S */
440 /* for: move/from16, move-object/from16 */
441 /* op vAA, vBBBB */
442 FETCH(r1, 1) @ r1<- BBBB
443 mov r0, rINST, lsr #8 @ r0<- AA
444 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
445 GET_VREG(r2, r1) @ r2<- fp[BBBB]
446 GET_INST_OPCODE(ip) @ extract opcode from rINST
447 SET_VREG(r2, r0) @ fp[AA]<- r2
448 GOTO_OPCODE(ip) @ jump to next instruction
449
450
451/* ------------------------------ */
452 .balign 64
453.L_OP_MOVE_16: /* 0x03 */
454/* File: armv5te/OP_MOVE_16.S */
455 /* for: move/16, move-object/16 */
456 /* op vAAAA, vBBBB */
457 FETCH(r1, 2) @ r1<- BBBB
458 FETCH(r0, 1) @ r0<- AAAA
459 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
460 GET_VREG(r2, r1) @ r2<- fp[BBBB]
461 GET_INST_OPCODE(ip) @ extract opcode from rINST
462 SET_VREG(r2, r0) @ fp[AAAA]<- r2
463 GOTO_OPCODE(ip) @ jump to next instruction
464
465
466/* ------------------------------ */
467 .balign 64
468.L_OP_MOVE_WIDE: /* 0x04 */
469/* File: armv6t2/OP_MOVE_WIDE.S */
470 /* move-wide vA, vB */
471 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
472 mov r3, rINST, lsr #12 @ r3<- B
473 ubfx r2, rINST, #8, #4 @ r2<- A
474 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
475 add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
476 ldmia r3, {r0-r1} @ r0/r1<- fp[B]
477 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
478 GET_INST_OPCODE(ip) @ extract opcode from rINST
479 stmia r2, {r0-r1} @ fp[A]<- r0/r1
480 GOTO_OPCODE(ip) @ jump to next instruction
481
482
483/* ------------------------------ */
484 .balign 64
485.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
486/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
487 /* move-wide/from16 vAA, vBBBB */
488 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
489 FETCH(r3, 1) @ r3<- BBBB
490 mov r2, rINST, lsr #8 @ r2<- AA
491 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
492 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
493 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
494 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
495 GET_INST_OPCODE(ip) @ extract opcode from rINST
496 stmia r2, {r0-r1} @ fp[AA]<- r0/r1
497 GOTO_OPCODE(ip) @ jump to next instruction
498
499
500/* ------------------------------ */
501 .balign 64
502.L_OP_MOVE_WIDE_16: /* 0x06 */
503/* File: armv5te/OP_MOVE_WIDE_16.S */
504 /* move-wide/16 vAAAA, vBBBB */
505 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
506 FETCH(r3, 2) @ r3<- BBBB
507 FETCH(r2, 1) @ r2<- AAAA
508 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
509 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
510 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
511 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
512 GET_INST_OPCODE(ip) @ extract opcode from rINST
513 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
514 GOTO_OPCODE(ip) @ jump to next instruction
515
516
517/* ------------------------------ */
518 .balign 64
519.L_OP_MOVE_OBJECT: /* 0x07 */
520/* File: armv5te/OP_MOVE_OBJECT.S */
521/* File: armv5te/OP_MOVE.S */
522 /* for move, move-object, long-to-int */
523 /* op vA, vB */
524 mov r1, rINST, lsr #12 @ r1<- B from 15:12
525 mov r0, rINST, lsr #8 @ r0<- A from 11:8
526 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
527 GET_VREG(r2, r1) @ r2<- fp[B]
528 and r0, r0, #15
529 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
530 SET_VREG(r2, r0) @ fp[A]<- r2
531 GOTO_OPCODE(ip) @ execute next instruction
532
533
534
535/* ------------------------------ */
536 .balign 64
537.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
538/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
539/* File: armv5te/OP_MOVE_FROM16.S */
540 /* for: move/from16, move-object/from16 */
541 /* op vAA, vBBBB */
542 FETCH(r1, 1) @ r1<- BBBB
543 mov r0, rINST, lsr #8 @ r0<- AA
544 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
545 GET_VREG(r2, r1) @ r2<- fp[BBBB]
546 GET_INST_OPCODE(ip) @ extract opcode from rINST
547 SET_VREG(r2, r0) @ fp[AA]<- r2
548 GOTO_OPCODE(ip) @ jump to next instruction
549
550
551
552/* ------------------------------ */
553 .balign 64
554.L_OP_MOVE_OBJECT_16: /* 0x09 */
555/* File: armv5te/OP_MOVE_OBJECT_16.S */
556/* File: armv5te/OP_MOVE_16.S */
557 /* for: move/16, move-object/16 */
558 /* op vAAAA, vBBBB */
559 FETCH(r1, 2) @ r1<- BBBB
560 FETCH(r0, 1) @ r0<- AAAA
561 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
562 GET_VREG(r2, r1) @ r2<- fp[BBBB]
563 GET_INST_OPCODE(ip) @ extract opcode from rINST
564 SET_VREG(r2, r0) @ fp[AAAA]<- r2
565 GOTO_OPCODE(ip) @ jump to next instruction
566
567
568
569/* ------------------------------ */
570 .balign 64
571.L_OP_MOVE_RESULT: /* 0x0a */
572/* File: armv5te/OP_MOVE_RESULT.S */
573 /* for: move-result, move-result-object */
574 /* op vAA */
575 mov r2, rINST, lsr #8 @ r2<- AA
576 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
577 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i
578 GET_INST_OPCODE(ip) @ extract opcode from rINST
579 SET_VREG(r0, r2) @ fp[AA]<- r0
580 GOTO_OPCODE(ip) @ jump to next instruction
581
582
583/* ------------------------------ */
584 .balign 64
585.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
586/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
587 /* move-result-wide vAA */
588 mov r2, rINST, lsr #8 @ r2<- AA
589 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval
590 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
591 ldmia r3, {r0-r1} @ r0/r1<- retval.j
592 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
593 GET_INST_OPCODE(ip) @ extract opcode from rINST
594 stmia r2, {r0-r1} @ fp[AA]<- r0/r1
595 GOTO_OPCODE(ip) @ jump to next instruction
596
597
598/* ------------------------------ */
599 .balign 64
600.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
601/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
602/* File: armv5te/OP_MOVE_RESULT.S */
603 /* for: move-result, move-result-object */
604 /* op vAA */
605 mov r2, rINST, lsr #8 @ r2<- AA
606 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
607 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i
608 GET_INST_OPCODE(ip) @ extract opcode from rINST
609 SET_VREG(r0, r2) @ fp[AA]<- r0
610 GOTO_OPCODE(ip) @ jump to next instruction
611
612
613
614/* ------------------------------ */
615 .balign 64
616.L_OP_MOVE_EXCEPTION: /* 0x0d */
617/* File: armv5te/OP_MOVE_EXCEPTION.S */
618 /* move-exception vAA */
619 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
620 mov r2, rINST, lsr #8 @ r2<- AA
621 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass
622 mov r1, #0 @ r1<- 0
623 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
624 SET_VREG(r3, r2) @ fp[AA]<- exception obj
625 GET_INST_OPCODE(ip) @ extract opcode from rINST
626 str r1, [r0, #offThread_exception] @ dvmClearException bypass
627 GOTO_OPCODE(ip) @ jump to next instruction
628
629
630/* ------------------------------ */
631 .balign 64
632.L_OP_RETURN_VOID: /* 0x0e */
633/* File: armv5te/OP_RETURN_VOID.S */
634 b common_returnFromMethod
635
636
637/* ------------------------------ */
638 .balign 64
639.L_OP_RETURN: /* 0x0f */
640/* File: armv5te/OP_RETURN.S */
641 /*
642 * Return a 32-bit value. Copies the return value into the "glue"
643 * structure, then jumps to the return handler.
644 *
645 * for: return, return-object
646 */
647 /* op vAA */
648 mov r2, rINST, lsr #8 @ r2<- AA
649 GET_VREG(r0, r2) @ r0<- vAA
650 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
651 b common_returnFromMethod
652
653
654/* ------------------------------ */
655 .balign 64
656.L_OP_RETURN_WIDE: /* 0x10 */
657/* File: armv5te/OP_RETURN_WIDE.S */
658 /*
659 * Return a 64-bit value. Copies the return value into the "glue"
660 * structure, then jumps to the return handler.
661 */
662 /* return-wide vAA */
663 mov r2, rINST, lsr #8 @ r2<- AA
664 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
665 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval
666 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
667 stmia r3, {r0-r1} @ retval<- r0/r1
668 b common_returnFromMethod
669
670
671/* ------------------------------ */
672 .balign 64
673.L_OP_RETURN_OBJECT: /* 0x11 */
674/* File: armv5te/OP_RETURN_OBJECT.S */
675/* File: armv5te/OP_RETURN.S */
676 /*
677 * Return a 32-bit value. Copies the return value into the "glue"
678 * structure, then jumps to the return handler.
679 *
680 * for: return, return-object
681 */
682 /* op vAA */
683 mov r2, rINST, lsr #8 @ r2<- AA
684 GET_VREG(r0, r2) @ r0<- vAA
685 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
686 b common_returnFromMethod
687
688
689
690/* ------------------------------ */
691 .balign 64
692.L_OP_CONST_4: /* 0x12 */
693/* File: armv6t2/OP_CONST_4.S */
694 /* const/4 vA, #+B */
695 mov r1, rINST, lsl #16 @ r1<- Bxxx0000
696 ubfx r0, rINST, #8, #4 @ r0<- A
697 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
698 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
699 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
700 SET_VREG(r1, r0) @ fp[A]<- r1
701 GOTO_OPCODE(ip) @ execute next instruction
702
703
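/*
 * Worked example (added annotation): for "const/4 v3, #-1" the code unit is
 * 0xf312 (B=0xf, A=3, op=0x12).  "mov r1, rINST, lsl #16" yields 0xf3120000
 * and "asr #28" then yields 0xffffffff, i.e. the 4-bit literal sign-extended
 * to -1, which is stored to fp[3].
 */
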
704/* ------------------------------ */
705 .balign 64
706.L_OP_CONST_16: /* 0x13 */
707/* File: armv5te/OP_CONST_16.S */
708 /* const/16 vAA, #+BBBB */
709 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
710 mov r3, rINST, lsr #8 @ r3<- AA
711 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
712 SET_VREG(r0, r3) @ vAA<- r0
713 GET_INST_OPCODE(ip) @ extract opcode from rINST
714 GOTO_OPCODE(ip) @ jump to next instruction
715
716
717/* ------------------------------ */
718 .balign 64
719.L_OP_CONST: /* 0x14 */
720/* File: armv5te/OP_CONST.S */
721 /* const vAA, #+BBBBbbbb */
722 mov r3, rINST, lsr #8 @ r3<- AA
723 FETCH(r0, 1) @ r0<- bbbb (low)
724 FETCH(r1, 2) @ r1<- BBBB (high)
725 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
726 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
727 GET_INST_OPCODE(ip) @ extract opcode from rINST
728 SET_VREG(r0, r3) @ vAA<- r0
729 GOTO_OPCODE(ip) @ jump to next instruction
730
731
732/* ------------------------------ */
733 .balign 64
734.L_OP_CONST_HIGH16: /* 0x15 */
735/* File: armv5te/OP_CONST_HIGH16.S */
736 /* const/high16 vAA, #+BBBB0000 */
737 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended)
738 mov r3, rINST, lsr #8 @ r3<- AA
739 mov r0, r0, lsl #16 @ r0<- BBBB0000
740 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
741 SET_VREG(r0, r3) @ vAA<- r0
742 GET_INST_OPCODE(ip) @ extract opcode from rINST
743 GOTO_OPCODE(ip) @ jump to next instruction
744
745
746/* ------------------------------ */
747 .balign 64
748.L_OP_CONST_WIDE_16: /* 0x16 */
749/* File: armv5te/OP_CONST_WIDE_16.S */
750 /* const-wide/16 vAA, #+BBBB */
751 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
752 mov r3, rINST, lsr #8 @ r3<- AA
753 mov r1, r0, asr #31 @ r1<- ssssssss
754 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
755 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
756 GET_INST_OPCODE(ip) @ extract opcode from rINST
757 stmia r3, {r0-r1} @ vAA<- r0/r1
758 GOTO_OPCODE(ip) @ jump to next instruction
759
760
761/* ------------------------------ */
762 .balign 64
763.L_OP_CONST_WIDE_32: /* 0x17 */
764/* File: armv5te/OP_CONST_WIDE_32.S */
765 /* const-wide/32 vAA, #+BBBBbbbb */
766 FETCH(r0, 1) @ r0<- 0000bbbb (low)
767 mov r3, rINST, lsr #8 @ r3<- AA
768 FETCH_S(r2, 2) @ r2<- ssssBBBB (high)
769 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
770 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
771 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
772 mov r1, r0, asr #31 @ r1<- ssssssss
773 GET_INST_OPCODE(ip) @ extract opcode from rINST
774 stmia r3, {r0-r1} @ vAA<- r0/r1
775 GOTO_OPCODE(ip) @ jump to next instruction
776
777
778/* ------------------------------ */
779 .balign 64
780.L_OP_CONST_WIDE: /* 0x18 */
781/* File: armv5te/OP_CONST_WIDE.S */
782 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
783 FETCH(r0, 1) @ r0<- bbbb (low)
784 FETCH(r1, 2) @ r1<- BBBB (low middle)
785 FETCH(r2, 3) @ r2<- hhhh (high middle)
786 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
787 FETCH(r3, 4) @ r3<- HHHH (high)
788 mov r9, rINST, lsr #8 @ r9<- AA
789 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
790 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST
791 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
792 GET_INST_OPCODE(ip) @ extract opcode from rINST
793 stmia r9, {r0-r1} @ vAA<- r0/r1
794 GOTO_OPCODE(ip) @ jump to next instruction
795
796
797/* ------------------------------ */
798 .balign 64
799.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
800/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
801 /* const-wide/high16 vAA, #+BBBB000000000000 */
802 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended)
803 mov r3, rINST, lsr #8 @ r3<- AA
804 mov r0, #0 @ r0<- 00000000
805 mov r1, r1, lsl #16 @ r1<- BBBB0000
806 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
807 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
808 GET_INST_OPCODE(ip) @ extract opcode from rINST
809 stmia r3, {r0-r1} @ vAA<- r0/r1
810 GOTO_OPCODE(ip) @ jump to next instruction
811
812
813/* ------------------------------ */
814 .balign 64
815.L_OP_CONST_STRING: /* 0x1a */
816/* File: armv5te/OP_CONST_STRING.S */
817 /* const/string vAA, String@BBBB */
818 FETCH(r1, 1) @ r1<- BBBB
819 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
820 mov r9, rINST, lsr #8 @ r9<- AA
821 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
822 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
823 cmp r0, #0 @ not yet resolved?
824 beq .LOP_CONST_STRING_resolve
825 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
826 GET_INST_OPCODE(ip) @ extract opcode from rINST
827 SET_VREG(r0, r9) @ vAA<- r0
828 GOTO_OPCODE(ip) @ jump to next instruction
829
830/* ------------------------------ */
831 .balign 64
832.L_OP_CONST_STRING_JUMBO: /* 0x1b */
833/* File: armv5te/OP_CONST_STRING_JUMBO.S */
834 /* const/string vAA, String@BBBBBBBB */
835 FETCH(r0, 1) @ r0<- bbbb (low)
836 FETCH(r1, 2) @ r1<- BBBB (high)
837 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
838 mov r9, rINST, lsr #8 @ r9<- AA
839 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
840 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
841 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
842 cmp r0, #0
843 beq .LOP_CONST_STRING_JUMBO_resolve
844 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
845 GET_INST_OPCODE(ip) @ extract opcode from rINST
846 SET_VREG(r0, r9) @ vAA<- r0
847 GOTO_OPCODE(ip) @ jump to next instruction
848
849/* ------------------------------ */
850 .balign 64
851.L_OP_CONST_CLASS: /* 0x1c */
852/* File: armv5te/OP_CONST_CLASS.S */
853 /* const/class vAA, Class@BBBB */
854 FETCH(r1, 1) @ r1<- BBBB
855 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex
856 mov r9, rINST, lsr #8 @ r9<- AA
857 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses
858 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB]
859 cmp r0, #0 @ not yet resolved?
860 beq .LOP_CONST_CLASS_resolve
861 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
862 GET_INST_OPCODE(ip) @ extract opcode from rINST
863 SET_VREG(r0, r9) @ vAA<- r0
864 GOTO_OPCODE(ip) @ jump to next instruction
865
866/* ------------------------------ */
867 .balign 64
868.L_OP_MONITOR_ENTER: /* 0x1d */
869/* File: armv5te/OP_MONITOR_ENTER.S */
870 /*
871 * Synchronize on an object.
872 */
873 /* monitor-enter vAA */
874 mov r2, rINST, lsr #8 @ r2<- AA
875 GET_VREG(r1, r2) @ r1<- vAA (object)
876 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
877 cmp r1, #0 @ null object?
878 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING
879 beq common_errNullObject @ null object, throw an exception
880 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
881 bl dvmLockObject @ call(self, obj)
882#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
883 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
884 ldr r1, [r0, #offThread_exception] @ check for exception
885 cmp r1, #0
886 bne common_exceptionThrown @ exception raised, bail out
887#endif
888 GET_INST_OPCODE(ip) @ extract opcode from rINST
889 GOTO_OPCODE(ip) @ jump to next instruction
890
891
892/* ------------------------------ */
893 .balign 64
894.L_OP_MONITOR_EXIT: /* 0x1e */
895/* File: armv5te/OP_MONITOR_EXIT.S */
896 /*
897 * Unlock an object.
898 *
899 * Exceptions that occur when unlocking a monitor need to appear as
900 * if they happened at the following instruction. See the Dalvik
901 * instruction spec.
902 */
903 /* monitor-exit vAA */
904 mov r2, rINST, lsr #8 @ r2<- AA
905 EXPORT_PC() @ before fetch: export the PC
906 GET_VREG(r1, r2) @ r1<- vAA (object)
907 cmp r1, #0 @ null object?
908 beq common_errNullObject @ yes
909 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
910 bl dvmUnlockObject @ r0<- success for unlock(self, obj)
911 cmp r0, #0 @ failed?
912 beq common_exceptionThrown @ yes, exception is pending
913 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST
914 GET_INST_OPCODE(ip) @ extract opcode from rINST
915 GOTO_OPCODE(ip) @ jump to next instruction
916
917
918/* ------------------------------ */
919 .balign 64
920.L_OP_CHECK_CAST: /* 0x1f */
921/* File: armv5te/OP_CHECK_CAST.S */
922 /*
923 * Check to see if a cast from one class to another is allowed.
924 */
925 /* check-cast vAA, class@BBBB */
926 mov r3, rINST, lsr #8 @ r3<- AA
927 FETCH(r2, 1) @ r2<- BBBB
928 GET_VREG(r9, r3) @ r9<- object
929 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex
930 cmp r9, #0 @ is object null?
931 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses
932 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds
933 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class
934 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
935 cmp r1, #0 @ have we resolved this before?
936 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now
937.LOP_CHECK_CAST_resolved:
938 cmp r0, r1 @ same class (trivial success)?
939 bne .LOP_CHECK_CAST_fullcheck @ no, do full check
940.LOP_CHECK_CAST_okay:
941 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
942 GET_INST_OPCODE(ip) @ extract opcode from rINST
943 GOTO_OPCODE(ip) @ jump to next instruction
944
945/* ------------------------------ */
946 .balign 64
947.L_OP_INSTANCE_OF: /* 0x20 */
948/* File: armv5te/OP_INSTANCE_OF.S */
949 /*
950 * Check to see if an object reference is an instance of a class.
951 *
952 * Most common situation is a non-null object, being compared against
953 * an already-resolved class.
954 */
955 /* instance-of vA, vB, class@CCCC */
956 mov r3, rINST, lsr #12 @ r3<- B
957 mov r9, rINST, lsr #8 @ r9<- A+
958 GET_VREG(r0, r3) @ r0<- vB (object)
959 and r9, r9, #15 @ r9<- A
960 cmp r0, #0 @ is object null?
961 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex
962 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0
963 FETCH(r3, 1) @ r3<- CCCC
964 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses
965 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class
966 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
967 cmp r1, #0 @ have we resolved this before?
968 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now
969.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
970 cmp r0, r1 @ same class (trivial success)?
971 beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish
972 b .LOP_INSTANCE_OF_fullcheck @ no, do full check
973
974/* ------------------------------ */
975 .balign 64
976.L_OP_ARRAY_LENGTH: /* 0x21 */
977/* File: armv6t2/OP_ARRAY_LENGTH.S */
978 /*
979 * Return the length of an array.
980 */
981 mov r1, rINST, lsr #12 @ r1<- B
982 ubfx r2, rINST, #8, #4 @ r2<- A
983 GET_VREG(r0, r1) @ r0<- vB (object ref)
984 cmp r0, #0 @ is object null?
985 beq common_errNullObject @ yup, fail
986 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
987 ldr r3, [r0, #offArrayObject_length] @ r3<- array length
988 GET_INST_OPCODE(ip) @ extract opcode from rINST
989 SET_VREG(r3, r2) @ vB<- length
990 GOTO_OPCODE(ip) @ jump to next instruction
991
992
993/* ------------------------------ */
994 .balign 64
995.L_OP_NEW_INSTANCE: /* 0x22 */
996/* File: armv5te/OP_NEW_INSTANCE.S */
997 /*
998 * Create a new instance of a class.
999 */
1000 /* new-instance vAA, class@BBBB */
1001 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
1002 FETCH(r1, 1) @ r1<- BBBB
1003 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
1004 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
1005 EXPORT_PC() @ req'd for init, resolve, alloc
1006 cmp r0, #0 @ already resolved?
1007 beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now
1008.LOP_NEW_INSTANCE_resolved: @ r0=class
1009 ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
1010 cmp r1, #CLASS_INITIALIZED @ has class been initialized?
1011 bne .LOP_NEW_INSTANCE_needinit @ no, init class now
1012.LOP_NEW_INSTANCE_initialized: @ r0=class
1013 mov r1, #ALLOC_DONT_TRACK @ flags for alloc call
1014 bl dvmAllocObject @ r0<- new object
1015 b .LOP_NEW_INSTANCE_finish @ continue
1016
1017/* ------------------------------ */
1018 .balign 64
1019.L_OP_NEW_ARRAY: /* 0x23 */
1020/* File: armv5te/OP_NEW_ARRAY.S */
1021 /*
1022 * Allocate an array of objects, specified with the array class
1023 * and a count.
1024 *
1025 * The verifier guarantees that this is an array class, so we don't
1026 * check for it here.
1027 */
1028 /* new-array vA, vB, class@CCCC */
1029 mov r0, rINST, lsr #12 @ r0<- B
1030 FETCH(r2, 1) @ r2<- CCCC
1031 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
1032 GET_VREG(r1, r0) @ r1<- vB (array length)
1033 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
1034 cmp r1, #0 @ check length
1035 ldr r0, [r3, r2, lsl #2] @ r0<- resolved class
1036 bmi common_errNegativeArraySize @ negative length, bail
1037 cmp r0, #0 @ already resolved?
1038 EXPORT_PC() @ req'd for resolve, alloc
1039 bne .LOP_NEW_ARRAY_finish @ resolved, continue
1040 b .LOP_NEW_ARRAY_resolve @ do resolve now
1041
1042/* ------------------------------ */
1043 .balign 64
1044.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
1045/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
1046 /*
1047 * Create a new array with elements filled from registers.
1048 *
1049 * for: filled-new-array, filled-new-array/range
1050 */
1051 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
1052 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
1053 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
1054 FETCH(r1, 1) @ r1<- BBBB
1055 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
1056 EXPORT_PC() @ need for resolve and alloc
1057 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
1058 mov r10, rINST, lsr #8 @ r10<- AA or BA
1059 cmp r0, #0 @ already resolved?
1060 bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
10618: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
1062 mov r2, #0 @ r2<- false
1063 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
1064 bl dvmResolveClass @ r0<- call(clazz, ref)
1065 cmp r0, #0 @ got null?
1066 beq common_exceptionThrown @ yes, handle exception
1067 b .LOP_FILLED_NEW_ARRAY_continue
1068
1069/* ------------------------------ */
1070 .balign 64
1071.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
1072/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
1073/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
1074 /*
1075 * Create a new array with elements filled from registers.
1076 *
1077 * for: filled-new-array, filled-new-array/range
1078 */
1079 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
1080 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
1081 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
1082 FETCH(r1, 1) @ r1<- BBBB
1083 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
1084 EXPORT_PC() @ need for resolve and alloc
1085 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
1086 mov r10, rINST, lsr #8 @ r10<- AA or BA
1087 cmp r0, #0 @ already resolved?
1088 bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
10898: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
1090 mov r2, #0 @ r2<- false
1091 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
1092 bl dvmResolveClass @ r0<- call(clazz, ref)
1093 cmp r0, #0 @ got null?
1094 beq common_exceptionThrown @ yes, handle exception
1095 b .LOP_FILLED_NEW_ARRAY_RANGE_continue
1096
1097
1098/* ------------------------------ */
1099 .balign 64
1100.L_OP_FILL_ARRAY_DATA: /* 0x26 */
1101/* File: armv5te/OP_FILL_ARRAY_DATA.S */
1102 /* fill-array-data vAA, +BBBBBBBB */
1103 FETCH(r0, 1) @ r0<- bbbb (lo)
1104 FETCH(r1, 2) @ r1<- BBBB (hi)
1105 mov r3, rINST, lsr #8 @ r3<- AA
1106 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
1107 GET_VREG(r0, r3) @ r0<- vAA (array object)
1108 add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
1109 EXPORT_PC();
1110 bl dvmInterpHandleFillArrayData@ fill the array with predefined data
1111 cmp r0, #0 @ 0 means an exception is thrown
1112 beq common_exceptionThrown @ has exception
1113 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
1114 GET_INST_OPCODE(ip) @ extract opcode from rINST
1115 GOTO_OPCODE(ip) @ jump to next instruction
1116
1117/* ------------------------------ */
1118 .balign 64
1119.L_OP_THROW: /* 0x27 */
1120/* File: armv5te/OP_THROW.S */
1121 /*
1122 * Throw an exception object in the current thread.
1123 */
1124 /* throw vAA */
1125 mov r2, rINST, lsr #8 @ r2<- AA
1126 GET_VREG(r1, r2) @ r1<- vAA (exception object)
1127 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self
1128 cmp r1, #0 @ null object?
1129 beq common_errNullObject @ yes, throw an NPE instead
1130 @ bypass dvmSetException, just store it
1131 str r1, [r0, #offThread_exception] @ thread->exception<- obj
1132 b common_exceptionThrown
1133
1134
1135/* ------------------------------ */
1136 .balign 64
1137.L_OP_GOTO: /* 0x28 */
1138/* File: armv5te/OP_GOTO.S */
1139 /*
1140 * Unconditional branch, 8-bit offset.
1141 *
1142 * The branch distance is a signed code-unit offset, which we need to
1143 * double to get a byte offset.
1144 */
1145 /* goto +AA */
1146 mov r0, rINST, lsl #16 @ r0<- AAxx0000
1147 movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended)
1148 mov r9, r9, lsl #1 @ r9<- byte offset
1149 bmi common_backwardBranch @ backward branch, do periodic checks
1150#if defined(WITH_JIT)
1151 GET_JIT_PROF_TABLE(r0)
1152 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1153 cmp r0,#0
1154 bne common_updateProfile
1155 GET_INST_OPCODE(ip) @ extract opcode from rINST
1156 GOTO_OPCODE(ip) @ jump to next instruction
1157#else
1158 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1159 GET_INST_OPCODE(ip) @ extract opcode from rINST
1160 GOTO_OPCODE(ip) @ jump to next instruction
1161#endif
1162
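/*
 * Worked example (added annotation): for "goto -5" the AA byte is 0xfb.
 * "lsl #16" puts it in bits 31:24, "asr #24" sign-extends it to -5 code
 * units, and the final "lsl #1" doubles that to a -10 byte offset, so the
 * negative result takes the common_backwardBranch path.
 */
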
1163/* ------------------------------ */
1164 .balign 64
1165.L_OP_GOTO_16: /* 0x29 */
1166/* File: armv5te/OP_GOTO_16.S */
1167 /*
1168 * Unconditional branch, 16-bit offset.
1169 *
1170 * The branch distance is a signed code-unit offset, which we need to
1171 * double to get a byte offset.
1172 */
1173 /* goto/16 +AAAA */
1174 FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended)
1175 movs r9, r0, asl #1 @ r9<- byte offset, check sign
1176 bmi common_backwardBranch @ backward branch, do periodic checks
1177#if defined(WITH_JIT)
1178 GET_JIT_PROF_TABLE(r0)
1179 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1180 cmp r0,#0
1181 bne common_updateProfile
1182 GET_INST_OPCODE(ip) @ extract opcode from rINST
1183 GOTO_OPCODE(ip) @ jump to next instruction
1184#else
1185 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1186 GET_INST_OPCODE(ip) @ extract opcode from rINST
1187 GOTO_OPCODE(ip) @ jump to next instruction
1188#endif
1189
1190
1191/* ------------------------------ */
1192 .balign 64
1193.L_OP_GOTO_32: /* 0x2a */
1194/* File: armv5te/OP_GOTO_32.S */
1195 /*
1196 * Unconditional branch, 32-bit offset.
1197 *
1198 * The branch distance is a signed code-unit offset, which we need to
1199 * double to get a byte offset.
1200 *
1201 * Unlike most opcodes, this one is allowed to branch to itself, so
1202 * our "backward branch" test must be "<=0" instead of "<0". The ORRS
1203 * instruction doesn't affect the V flag, so we need to clear it
1204 * explicitly.
1205 */
1206 /* goto/32 +AAAAAAAA */
1207 FETCH(r0, 1) @ r0<- aaaa (lo)
1208 FETCH(r1, 2) @ r1<- AAAA (hi)
1209 cmp ip, ip @ (clear V flag during stall)
1210 orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign
1211 mov r9, r0, asl #1 @ r9<- byte offset
1212 ble common_backwardBranch @ backward branch, do periodic checks
1213#if defined(WITH_JIT)
1214 GET_JIT_PROF_TABLE(r0)
1215 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1216 cmp r0,#0
1217 bne common_updateProfile
1218 GET_INST_OPCODE(ip) @ extract opcode from rINST
1219 GOTO_OPCODE(ip) @ jump to next instruction
1220#else
1221 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1222 GET_INST_OPCODE(ip) @ extract opcode from rINST
1223 GOTO_OPCODE(ip) @ jump to next instruction
1224#endif
1225
1226/* ------------------------------ */
1227 .balign 64
1228.L_OP_PACKED_SWITCH: /* 0x2b */
1229/* File: armv5te/OP_PACKED_SWITCH.S */
1230 /*
1231 * Handle a packed-switch or sparse-switch instruction. In both cases
1232 * we decode it and hand it off to a helper function.
1233 *
1234 * We don't really expect backward branches in a switch statement, but
1235 * they're perfectly legal, so we check for them here.
1236 *
1237 * for: packed-switch, sparse-switch
1238 */
1239 /* op vAA, +BBBB */
1240 FETCH(r0, 1) @ r0<- bbbb (lo)
1241 FETCH(r1, 2) @ r1<- BBBB (hi)
1242 mov r3, rINST, lsr #8 @ r3<- AA
1243 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
1244 GET_VREG(r1, r3) @ r1<- vAA
1245 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
1246 bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
1247 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
1248 bmi common_backwardBranch @ backward branch, do periodic checks
1249 beq common_backwardBranch @ (want to use BLE but V is unknown)
1250#if defined(WITH_JIT)
1251 GET_JIT_PROF_TABLE(r0)
1252 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1253 cmp r0,#0
1254 bne common_updateProfile
1255 GET_INST_OPCODE(ip) @ extract opcode from rINST
1256 GOTO_OPCODE(ip) @ jump to next instruction
1257#else
1258 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1259 GET_INST_OPCODE(ip) @ extract opcode from rINST
1260 GOTO_OPCODE(ip) @ jump to next instruction
1261#endif
1262
1263
1264/* ------------------------------ */
1265 .balign 64
1266.L_OP_SPARSE_SWITCH: /* 0x2c */
1267/* File: armv5te/OP_SPARSE_SWITCH.S */
1268/* File: armv5te/OP_PACKED_SWITCH.S */
1269 /*
1270 * Handle a packed-switch or sparse-switch instruction. In both cases
1271 * we decode it and hand it off to a helper function.
1272 *
1273 * We don't really expect backward branches in a switch statement, but
1274 * they're perfectly legal, so we check for them here.
1275 *
1276 * for: packed-switch, sparse-switch
1277 */
1278 /* op vAA, +BBBB */
1279 FETCH(r0, 1) @ r0<- bbbb (lo)
1280 FETCH(r1, 2) @ r1<- BBBB (hi)
1281 mov r3, rINST, lsr #8 @ r3<- AA
1282 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
1283 GET_VREG(r1, r3) @ r1<- vAA
1284 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
1285 bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
1286 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
1287 bmi common_backwardBranch @ backward branch, do periodic checks
1288 beq common_backwardBranch @ (want to use BLE but V is unknown)
1289#if defined(WITH_JIT)
1290 GET_JIT_PROF_TABLE(r0)
1291 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1292 cmp r0,#0
1293 bne common_updateProfile
1294 GET_INST_OPCODE(ip) @ extract opcode from rINST
1295 GOTO_OPCODE(ip) @ jump to next instruction
1296#else
1297 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1298 GET_INST_OPCODE(ip) @ extract opcode from rINST
1299 GOTO_OPCODE(ip) @ jump to next instruction
1300#endif
1301
1302
1303
1304/* ------------------------------ */
1305 .balign 64
1306.L_OP_CMPL_FLOAT: /* 0x2d */
1307/* File: arm-vfp/OP_CMPL_FLOAT.S */
1308 /*
1309 * Compare two floating-point values. Puts 0, 1, or -1 into the
1310 * destination register based on the results of the comparison.
1311 *
1312 * int compare(x, y) {
1313 * if (x == y) {
1314 * return 0;
1315 * } else if (x > y) {
1316 * return 1;
1317 * } else if (x < y) {
1318 * return -1;
1319 * } else {
1320 * return -1;
1321 * }
1322 * }
1323 */
1324 /* op vAA, vBB, vCC */
1325 FETCH(r0, 1) @ r0<- CCBB
1326    mov     r9, rINST, lsr #8           @ r9<- AA
1327    and     r2, r0, #255                @ r2<- BB
1328 mov r3, r0, lsr #8 @ r3<- CC
1329 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
1330 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
1331 flds s0, [r2] @ s0<- vBB
1332 flds s1, [r3] @ s1<- vCC
1333    fcmpes  s0, s1                      @ compare (vBB, vCC)
1334 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
1335 mvn r0, #0 @ r0<- -1 (default)
1336 GET_INST_OPCODE(ip) @ extract opcode from rINST
1337 fmstat @ export status flags
1338 movgt r0, #1 @ (greater than) r1<- 1
1339 moveq r0, #0 @ (equal) r1<- 0
1340    b       .LOP_CMPL_FLOAT_finish      @ argh
1341
1342
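/*
 * NaN note (added annotation): if either operand is NaN, fcmpes reports an
 * unordered result, which leaves both the "gt" and "eq" conditions false;
 * neither conditional move fires and the preset -1 survives -- the final
 * "return -1" case in the pseudocode above.  The cmpg variants preset +1
 * instead, giving NaN the opposite bias.
 */
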
1343/* ------------------------------ */
1344 .balign 64
1345.L_OP_CMPG_FLOAT: /* 0x2e */
1346/* File: arm-vfp/OP_CMPG_FLOAT.S */
1347 /*
1348 * Compare two floating-point values. Puts 0, 1, or -1 into the
1349 * destination register based on the results of the comparison.
1350 *
1351 * int compare(x, y) {
1352 * if (x == y) {
1353 * return 0;
1354 * } else if (x < y) {
1355 * return -1;
1356 * } else if (x > y) {
1357 * return 1;
1358 * } else {
1359 * return 1;
1360 * }
1361 * }
1362 */
1363 /* op vAA, vBB, vCC */
1364 FETCH(r0, 1) @ r0<- CCBB
1365    mov     r9, rINST, lsr #8           @ r9<- AA
1366    and     r2, r0, #255                @ r2<- BB
1367 mov r3, r0, lsr #8 @ r3<- CC
1368 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
1369 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
1370 flds s0, [r2] @ s0<- vBB
1371 flds s1, [r3] @ s1<- vCC
1372    fcmpes  s0, s1                      @ compare (vBB, vCC)
1373 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
1374 mov r0, #1 @ r0<- 1 (default)
1375 GET_INST_OPCODE(ip) @ extract opcode from rINST
1376 fmstat @ export status flags
1377 mvnmi r0, #0 @ (less than) r1<- -1
1378 moveq r0, #0 @ (equal) r1<- 0
1379    b       .LOP_CMPG_FLOAT_finish      @ argh
1380
1381
1382/* ------------------------------ */
1383 .balign 64
1384.L_OP_CMPL_DOUBLE: /* 0x2f */
1385/* File: arm-vfp/OP_CMPL_DOUBLE.S */
1386 /*
1387 * Compare two floating-point values. Puts 0, 1, or -1 into the
1388 * destination register based on the results of the comparison.
1389 *
1390 * int compare(x, y) {
1391 * if (x == y) {
1392 * return 0;
1393 * } else if (x > y) {
1394 * return 1;
1395 * } else if (x < y) {
1396 * return -1;
1397 * } else {
1398 * return -1;
1399 * }
1400 * }
1401 */
1402 /* op vAA, vBB, vCC */
1403 FETCH(r0, 1) @ r0<- CCBB
1404    mov     r9, rINST, lsr #8           @ r9<- AA
1405    and     r2, r0, #255                @ r2<- BB
1406 mov r3, r0, lsr #8 @ r3<- CC
1407 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
1408 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
1409 fldd d0, [r2] @ d0<- vBB
1410 fldd d1, [r3] @ d1<- vCC
1411    fcmped  d0, d1                      @ compare (vBB, vCC)
1412 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
1413 mvn r0, #0 @ r0<- -1 (default)
1414 GET_INST_OPCODE(ip) @ extract opcode from rINST
1415 fmstat @ export status flags
1416 movgt r0, #1 @ (greater than) r1<- 1
1417 moveq r0, #0 @ (equal) r1<- 0
1418    b       .LOP_CMPL_DOUBLE_finish     @ argh
1419
1420
1421/* ------------------------------ */
1422 .balign 64
1423.L_OP_CMPG_DOUBLE: /* 0x30 */
1424/* File: arm-vfp/OP_CMPG_DOUBLE.S */
1425 /*
1426 * Compare two floating-point values. Puts 0, 1, or -1 into the
1427 * destination register based on the results of the comparison.
1428 *
1429 * int compare(x, y) {
1430 * if (x == y) {
1431 * return 0;
1432 * } else if (x < y) {
1433 * return -1;
1434 * } else if (x > y) {
1435 * return 1;
1436 * } else {
1437 * return 1;
1438 * }
1439 * }
1440 */
1441 /* op vAA, vBB, vCC */
1442 FETCH(r0, 1) @ r0<- CCBB
1443    mov     r9, rINST, lsr #8           @ r9<- AA
1444    and     r2, r0, #255                @ r2<- BB
1445 mov r3, r0, lsr #8 @ r3<- CC
1446 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
1447 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
1448 fldd d0, [r2] @ d0<- vBB
1449 fldd d1, [r3] @ d1<- vCC
1450    fcmped  d0, d1                      @ compare (vBB, vCC)
1451 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
1452 mov r0, #1 @ r0<- 1 (default)
1453 GET_INST_OPCODE(ip) @ extract opcode from rINST
1454 fmstat @ export status flags
1455 mvnmi r0, #0 @ (less than) r1<- -1
1456 moveq r0, #0 @ (equal) r1<- 0
1457    b       .LOP_CMPG_DOUBLE_finish     @ argh
1458
1459
1460/* ------------------------------ */
1461 .balign 64
1462.L_OP_CMP_LONG: /* 0x31 */
1463/* File: armv5te/OP_CMP_LONG.S */
1464 /*
1465 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
1466 * register based on the results of the comparison.
1467 *
1468 * We load the full values with LDM, but in practice many values could
1469 * be resolved by only looking at the high word. This could be made
1470 * faster or slower by splitting the LDM into a pair of LDRs.
1471 *
1472 * If we just wanted to set condition flags, we could do this:
1473 * subs ip, r0, r2
1474 * sbcs ip, r1, r3
1475 * subeqs ip, r0, r2
1476 * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
1477 * integer value, which we can do with 2 conditional mov/mvn instructions
1478 * (set 1, set -1; if they're equal we already have 0 in ip), giving
1479 * us a constant 5-cycle path plus a branch at the end to the
1480 * instruction epilogue code. The multi-compare approach below needs
1481 * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
1482 * in the worst case (the 64-bit values are equal).
1483 */
1484 /* cmp-long vAA, vBB, vCC */
1485 FETCH(r0, 1) @ r0<- CCBB
1486 mov r9, rINST, lsr #8 @ r9<- AA
1487 and r2, r0, #255 @ r2<- BB
1488 mov r3, r0, lsr #8 @ r3<- CC
1489 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
1490 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
1491 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
1492 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
1493 cmp r1, r3 @ compare (vBB+1, vCC+1)
1494 blt .LOP_CMP_LONG_less @ signed compare on high part
1495 bgt .LOP_CMP_LONG_greater
1496 subs r1, r0, r2 @ r1<- r0 - r2
1497 bhi .LOP_CMP_LONG_greater @ unsigned compare on low part
1498 bne .LOP_CMP_LONG_less
1499 b .LOP_CMP_LONG_finish @ equal; r1 already holds 0
1500
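/*
 * Worked example (added annotation): comparing a vBB whose high word is 1
 * against a vCC whose high word is 0, the signed compare of the high words
 * already decides the result and the code branches to _greater without
 * touching the low words.  Only when the high words match does the unsigned
 * low-word subtract choose between _less, _greater, and the all-equal
 * fall-through.
 */
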
1501/* ------------------------------ */
1502 .balign 64
1503.L_OP_IF_EQ: /* 0x32 */
1504/* File: armv6t2/OP_IF_EQ.S */
1505/* File: armv6t2/bincmp.S */
1506 /*
1507 * Generic two-operand compare-and-branch operation. Provide a "revcmp"
1508 * fragment that specifies the *reverse* comparison to perform, e.g.
1509 * for "if-le" you would use "gt".
1510 *
1511 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
1512 */
1513 /* if-cmp vA, vB, +CCCC */
1514 mov r1, rINST, lsr #12 @ r1<- B
1515 ubfx r0, rINST, #8, #4 @ r0<- A
1516 GET_VREG(r3, r1) @ r3<- vB
1517 GET_VREG(r2, r0) @ r2<- vA
1518 mov r9, #4 @ r0<- BYTE branch dist for not-taken
1519 cmp r2, r3 @ compare (vA, vB)
1520 bne 1f @ branch to 1 if comparison failed
1521 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1522 movs r9, r9, asl #1 @ convert to bytes, check sign
1523 bmi common_backwardBranch @ yes, do periodic checks
15241:
1525#if defined(WITH_JIT)
1526 GET_JIT_PROF_TABLE(r0)
1527 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1528 b common_testUpdateProfile
1529#else
1530 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1531 GET_INST_OPCODE(ip) @ extract opcode from rINST
1532 GOTO_OPCODE(ip) @ jump to next instruction
1533#endif
1534
1535
1536
1537/* ------------------------------ */
1538 .balign 64
1539.L_OP_IF_NE: /* 0x33 */
1540/* File: armv6t2/OP_IF_NE.S */
1541/* File: armv6t2/bincmp.S */
1542 /*
1543 * Generic two-operand compare-and-branch operation. Provide a "revcmp"
1544 * fragment that specifies the *reverse* comparison to perform, e.g.
1545 * for "if-le" you would use "gt".
1546 *
1547 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
1548 */
1549 /* if-cmp vA, vB, +CCCC */
1550 mov r1, rINST, lsr #12 @ r1<- B
1551 ubfx r0, rINST, #8, #4 @ r0<- A
1552 GET_VREG(r3, r1) @ r3<- vB
1553 GET_VREG(r2, r0) @ r2<- vA
1554 mov r9, #4 @ r0<- BYTE branch dist for not-taken
1555 cmp r2, r3 @ compare (vA, vB)
1556 beq 1f @ branch to 1 if comparison failed
1557 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1558 movs r9, r9, asl #1 @ convert to bytes, check sign
1559 bmi common_backwardBranch @ yes, do periodic checks
15601:
1561#if defined(WITH_JIT)
1562 GET_JIT_PROF_TABLE(r0)
1563 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1564 b common_testUpdateProfile
1565#else
1566 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1567 GET_INST_OPCODE(ip) @ extract opcode from rINST
1568 GOTO_OPCODE(ip) @ jump to next instruction
1569#endif
1570
1571
1572
1573/* ------------------------------ */
1574 .balign 64
1575.L_OP_IF_LT: /* 0x34 */
1576/* File: armv6t2/OP_IF_LT.S */
1577/* File: armv6t2/bincmp.S */
1578 /*
1579 * Generic two-operand compare-and-branch operation. Provide a "revcmp"
1580 * fragment that specifies the *reverse* comparison to perform, e.g.
1581 * for "if-le" you would use "gt".
1582 *
1583 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
1584 */
1585 /* if-cmp vA, vB, +CCCC */
1586 mov r1, rINST, lsr #12 @ r1<- B
1587 ubfx r0, rINST, #8, #4 @ r0<- A
1588 GET_VREG(r3, r1) @ r3<- vB
1589 GET_VREG(r2, r0) @ r2<- vA
1590 mov r9, #4 @ r0<- BYTE branch dist for not-taken
1591 cmp r2, r3 @ compare (vA, vB)
1592 bge 1f @ branch to 1 if comparison failed
1593 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1594 movs r9, r9, asl #1 @ convert to bytes, check sign
1595 bmi common_backwardBranch @ yes, do periodic checks
15961:
1597#if defined(WITH_JIT)
1598 GET_JIT_PROF_TABLE(r0)
1599 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1600 b common_testUpdateProfile
1601#else
1602 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1603 GET_INST_OPCODE(ip) @ extract opcode from rINST
1604 GOTO_OPCODE(ip) @ jump to next instruction
1605#endif
1606
1607
1608
1609/* ------------------------------ */
1610 .balign 64
1611.L_OP_IF_GE: /* 0x35 */
1612/* File: armv6t2/OP_IF_GE.S */
1613/* File: armv6t2/bincmp.S */
1614 /*
1615 * Generic two-operand compare-and-branch operation. Provide a "revcmp"
1616 * fragment that specifies the *reverse* comparison to perform, e.g.
1617 * for "if-le" you would use "gt".
1618 *
1619 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
1620 */
1621 /* if-cmp vA, vB, +CCCC */
1622 mov r1, rINST, lsr #12 @ r1<- B
1623 ubfx r0, rINST, #8, #4 @ r0<- A
1624 GET_VREG(r3, r1) @ r3<- vB
1625 GET_VREG(r2, r0) @ r2<- vA
1626 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1627 cmp r2, r3 @ compare (vA, vB)
1628 blt 1f @ branch to 1 if comparison failed
1629 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1630 movs r9, r9, asl #1 @ convert to bytes, check sign
1631 bmi common_backwardBranch @ yes, do periodic checks
16321:
1633#if defined(WITH_JIT)
1634 GET_JIT_PROF_TABLE(r0)
1635 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1636 b common_testUpdateProfile
1637#else
1638 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1639 GET_INST_OPCODE(ip) @ extract opcode from rINST
1640 GOTO_OPCODE(ip) @ jump to next instruction
1641#endif
1642
1643
1644
1645/* ------------------------------ */
1646 .balign 64
1647.L_OP_IF_GT: /* 0x36 */
1648/* File: armv6t2/OP_IF_GT.S */
1649/* File: armv6t2/bincmp.S */
1650 /*
1651 * Generic two-operand compare-and-branch operation. Provide a "revcmp"
1652 * fragment that specifies the *reverse* comparison to perform, e.g.
1653 * for "if-le" you would use "gt".
1654 *
1655 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
1656 */
1657 /* if-cmp vA, vB, +CCCC */
1658 mov r1, rINST, lsr #12 @ r1<- B
1659 ubfx r0, rINST, #8, #4 @ r0<- A
1660 GET_VREG(r3, r1) @ r3<- vB
1661 GET_VREG(r2, r0) @ r2<- vA
1662 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1663 cmp r2, r3 @ compare (vA, vB)
1664 ble 1f @ branch to 1 if comparison failed
1665 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1666 movs r9, r9, asl #1 @ convert to bytes, check sign
1667 bmi common_backwardBranch @ yes, do periodic checks
16681:
1669#if defined(WITH_JIT)
1670 GET_JIT_PROF_TABLE(r0)
1671 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1672 b common_testUpdateProfile
1673#else
1674 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1675 GET_INST_OPCODE(ip) @ extract opcode from rINST
1676 GOTO_OPCODE(ip) @ jump to next instruction
1677#endif
1678
1679
1680
1681/* ------------------------------ */
1682 .balign 64
1683.L_OP_IF_LE: /* 0x37 */
1684/* File: armv6t2/OP_IF_LE.S */
1685/* File: armv6t2/bincmp.S */
1686 /*
1687 * Generic two-operand compare-and-branch operation. Provide a "revcmp"
1688 * fragment that specifies the *reverse* comparison to perform, e.g.
1689 * for "if-le" you would use "gt".
1690 *
1691 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
1692 */
1693 /* if-cmp vA, vB, +CCCC */
1694 mov r1, rINST, lsr #12 @ r1<- B
1695 ubfx r0, rINST, #8, #4 @ r0<- A
1696 GET_VREG(r3, r1) @ r3<- vB
1697 GET_VREG(r2, r0) @ r2<- vA
1698 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1699 cmp r2, r3 @ compare (vA, vB)
1700 bgt 1f @ branch to 1 if comparison failed
1701 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1702 movs r9, r9, asl #1 @ convert to bytes, check sign
1703 bmi common_backwardBranch @ yes, do periodic checks
17041:
1705#if defined(WITH_JIT)
1706 GET_JIT_PROF_TABLE(r0)
1707 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1708 b common_testUpdateProfile
1709#else
1710 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1711 GET_INST_OPCODE(ip) @ extract opcode from rINST
1712 GOTO_OPCODE(ip) @ jump to next instruction
1713#endif
1714
1715
1716
1717/* ------------------------------ */
1718 .balign 64
1719.L_OP_IF_EQZ: /* 0x38 */
1720/* File: armv5te/OP_IF_EQZ.S */
1721/* File: armv5te/zcmp.S */
1722 /*
1723 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
1724 * fragment that specifies the *reverse* comparison to perform, e.g.
1725 * for "if-le" you would use "gt".
1726 *
1727 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
1728 */
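    /*
     * Same shape as the two-operand form above, but vAA is tested against a
     * literal zero; a one-line C sketch (illustrative names):
     *
     *   if ((int32_t)vregs[AA] == 0) pc += (int16_t)insns[pc + 1]; else pc += 2;
     */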
1729 /* if-cmp vAA, +BBBB */
1730 mov r0, rINST, lsr #8 @ r0<- AA
1731 GET_VREG(r2, r0) @ r2<- vAA
1732 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1733 cmp r2, #0 @ compare (vAA, 0)
1734 bne 1f @ branch to 1 if comparison failed
1735 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1736 movs r9, r9, asl #1 @ convert to bytes, check sign
1737 bmi common_backwardBranch @ backward branch, do periodic checks
17381:
1739#if defined(WITH_JIT)
1740 GET_JIT_PROF_TABLE(r0)
1741 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1742 cmp r0,#0
1743 bne common_updateProfile
1744 GET_INST_OPCODE(ip) @ extract opcode from rINST
1745 GOTO_OPCODE(ip) @ jump to next instruction
1746#else
1747 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1748 GET_INST_OPCODE(ip) @ extract opcode from rINST
1749 GOTO_OPCODE(ip) @ jump to next instruction
1750#endif
1751
1752
1753
1754/* ------------------------------ */
1755 .balign 64
1756.L_OP_IF_NEZ: /* 0x39 */
1757/* File: armv5te/OP_IF_NEZ.S */
1758/* File: armv5te/zcmp.S */
1759 /*
1760 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
1761 * fragment that specifies the *reverse* comparison to perform, e.g.
1762 * for "if-le" you would use "gt".
1763 *
1764 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
1765 */
1766 /* if-cmp vAA, +BBBB */
1767 mov r0, rINST, lsr #8 @ r0<- AA
1768 GET_VREG(r2, r0) @ r2<- vAA
1769 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1770 cmp r2, #0 @ compare (vAA, 0)
1771 beq 1f @ branch to 1 if comparison failed
1772 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1773 movs r9, r9, asl #1 @ convert to bytes, check sign
1774 bmi common_backwardBranch @ backward branch, do periodic checks
17751:
1776#if defined(WITH_JIT)
1777 GET_JIT_PROF_TABLE(r0)
1778 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1779 cmp r0,#0
1780 bne common_updateProfile
1781 GET_INST_OPCODE(ip) @ extract opcode from rINST
1782 GOTO_OPCODE(ip) @ jump to next instruction
1783#else
1784 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1785 GET_INST_OPCODE(ip) @ extract opcode from rINST
1786 GOTO_OPCODE(ip) @ jump to next instruction
1787#endif
1788
1789
1790
1791/* ------------------------------ */
1792 .balign 64
1793.L_OP_IF_LTZ: /* 0x3a */
1794/* File: armv5te/OP_IF_LTZ.S */
1795/* File: armv5te/zcmp.S */
1796 /*
1797 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
1798 * fragment that specifies the *reverse* comparison to perform, e.g.
1799 * for "if-le" you would use "gt".
1800 *
1801 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
1802 */
1803 /* if-cmp vAA, +BBBB */
1804 mov r0, rINST, lsr #8 @ r0<- AA
1805 GET_VREG(r2, r0) @ r2<- vAA
1806 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1807 cmp r2, #0 @ compare (vAA, 0)
1808 bge 1f @ branch to 1 if comparison failed
1809 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1810 movs r9, r9, asl #1 @ convert to bytes, check sign
1811 bmi common_backwardBranch @ backward branch, do periodic checks
18121:
1813#if defined(WITH_JIT)
1814 GET_JIT_PROF_TABLE(r0)
1815 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1816 cmp r0,#0
1817 bne common_updateProfile
1818 GET_INST_OPCODE(ip) @ extract opcode from rINST
1819 GOTO_OPCODE(ip) @ jump to next instruction
1820#else
1821 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1822 GET_INST_OPCODE(ip) @ extract opcode from rINST
1823 GOTO_OPCODE(ip) @ jump to next instruction
1824#endif
1825
1826
1827
1828/* ------------------------------ */
1829 .balign 64
1830.L_OP_IF_GEZ: /* 0x3b */
1831/* File: armv5te/OP_IF_GEZ.S */
1832/* File: armv5te/zcmp.S */
1833 /*
1834 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
1835 * fragment that specifies the *reverse* comparison to perform, e.g.
1836 * for "if-le" you would use "gt".
1837 *
1838 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
1839 */
1840 /* if-cmp vAA, +BBBB */
1841 mov r0, rINST, lsr #8 @ r0<- AA
1842 GET_VREG(r2, r0) @ r2<- vAA
1843 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1844 cmp r2, #0 @ compare (vAA, 0)
1845 blt 1f @ branch to 1 if comparison failed
1846 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1847 movs r9, r9, asl #1 @ convert to bytes, check sign
1848 bmi common_backwardBranch @ backward branch, do periodic checks
18491:
1850#if defined(WITH_JIT)
1851 GET_JIT_PROF_TABLE(r0)
1852 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1853 cmp r0,#0
1854 bne common_updateProfile
1855 GET_INST_OPCODE(ip) @ extract opcode from rINST
1856 GOTO_OPCODE(ip) @ jump to next instruction
1857#else
1858 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1859 GET_INST_OPCODE(ip) @ extract opcode from rINST
1860 GOTO_OPCODE(ip) @ jump to next instruction
1861#endif
1862
1863
1864
1865/* ------------------------------ */
1866 .balign 64
1867.L_OP_IF_GTZ: /* 0x3c */
1868/* File: armv5te/OP_IF_GTZ.S */
1869/* File: armv5te/zcmp.S */
1870 /*
1871 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
1872 * fragment that specifies the *reverse* comparison to perform, e.g.
1873 * for "if-le" you would use "gt".
1874 *
1875 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
1876 */
1877 /* if-cmp vAA, +BBBB */
1878 mov r0, rINST, lsr #8 @ r0<- AA
1879 GET_VREG(r2, r0) @ r2<- vAA
1880 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1881 cmp r2, #0 @ compare (vAA, 0)
1882 ble 1f @ branch to 1 if comparison failed
1883 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1884 movs r9, r9, asl #1 @ convert to bytes, check sign
1885 bmi common_backwardBranch @ backward branch, do periodic checks
18861:
1887#if defined(WITH_JIT)
1888 GET_JIT_PROF_TABLE(r0)
1889 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1890 cmp r0,#0
1891 bne common_updateProfile
1892 GET_INST_OPCODE(ip) @ extract opcode from rINST
1893 GOTO_OPCODE(ip) @ jump to next instruction
1894#else
1895 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1896 GET_INST_OPCODE(ip) @ extract opcode from rINST
1897 GOTO_OPCODE(ip) @ jump to next instruction
1898#endif
1899
1900
1901
1902/* ------------------------------ */
1903 .balign 64
1904.L_OP_IF_LEZ: /* 0x3d */
1905/* File: armv5te/OP_IF_LEZ.S */
1906/* File: armv5te/zcmp.S */
1907 /*
1908 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
1909 * fragment that specifies the *reverse* comparison to perform, e.g.
1910 * for "if-le" you would use "gt".
1911 *
1912 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
1913 */
1914 /* if-cmp vAA, +BBBB */
1915 mov r0, rINST, lsr #8 @ r0<- AA
1916 GET_VREG(r2, r0) @ r2<- vAA
1917 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1918 cmp r2, #0 @ compare (vAA, 0)
1919 bgt 1f @ branch to 1 if comparison failed
1920 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1921 movs r9, r9, asl #1 @ convert to bytes, check sign
1922 bmi common_backwardBranch @ backward branch, do periodic checks
19231:
1924#if defined(WITH_JIT)
1925 GET_JIT_PROF_TABLE(r0)
1926 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1927 cmp r0,#0
1928 bne common_updateProfile
1929 GET_INST_OPCODE(ip) @ extract opcode from rINST
1930 GOTO_OPCODE(ip) @ jump to next instruction
1931#else
1932 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1933 GET_INST_OPCODE(ip) @ extract opcode from rINST
1934 GOTO_OPCODE(ip) @ jump to next instruction
1935#endif
1936
1937
1938
1939/* ------------------------------ */
1940 .balign 64
1941.L_OP_UNUSED_3E: /* 0x3e */
1942/* File: armv5te/OP_UNUSED_3E.S */
1943/* File: armv5te/unused.S */
1944 bl common_abort
1945
1946
1947
1948/* ------------------------------ */
1949 .balign 64
1950.L_OP_UNUSED_3F: /* 0x3f */
1951/* File: armv5te/OP_UNUSED_3F.S */
1952/* File: armv5te/unused.S */
1953 bl common_abort
1954
1955
1956
1957/* ------------------------------ */
1958 .balign 64
1959.L_OP_UNUSED_40: /* 0x40 */
1960/* File: armv5te/OP_UNUSED_40.S */
1961/* File: armv5te/unused.S */
1962 bl common_abort
1963
1964
1965
1966/* ------------------------------ */
1967 .balign 64
1968.L_OP_UNUSED_41: /* 0x41 */
1969/* File: armv5te/OP_UNUSED_41.S */
1970/* File: armv5te/unused.S */
1971 bl common_abort
1972
1973
1974
1975/* ------------------------------ */
1976 .balign 64
1977.L_OP_UNUSED_42: /* 0x42 */
1978/* File: armv5te/OP_UNUSED_42.S */
1979/* File: armv5te/unused.S */
1980 bl common_abort
1981
1982
1983
1984/* ------------------------------ */
1985 .balign 64
1986.L_OP_UNUSED_43: /* 0x43 */
1987/* File: armv5te/OP_UNUSED_43.S */
1988/* File: armv5te/unused.S */
1989 bl common_abort
1990
1991
1992
1993/* ------------------------------ */
1994 .balign 64
1995.L_OP_AGET: /* 0x44 */
1996/* File: armv5te/OP_AGET.S */
1997 /*
1998 * Array get, 32 bits or less. vAA <- vBB[vCC].
1999 *
2000 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2001 * instructions. We use a pair of FETCH_Bs instead.
2002 *
2003 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
2004 */
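    /*
     * The checks below amount to the following C sketch (types and names are
     * illustrative). Note the single unsigned compare, which rejects both
     * negative indices and indices >= length:
     *
     *   if (arrayObj == NULL)
     *       goto throw_null_pointer;
     *   if ((uint32_t)index >= (uint32_t)arrayObj->length)
     *       goto throw_array_index;
     *   vregs[AA] = ((int32_t *)arrayObj->contents)[index];  // 4-byte element;
     *                             // narrower variants scale and load accordingly
     */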
2005 /* op vAA, vBB, vCC */
2006 FETCH_B(r2, 1, 0) @ r2<- BB
2007 mov r9, rINST, lsr #8 @ r9<- AA
2008 FETCH_B(r3, 1, 1) @ r3<- CC
2009 GET_VREG(r0, r2) @ r0<- vBB (array object)
2010 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2011 cmp r0, #0 @ null array object?
2012 beq common_errNullObject @ yes, bail
2013 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2014 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
2015 cmp r1, r3 @ compare unsigned index, length
2016 bcs common_errArrayIndex @ index >= length, bail
2017 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2018 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
2019 GET_INST_OPCODE(ip) @ extract opcode from rINST
2020 SET_VREG(r2, r9) @ vAA<- r2
2021 GOTO_OPCODE(ip) @ jump to next instruction
2022
2023
2024/* ------------------------------ */
2025 .balign 64
2026.L_OP_AGET_WIDE: /* 0x45 */
2027/* File: armv5te/OP_AGET_WIDE.S */
2028 /*
2029 * Array get, 64 bits. vAA <- vBB[vCC].
2030 *
2031 * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
2032 */
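    /*
     * C sketch (illustrative names): because the element is naturally aligned,
     * both halves can be moved with a single doubleword load:
     *
     *   uint64_t v = ((const uint64_t *)arrayObj->contents)[index];
     *   memcpy(&vregs[AA], &v, sizeof(v));   // writes vAA and vAA+1
     */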
2033 /* aget-wide vAA, vBB, vCC */
2034 FETCH(r0, 1) @ r0<- CCBB
2035 mov r9, rINST, lsr #8 @ r9<- AA
2036 and r2, r0, #255 @ r2<- BB
2037 mov r3, r0, lsr #8 @ r3<- CC
2038 GET_VREG(r0, r2) @ r0<- vBB (array object)
2039 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2040 cmp r0, #0 @ null array object?
2041 beq common_errNullObject @ yes, bail
2042 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2043 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
2044 cmp r1, r3 @ compare unsigned index, length
2045 bcc .LOP_AGET_WIDE_finish @ okay, continue below
2046 b common_errArrayIndex @ index >= length, bail
2047 @ May want to swap the order of these two branches depending on how the
2048 @ branch prediction (if any) handles conditional forward branches vs.
2049 @ unconditional forward branches.
2050
2051/* ------------------------------ */
2052 .balign 64
2053.L_OP_AGET_OBJECT: /* 0x46 */
2054/* File: armv5te/OP_AGET_OBJECT.S */
2055/* File: armv5te/OP_AGET.S */
2056 /*
2057 * Array get, 32 bits or less. vAA <- vBB[vCC].
2058 *
2059 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2060 * instructions. We use a pair of FETCH_Bs instead.
2061 *
2062 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
2063 */
2064 /* op vAA, vBB, vCC */
2065 FETCH_B(r2, 1, 0) @ r2<- BB
2066 mov r9, rINST, lsr #8 @ r9<- AA
2067 FETCH_B(r3, 1, 1) @ r3<- CC
2068 GET_VREG(r0, r2) @ r0<- vBB (array object)
2069 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2070 cmp r0, #0 @ null array object?
2071 beq common_errNullObject @ yes, bail
2072 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2073 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
2074 cmp r1, r3 @ compare unsigned index, length
2075 bcs common_errArrayIndex @ index >= length, bail
2076 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2077 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
2078 GET_INST_OPCODE(ip) @ extract opcode from rINST
2079 SET_VREG(r2, r9) @ vAA<- r2
2080 GOTO_OPCODE(ip) @ jump to next instruction
2081
2082
2083
2084/* ------------------------------ */
2085 .balign 64
2086.L_OP_AGET_BOOLEAN: /* 0x47 */
2087/* File: armv5te/OP_AGET_BOOLEAN.S */
2088/* File: armv5te/OP_AGET.S */
2089 /*
2090 * Array get, 32 bits or less. vAA <- vBB[vCC].
2091 *
2092 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2093 * instructions. We use a pair of FETCH_Bs instead.
2094 *
2095 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
2096 */
2097 /* op vAA, vBB, vCC */
2098 FETCH_B(r2, 1, 0) @ r2<- BB
2099 mov r9, rINST, lsr #8 @ r9<- AA
2100 FETCH_B(r3, 1, 1) @ r3<- CC
2101 GET_VREG(r0, r2) @ r0<- vBB (array object)
2102 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2103 cmp r0, #0 @ null array object?
2104 beq common_errNullObject @ yes, bail
2105 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2106 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
2107 cmp r1, r3 @ compare unsigned index, length
2108 bcs common_errArrayIndex @ index >= length, bail
2109 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2110 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
2111 GET_INST_OPCODE(ip) @ extract opcode from rINST
2112 SET_VREG(r2, r9) @ vAA<- r2
2113 GOTO_OPCODE(ip) @ jump to next instruction
2114
2115
2116
2117/* ------------------------------ */
2118 .balign 64
2119.L_OP_AGET_BYTE: /* 0x48 */
2120/* File: armv5te/OP_AGET_BYTE.S */
2121/* File: armv5te/OP_AGET.S */
2122 /*
2123 * Array get, 32 bits or less. vAA <- vBB[vCC].
2124 *
2125 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2126 * instructions. We use a pair of FETCH_Bs instead.
2127 *
2128 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
2129 */
2130 /* op vAA, vBB, vCC */
2131 FETCH_B(r2, 1, 0) @ r2<- BB
2132 mov r9, rINST, lsr #8 @ r9<- AA
2133 FETCH_B(r3, 1, 1) @ r3<- CC
2134 GET_VREG(r0, r2) @ r0<- vBB (array object)
2135 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2136 cmp r0, #0 @ null array object?
2137 beq common_errNullObject @ yes, bail
2138 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2139 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
2140 cmp r1, r3 @ compare unsigned index, length
2141 bcs common_errArrayIndex @ index >= length, bail
2142 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2143 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
2144 GET_INST_OPCODE(ip) @ extract opcode from rINST
2145 SET_VREG(r2, r9) @ vAA<- r2
2146 GOTO_OPCODE(ip) @ jump to next instruction
2147
2148
2149
2150/* ------------------------------ */
2151 .balign 64
2152.L_OP_AGET_CHAR: /* 0x49 */
2153/* File: armv5te/OP_AGET_CHAR.S */
2154/* File: armv5te/OP_AGET.S */
2155 /*
2156 * Array get, 32 bits or less. vAA <- vBB[vCC].
2157 *
2158 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2159 * instructions. We use a pair of FETCH_Bs instead.
2160 *
2161 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
2162 */
2163 /* op vAA, vBB, vCC */
2164 FETCH_B(r2, 1, 0) @ r2<- BB
2165 mov r9, rINST, lsr #8 @ r9<- AA
2166 FETCH_B(r3, 1, 1) @ r3<- CC
2167 GET_VREG(r0, r2) @ r0<- vBB (array object)
2168 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2169 cmp r0, #0 @ null array object?
2170 beq common_errNullObject @ yes, bail
2171 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2172 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
2173 cmp r1, r3 @ compare unsigned index, length
2174 bcs common_errArrayIndex @ index >= length, bail
2175 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2176 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
2177 GET_INST_OPCODE(ip) @ extract opcode from rINST
2178 SET_VREG(r2, r9) @ vAA<- r2
2179 GOTO_OPCODE(ip) @ jump to next instruction
2180
2181
2182
2183/* ------------------------------ */
2184 .balign 64
2185.L_OP_AGET_SHORT: /* 0x4a */
2186/* File: armv5te/OP_AGET_SHORT.S */
2187/* File: armv5te/OP_AGET.S */
2188 /*
2189 * Array get, 32 bits or less. vAA <- vBB[vCC].
2190 *
2191 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2192 * instructions. We use a pair of FETCH_Bs instead.
2193 *
2194 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
2195 */
2196 /* op vAA, vBB, vCC */
2197 FETCH_B(r2, 1, 0) @ r2<- BB
2198 mov r9, rINST, lsr #8 @ r9<- AA
2199 FETCH_B(r3, 1, 1) @ r3<- CC
2200 GET_VREG(r0, r2) @ r0<- vBB (array object)
2201 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2202 cmp r0, #0 @ null array object?
2203 beq common_errNullObject @ yes, bail
2204 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2205 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
2206 cmp r1, r3 @ compare unsigned index, length
2207 bcs common_errArrayIndex @ index >= length, bail
2208 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2209 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
2210 GET_INST_OPCODE(ip) @ extract opcode from rINST
2211 SET_VREG(r2, r9) @ vAA<- r2
2212 GOTO_OPCODE(ip) @ jump to next instruction
2213
2214
2215
2216/* ------------------------------ */
2217 .balign 64
2218.L_OP_APUT: /* 0x4b */
2219/* File: armv5te/OP_APUT.S */
2220 /*
2221 * Array put, 32 bits or less. vBB[vCC] <- vAA.
2222 *
2223 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2224 * instructions. We use a pair of FETCH_Bs instead.
2225 *
2226 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
2227 */
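    /*
     * Mirror image of OP_AGET: identical null/bounds checks, then the value is
     * read from vAA and stored into the element (sketch, illustrative names):
     *
     *   ((int32_t *)arrayObj->contents)[index] = (int32_t)vregs[AA];
     */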
2228 /* op vAA, vBB, vCC */
2229 FETCH_B(r2, 1, 0) @ r2<- BB
2230 mov r9, rINST, lsr #8 @ r9<- AA
2231 FETCH_B(r3, 1, 1) @ r3<- CC
2232 GET_VREG(r0, r2) @ r0<- vBB (array object)
2233 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2234 cmp r0, #0 @ null array object?
2235 beq common_errNullObject @ yes, bail
2236 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2237 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
2238 cmp r1, r3 @ compare unsigned index, length
2239 bcs common_errArrayIndex @ index >= length, bail
2240 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2241 GET_VREG(r2, r9) @ r2<- vAA
2242 GET_INST_OPCODE(ip) @ extract opcode from rINST
2243 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
2244 GOTO_OPCODE(ip) @ jump to next instruction
2245
2246
2247/* ------------------------------ */
2248 .balign 64
2249.L_OP_APUT_WIDE: /* 0x4c */
2250/* File: armv5te/OP_APUT_WIDE.S */
2251 /*
2252 * Array put, 64 bits. vBB[vCC] <- vAA.
2253 *
2254 * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
2255 */
2256 /* aput-wide vAA, vBB, vCC */
2257 FETCH(r0, 1) @ r0<- CCBB
2258 mov r9, rINST, lsr #8 @ r9<- AA
2259 and r2, r0, #255 @ r2<- BB
2260 mov r3, r0, lsr #8 @ r3<- CC
2261 GET_VREG(r0, r2) @ r0<- vBB (array object)
2262 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2263 cmp r0, #0 @ null array object?
2264 beq common_errNullObject @ yes, bail
2265 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2266 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
2267 cmp r1, r3 @ compare unsigned index, length
2268 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
2269 bcc .LOP_APUT_WIDE_finish @ okay, continue below
2270 b common_errArrayIndex @ index >= length, bail
2271 @ May want to swap the order of these two branches depending on how the
2272 @ branch prediction (if any) handles conditional forward branches vs.
2273 @ unconditional forward branches.
2274
2275/* ------------------------------ */
2276 .balign 64
2277.L_OP_APUT_OBJECT: /* 0x4d */
2278/* File: armv5te/OP_APUT_OBJECT.S */
2279 /*
2280 * Store an object into an array. vBB[vCC] <- vAA.
2281 *
2282 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2283 * instructions. We use a pair of FETCH_Bs instead.
2284 */
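    /*
     * Only the fast-path checks are inlined here; a C sketch (illustrative
     * names), with the rest handled by the out-of-line _finish fragment:
     *
     *   if (arrayObj == NULL)                               goto throw_null_pointer;
     *   if ((uint32_t)index >= (uint32_t)arrayObj->length)  goto throw_array_index;
     *   // _finish (not shown here): check that the stored object's class is
     *   // assignable to the array's element class, then write the reference.
     */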
2285 /* op vAA, vBB, vCC */
2286 FETCH(r0, 1) @ r0<- CCBB
2287 mov r9, rINST, lsr #8 @ r9<- AA
2288 and r2, r0, #255 @ r2<- BB
2289 mov r3, r0, lsr #8 @ r3<- CC
2290 GET_VREG(r1, r2) @ r1<- vBB (array object)
2291 GET_VREG(r0, r3) @ r0<- vCC (requested index)
2292 cmp r1, #0 @ null array object?
2293 GET_VREG(r9, r9) @ r9<- vAA
2294 beq common_errNullObject @ yes, bail
2295 ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length
2296 add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width
2297 cmp r0, r3 @ compare unsigned index, length
2298 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on
2299 b common_errArrayIndex @ index >= length, bail
2300
2301
2302/* ------------------------------ */
2303 .balign 64
2304.L_OP_APUT_BOOLEAN: /* 0x4e */
2305/* File: armv5te/OP_APUT_BOOLEAN.S */
2306/* File: armv5te/OP_APUT.S */
2307 /*
2308 * Array put, 32 bits or less. vBB[vCC] <- vAA.
2309 *
2310 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2311 * instructions. We use a pair of FETCH_Bs instead.
2312 *
2313 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
2314 */
2315 /* op vAA, vBB, vCC */
2316 FETCH_B(r2, 1, 0) @ r2<- BB
2317 mov r9, rINST, lsr #8 @ r9<- AA
2318 FETCH_B(r3, 1, 1) @ r3<- CC
2319 GET_VREG(r0, r2) @ r0<- vBB (array object)
2320 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2321 cmp r0, #0 @ null array object?
2322 beq common_errNullObject @ yes, bail
2323 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2324 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
2325 cmp r1, r3 @ compare unsigned index, length
2326 bcs common_errArrayIndex @ index >= length, bail
2327 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2328 GET_VREG(r2, r9) @ r2<- vAA
2329 GET_INST_OPCODE(ip) @ extract opcode from rINST
2330 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
2331 GOTO_OPCODE(ip) @ jump to next instruction
2332
2333
2334
2335/* ------------------------------ */
2336 .balign 64
2337.L_OP_APUT_BYTE: /* 0x4f */
2338/* File: armv5te/OP_APUT_BYTE.S */
2339/* File: armv5te/OP_APUT.S */
2340 /*
2341 * Array put, 32 bits or less. vBB[vCC] <- vAA.
2342 *
2343 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2344 * instructions. We use a pair of FETCH_Bs instead.
2345 *
2346 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
2347 */
2348 /* op vAA, vBB, vCC */
2349 FETCH_B(r2, 1, 0) @ r2<- BB
2350 mov r9, rINST, lsr #8 @ r9<- AA
2351 FETCH_B(r3, 1, 1) @ r3<- CC
2352 GET_VREG(r0, r2) @ r0<- vBB (array object)
2353 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2354 cmp r0, #0 @ null array object?
2355 beq common_errNullObject @ yes, bail
2356 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2357 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
2358 cmp r1, r3 @ compare unsigned index, length
2359 bcs common_errArrayIndex @ index >= length, bail
2360 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2361 GET_VREG(r2, r9) @ r2<- vAA
2362 GET_INST_OPCODE(ip) @ extract opcode from rINST
2363 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
2364 GOTO_OPCODE(ip) @ jump to next instruction
2365
2366
2367
2368/* ------------------------------ */
2369 .balign 64
2370.L_OP_APUT_CHAR: /* 0x50 */
2371/* File: armv5te/OP_APUT_CHAR.S */
2372/* File: armv5te/OP_APUT.S */
2373 /*
2374 * Array put, 32 bits or less. vBB[vCC] <- vAA.
2375 *
2376 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2377 * instructions. We use a pair of FETCH_Bs instead.
2378 *
2379 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
2380 */
2381 /* op vAA, vBB, vCC */
2382 FETCH_B(r2, 1, 0) @ r2<- BB
2383 mov r9, rINST, lsr #8 @ r9<- AA
2384 FETCH_B(r3, 1, 1) @ r3<- CC
2385 GET_VREG(r0, r2) @ r0<- vBB (array object)
2386 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2387 cmp r0, #0 @ null array object?
2388 beq common_errNullObject @ yes, bail
2389 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2390 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
2391 cmp r1, r3 @ compare unsigned index, length
2392 bcs common_errArrayIndex @ index >= length, bail
2393 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2394 GET_VREG(r2, r9) @ r2<- vAA
2395 GET_INST_OPCODE(ip) @ extract opcode from rINST
2396 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
2397 GOTO_OPCODE(ip) @ jump to next instruction
2398
2399
2400
2401/* ------------------------------ */
2402 .balign 64
2403.L_OP_APUT_SHORT: /* 0x51 */
2404/* File: armv5te/OP_APUT_SHORT.S */
2405/* File: armv5te/OP_APUT.S */
2406 /*
2407 * Array put, 32 bits or less. vBB[vCC] <- vAA.
2408 *
2409 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
2410 * instructions. We use a pair of FETCH_Bs instead.
2411 *
2412 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
2413 */
2414 /* op vAA, vBB, vCC */
2415 FETCH_B(r2, 1, 0) @ r2<- BB
2416 mov r9, rINST, lsr #8 @ r9<- AA
2417 FETCH_B(r3, 1, 1) @ r3<- CC
2418 GET_VREG(r0, r2) @ r0<- vBB (array object)
2419 GET_VREG(r1, r3) @ r1<- vCC (requested index)
2420 cmp r0, #0 @ null array object?
2421 beq common_errNullObject @ yes, bail
2422 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
2423 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
2424 cmp r1, r3 @ compare unsigned index, length
2425 bcs common_errArrayIndex @ index >= length, bail
2426 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2427 GET_VREG(r2, r9) @ r2<- vAA
2428 GET_INST_OPCODE(ip) @ extract opcode from rINST
2429 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
2430 GOTO_OPCODE(ip) @ jump to next instruction
2431
2432
2433
2434/* ------------------------------ */
2435 .balign 64
2436.L_OP_IGET: /* 0x52 */
2437/* File: armv6t2/OP_IGET.S */
2438 /*
2439 * General 32-bit instance field get.
2440 *
2441 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
2442 */
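    /*
     * Control flow, roughly, in C (names are illustrative except for
     * dvmResolveInstField, which is the resolver actually called below):
     *
     *   InstField *f = pDvmDex->pResFields[ref];    // per-DEX resolved-field cache
     *   if (f == NULL) {
     *       f = dvmResolveInstField(curMethod->clazz, ref);
     *       if (f == NULL) goto exception_thrown;   // resolution failed or threw
     *   }
     *   // _finish: null-check the object, then load 32 bits from
     *   // obj + byteOffset into vA.
     */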
2443 /* op vA, vB, field@CCCC */
2444 mov r0, rINST, lsr #12 @ r0<- B
2445 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2446 FETCH(r1, 1) @ r1<- field ref CCCC
2447 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2448 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2449 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2450 cmp r0, #0 @ is resolved entry null?
2451 bne .LOP_IGET_finish @ no, already resolved
24528: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2453 EXPORT_PC() @ resolve() could throw
2454 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2455 bl dvmResolveInstField @ r0<- resolved InstField ptr
2456 cmp r0, #0
2457 bne .LOP_IGET_finish
2458 b common_exceptionThrown
2459
2460/* ------------------------------ */
2461 .balign 64
2462.L_OP_IGET_WIDE: /* 0x53 */
2463/* File: armv6t2/OP_IGET_WIDE.S */
2464 /*
2465 * 64-bit instance field get.
2466 */
2467 /* iget-wide vA, vB, field@CCCC */
2468 mov r0, rINST, lsr #12 @ r0<- B
2469 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2470 FETCH(r1, 1) @ r1<- field ref CCCC
2471 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
2472 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2473 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2474 cmp r0, #0 @ is resolved entry null?
2475 bne .LOP_IGET_WIDE_finish @ no, already resolved
24768: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2477 EXPORT_PC() @ resolve() could throw
2478 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2479 bl dvmResolveInstField @ r0<- resolved InstField ptr
2480 cmp r0, #0
2481 bne .LOP_IGET_WIDE_finish
2482 b common_exceptionThrown
2483
2484/* ------------------------------ */
2485 .balign 64
2486.L_OP_IGET_OBJECT: /* 0x54 */
2487/* File: armv5te/OP_IGET_OBJECT.S */
2488/* File: armv5te/OP_IGET.S */
2489 /*
2490 * General 32-bit instance field get.
2491 *
2492 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
2493 */
2494 /* op vA, vB, field@CCCC */
2495 mov r0, rINST, lsr #12 @ r0<- B
2496 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2497 FETCH(r1, 1) @ r1<- field ref CCCC
2498 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2499 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2500 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2501 cmp r0, #0 @ is resolved entry null?
2502 bne .LOP_IGET_OBJECT_finish @ no, already resolved
25038: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2504 EXPORT_PC() @ resolve() could throw
2505 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2506 bl dvmResolveInstField @ r0<- resolved InstField ptr
2507 cmp r0, #0
2508 bne .LOP_IGET_OBJECT_finish
2509 b common_exceptionThrown
2510
2511
2512/* ------------------------------ */
2513 .balign 64
2514.L_OP_IGET_BOOLEAN: /* 0x55 */
2515/* File: armv5te/OP_IGET_BOOLEAN.S */
2516@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
2517/* File: armv5te/OP_IGET.S */
2518 /*
2519 * General 32-bit instance field get.
2520 *
2521 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
2522 */
2523 /* op vA, vB, field@CCCC */
2524 mov r0, rINST, lsr #12 @ r0<- B
2525 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2526 FETCH(r1, 1) @ r1<- field ref CCCC
2527 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2528 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2529 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2530 cmp r0, #0 @ is resolved entry null?
2531 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved
25328: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2533 EXPORT_PC() @ resolve() could throw
2534 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2535 bl dvmResolveInstField @ r0<- resolved InstField ptr
2536 cmp r0, #0
2537 bne .LOP_IGET_BOOLEAN_finish
2538 b common_exceptionThrown
2539
2540
2541/* ------------------------------ */
2542 .balign 64
2543.L_OP_IGET_BYTE: /* 0x56 */
2544/* File: armv5te/OP_IGET_BYTE.S */
2545@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
2546/* File: armv5te/OP_IGET.S */
2547 /*
2548 * General 32-bit instance field get.
2549 *
2550 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
2551 */
2552 /* op vA, vB, field@CCCC */
2553 mov r0, rINST, lsr #12 @ r0<- B
2554 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2555 FETCH(r1, 1) @ r1<- field ref CCCC
2556 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2557 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2558 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2559 cmp r0, #0 @ is resolved entry null?
2560 bne .LOP_IGET_BYTE_finish @ no, already resolved
25618: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2562 EXPORT_PC() @ resolve() could throw
2563 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2564 bl dvmResolveInstField @ r0<- resolved InstField ptr
2565 cmp r0, #0
2566 bne .LOP_IGET_BYTE_finish
2567 b common_exceptionThrown
2568
2569
2570/* ------------------------------ */
2571 .balign 64
2572.L_OP_IGET_CHAR: /* 0x57 */
2573/* File: armv5te/OP_IGET_CHAR.S */
2574@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
2575/* File: armv5te/OP_IGET.S */
2576 /*
2577 * General 32-bit instance field get.
2578 *
2579 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
2580 */
2581 /* op vA, vB, field@CCCC */
2582 mov r0, rINST, lsr #12 @ r0<- B
2583 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2584 FETCH(r1, 1) @ r1<- field ref CCCC
2585 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2586 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2587 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2588 cmp r0, #0 @ is resolved entry null?
2589 bne .LOP_IGET_CHAR_finish @ no, already resolved
25908: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2591 EXPORT_PC() @ resolve() could throw
2592 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2593 bl dvmResolveInstField @ r0<- resolved InstField ptr
2594 cmp r0, #0
2595 bne .LOP_IGET_CHAR_finish
2596 b common_exceptionThrown
2597
2598
2599/* ------------------------------ */
2600 .balign 64
2601.L_OP_IGET_SHORT: /* 0x58 */
2602/* File: armv5te/OP_IGET_SHORT.S */
2603@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
2604/* File: armv5te/OP_IGET.S */
2605 /*
2606 * General 32-bit instance field get.
2607 *
2608 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
2609 */
2610 /* op vA, vB, field@CCCC */
2611 mov r0, rINST, lsr #12 @ r0<- B
2612 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2613 FETCH(r1, 1) @ r1<- field ref CCCC
2614 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2615 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2616 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2617 cmp r0, #0 @ is resolved entry null?
2618 bne .LOP_IGET_SHORT_finish @ no, already resolved
26198: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2620 EXPORT_PC() @ resolve() could throw
2621 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2622 bl dvmResolveInstField @ r0<- resolved InstField ptr
2623 cmp r0, #0
2624 bne .LOP_IGET_SHORT_finish
2625 b common_exceptionThrown
2626
2627
2628/* ------------------------------ */
2629 .balign 64
2630.L_OP_IPUT: /* 0x59 */
2631/* File: armv6t2/OP_IPUT.S */
2632 /*
2633 * General 32-bit instance field put.
2634 *
2635 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
2636 */
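    /*
     * Same resolution path as OP_IGET above; the _finish fragment then moves
     * the data the other way (one-line C sketch, illustrative names):
     *
     *   *(uint32_t *)((char *)obj + byteOffset) = vregs[A];   // after null check
     */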
2637 /* op vA, vB, field@CCCC */
2638 mov r0, rINST, lsr #12 @ r0<- B
2639 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2640 FETCH(r1, 1) @ r1<- field ref CCCC
2641 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2642 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2643 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2644 cmp r0, #0 @ is resolved entry null?
2645 bne .LOP_IPUT_finish @ no, already resolved
26468: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2647 EXPORT_PC() @ resolve() could throw
2648 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2649 bl dvmResolveInstField @ r0<- resolved InstField ptr
2650 cmp r0, #0 @ success?
2651 bne .LOP_IPUT_finish @ yes, finish up
2652 b common_exceptionThrown
2653
2654/* ------------------------------ */
2655 .balign 64
2656.L_OP_IPUT_WIDE: /* 0x5a */
2657/* File: armv6t2/OP_IPUT_WIDE.S */
2658 /* iput-wide vA, vB, field@CCCC */
2659 mov r0, rINST, lsr #12 @ r0<- B
2660 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2661 FETCH(r1, 1) @ r1<- field ref CCCC
2662 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
2663 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2664 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2665 cmp r0, #0 @ is resolved entry null?
2666 bne .LOP_IPUT_WIDE_finish @ no, already resolved
26678: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2668 EXPORT_PC() @ resolve() could throw
2669 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2670 bl dvmResolveInstField @ r0<- resolved InstField ptr
2671 cmp r0, #0 @ success?
2672 bne .LOP_IPUT_WIDE_finish @ yes, finish up
2673 b common_exceptionThrown
2674
2675/* ------------------------------ */
2676 .balign 64
2677.L_OP_IPUT_OBJECT: /* 0x5b */
2678/* File: armv5te/OP_IPUT_OBJECT.S */
2679/* File: armv5te/OP_IPUT.S */
2680 /*
2681 * General 32-bit instance field put.
2682 *
2683 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
2684 */
2685 /* op vA, vB, field@CCCC */
2686 mov r0, rINST, lsr #12 @ r0<- B
2687 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2688 FETCH(r1, 1) @ r1<- field ref CCCC
2689 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2690 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2691 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2692 cmp r0, #0 @ is resolved entry null?
2693 bne .LOP_IPUT_OBJECT_finish @ no, already resolved
26948: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2695 EXPORT_PC() @ resolve() could throw
2696 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2697 bl dvmResolveInstField @ r0<- resolved InstField ptr
2698 cmp r0, #0 @ success?
2699 bne .LOP_IPUT_OBJECT_finish @ yes, finish up
2700 b common_exceptionThrown
2701
2702
2703/* ------------------------------ */
2704 .balign 64
2705.L_OP_IPUT_BOOLEAN: /* 0x5c */
2706/* File: armv5te/OP_IPUT_BOOLEAN.S */
2707@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
2708/* File: armv5te/OP_IPUT.S */
2709 /*
2710 * General 32-bit instance field put.
2711 *
2712 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
2713 */
2714 /* op vA, vB, field@CCCC */
2715 mov r0, rINST, lsr #12 @ r0<- B
2716 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2717 FETCH(r1, 1) @ r1<- field ref CCCC
2718 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2719 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2720 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2721 cmp r0, #0 @ is resolved entry null?
2722 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved
27238: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2724 EXPORT_PC() @ resolve() could throw
2725 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2726 bl dvmResolveInstField @ r0<- resolved InstField ptr
2727 cmp r0, #0 @ success?
2728 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up
2729 b common_exceptionThrown
2730
2731
2732/* ------------------------------ */
2733 .balign 64
2734.L_OP_IPUT_BYTE: /* 0x5d */
2735/* File: armv5te/OP_IPUT_BYTE.S */
2736@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
2737/* File: armv5te/OP_IPUT.S */
2738 /*
2739 * General 32-bit instance field put.
2740 *
2741 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
2742 */
2743 /* op vA, vB, field@CCCC */
2744 mov r0, rINST, lsr #12 @ r0<- B
2745 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2746 FETCH(r1, 1) @ r1<- field ref CCCC
2747 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2748 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2749 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2750 cmp r0, #0 @ is resolved entry null?
2751 bne .LOP_IPUT_BYTE_finish @ no, already resolved
27528: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2753 EXPORT_PC() @ resolve() could throw
2754 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2755 bl dvmResolveInstField @ r0<- resolved InstField ptr
2756 cmp r0, #0 @ success?
2757 bne .LOP_IPUT_BYTE_finish @ yes, finish up
2758 b common_exceptionThrown
2759
2760
2761/* ------------------------------ */
2762 .balign 64
2763.L_OP_IPUT_CHAR: /* 0x5e */
2764/* File: armv5te/OP_IPUT_CHAR.S */
2765@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
2766/* File: armv5te/OP_IPUT.S */
2767 /*
2768 * General 32-bit instance field put.
2769 *
2770 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
2771 */
2772 /* op vA, vB, field@CCCC */
2773 mov r0, rINST, lsr #12 @ r0<- B
2774 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2775 FETCH(r1, 1) @ r1<- field ref CCCC
2776 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2777 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2778 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2779 cmp r0, #0 @ is resolved entry null?
2780 bne .LOP_IPUT_CHAR_finish @ no, already resolved
27818: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2782 EXPORT_PC() @ resolve() could throw
2783 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2784 bl dvmResolveInstField @ r0<- resolved InstField ptr
2785 cmp r0, #0 @ success?
2786 bne .LOP_IPUT_CHAR_finish @ yes, finish up
2787 b common_exceptionThrown
2788
2789
2790/* ------------------------------ */
2791 .balign 64
2792.L_OP_IPUT_SHORT: /* 0x5f */
2793/* File: armv5te/OP_IPUT_SHORT.S */
2794@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
2795/* File: armv5te/OP_IPUT.S */
2796 /*
2797 * General 32-bit instance field put.
2798 *
2799 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
2800 */
2801 /* op vA, vB, field@CCCC */
2802 mov r0, rINST, lsr #12 @ r0<- B
2803 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
2804 FETCH(r1, 1) @ r1<- field ref CCCC
2805 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
2806 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
2807 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
2808 cmp r0, #0 @ is resolved entry null?
2809 bne .LOP_IPUT_SHORT_finish @ no, already resolved
28108: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
2811 EXPORT_PC() @ resolve() could throw
2812 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
2813 bl dvmResolveInstField @ r0<- resolved InstField ptr
2814 cmp r0, #0 @ success?
2815 bne .LOP_IPUT_SHORT_finish @ yes, finish up
2816 b common_exceptionThrown
2817
2818
2819/* ------------------------------ */
2820 .balign 64
2821.L_OP_SGET: /* 0x60 */
2822/* File: armv5te/OP_SGET.S */
2823 /*
2824 * General 32-bit SGET handler.
2825 *
2826 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
2827 */
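    /*
     * Unlike iget there is no object to dereference: once the StaticField is
     * resolved, its value sits directly in the field structure. C sketch
     * (illustrative names; the real resolve happens in the _resolve fragment):
     *
     *   StaticField *f = pDvmDex->pResFields[ref];
     *   if (f == NULL) f = resolve_static_or_throw(ref);  // hypothetical helper
     *   vregs[AA] = f->value;                             // 32-bit slot at
     *                                                     // offStaticField_value
     */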
2828 /* op vAA, field@BBBB */
2829 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2830 FETCH(r1, 1) @ r1<- field ref BBBB
2831 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2832 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2833 cmp r0, #0 @ is resolved entry null?
2834 beq .LOP_SGET_resolve @ yes, do resolve
2835.LOP_SGET_finish: @ field ptr in r0
2836 ldr r1, [r0, #offStaticField_value] @ r1<- field value
2837 mov r2, rINST, lsr #8 @ r2<- AA
2838 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2839 SET_VREG(r1, r2) @ fp[AA]<- r1
2840 GET_INST_OPCODE(ip) @ extract opcode from rINST
2841 GOTO_OPCODE(ip) @ jump to next instruction
2842
2843/* ------------------------------ */
2844 .balign 64
2845.L_OP_SGET_WIDE: /* 0x61 */
2846/* File: armv5te/OP_SGET_WIDE.S */
2847 /*
2848 * 64-bit SGET handler.
2849 */
2850 /* sget-wide vAA, field@BBBB */
2851 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2852 FETCH(r1, 1) @ r1<- field ref BBBB
2853 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2854 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2855 cmp r0, #0 @ is resolved entry null?
2856 beq .LOP_SGET_WIDE_resolve @ yes, do resolve
2857.LOP_SGET_WIDE_finish:
2858 mov r1, rINST, lsr #8 @ r1<- AA
2859 ldrd r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
2860 add r1, rFP, r1, lsl #2 @ r1<- &fp[AA]
2861 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2862 stmia r1, {r2-r3} @ vAA/vAA+1<- r2/r3
2863 GET_INST_OPCODE(ip) @ extract opcode from rINST
2864 GOTO_OPCODE(ip) @ jump to next instruction
2865
2866/* ------------------------------ */
2867 .balign 64
2868.L_OP_SGET_OBJECT: /* 0x62 */
2869/* File: armv5te/OP_SGET_OBJECT.S */
2870/* File: armv5te/OP_SGET.S */
2871 /*
2872 * General 32-bit SGET handler.
2873 *
2874 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
2875 */
2876 /* op vAA, field@BBBB */
2877 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2878 FETCH(r1, 1) @ r1<- field ref BBBB
2879 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2880 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2881 cmp r0, #0 @ is resolved entry null?
2882 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve
2883.LOP_SGET_OBJECT_finish: @ field ptr in r0
2884 ldr r1, [r0, #offStaticField_value] @ r1<- field value
2885 mov r2, rINST, lsr #8 @ r2<- AA
2886 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2887 SET_VREG(r1, r2) @ fp[AA]<- r1
2888 GET_INST_OPCODE(ip) @ extract opcode from rINST
2889 GOTO_OPCODE(ip) @ jump to next instruction
2890
2891
2892/* ------------------------------ */
2893 .balign 64
2894.L_OP_SGET_BOOLEAN: /* 0x63 */
2895/* File: armv5te/OP_SGET_BOOLEAN.S */
2896/* File: armv5te/OP_SGET.S */
2897 /*
2898 * General 32-bit SGET handler.
2899 *
2900 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
2901 */
2902 /* op vAA, field@BBBB */
2903 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2904 FETCH(r1, 1) @ r1<- field ref BBBB
2905 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2906 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2907 cmp r0, #0 @ is resolved entry null?
2908 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve
2909.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
2910 ldr r1, [r0, #offStaticField_value] @ r1<- field value
2911 mov r2, rINST, lsr #8 @ r2<- AA
2912 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2913 SET_VREG(r1, r2) @ fp[AA]<- r1
2914 GET_INST_OPCODE(ip) @ extract opcode from rINST
2915 GOTO_OPCODE(ip) @ jump to next instruction
2916
2917
2918/* ------------------------------ */
2919 .balign 64
2920.L_OP_SGET_BYTE: /* 0x64 */
2921/* File: armv5te/OP_SGET_BYTE.S */
2922/* File: armv5te/OP_SGET.S */
2923 /*
2924 * General 32-bit SGET handler.
2925 *
2926 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
2927 */
2928 /* op vAA, field@BBBB */
2929 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2930 FETCH(r1, 1) @ r1<- field ref BBBB
2931 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2932 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2933 cmp r0, #0 @ is resolved entry null?
2934 beq .LOP_SGET_BYTE_resolve @ yes, do resolve
2935.LOP_SGET_BYTE_finish: @ field ptr in r0
2936 ldr r1, [r0, #offStaticField_value] @ r1<- field value
2937 mov r2, rINST, lsr #8 @ r2<- AA
2938 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2939 SET_VREG(r1, r2) @ fp[AA]<- r1
2940 GET_INST_OPCODE(ip) @ extract opcode from rINST
2941 GOTO_OPCODE(ip) @ jump to next instruction
2942
2943
2944/* ------------------------------ */
2945 .balign 64
2946.L_OP_SGET_CHAR: /* 0x65 */
2947/* File: armv5te/OP_SGET_CHAR.S */
2948/* File: armv5te/OP_SGET.S */
2949 /*
2950 * General 32-bit SGET handler.
2951 *
2952 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
2953 */
2954 /* op vAA, field@BBBB */
2955 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2956 FETCH(r1, 1) @ r1<- field ref BBBB
2957 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2958 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2959 cmp r0, #0 @ is resolved entry null?
2960 beq .LOP_SGET_CHAR_resolve @ yes, do resolve
2961.LOP_SGET_CHAR_finish: @ field ptr in r0
2962 ldr r1, [r0, #offStaticField_value] @ r1<- field value
2963 mov r2, rINST, lsr #8 @ r2<- AA
2964 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2965 SET_VREG(r1, r2) @ fp[AA]<- r1
2966 GET_INST_OPCODE(ip) @ extract opcode from rINST
2967 GOTO_OPCODE(ip) @ jump to next instruction
2968
2969
2970/* ------------------------------ */
2971 .balign 64
2972.L_OP_SGET_SHORT: /* 0x66 */
2973/* File: armv5te/OP_SGET_SHORT.S */
2974/* File: armv5te/OP_SGET.S */
2975 /*
2976 * General 32-bit SGET handler.
2977 *
2978 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
2979 */
2980 /* op vAA, field@BBBB */
2981 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
2982 FETCH(r1, 1) @ r1<- field ref BBBB
2983 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
2984 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
2985 cmp r0, #0 @ is resolved entry null?
2986 beq .LOP_SGET_SHORT_resolve @ yes, do resolve
2987.LOP_SGET_SHORT_finish: @ field ptr in r0
2988 ldr r1, [r0, #offStaticField_value] @ r1<- field value
2989 mov r2, rINST, lsr #8 @ r2<- AA
2990 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
2991 SET_VREG(r1, r2) @ fp[AA]<- r1
2992 GET_INST_OPCODE(ip) @ extract opcode from rINST
2993 GOTO_OPCODE(ip) @ jump to next instruction
2994
2995
2996/* ------------------------------ */
2997 .balign 64
2998.L_OP_SPUT: /* 0x67 */
2999/* File: armv5te/OP_SPUT.S */
3000 /*
3001 * General 32-bit SPUT handler.
3002 *
3003 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
3004 */
3005 /* op vAA, field@BBBB */
3006 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3007 FETCH(r1, 1) @ r1<- field ref BBBB
3008 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3009 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3010 cmp r0, #0 @ is resolved entry null?
3011 beq .LOP_SPUT_resolve @ yes, do resolve
3012.LOP_SPUT_finish: @ field ptr in r0
3013 mov r2, rINST, lsr #8 @ r2<- AA
3014 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3015 GET_VREG(r1, r2) @ r1<- fp[AA]
3016 GET_INST_OPCODE(ip) @ extract opcode from rINST
3017 str r1, [r0, #offStaticField_value] @ field<- vAA
3018 GOTO_OPCODE(ip) @ jump to next instruction
3019
3020/* ------------------------------ */
3021 .balign 64
3022.L_OP_SPUT_WIDE: /* 0x68 */
3023/* File: armv5te/OP_SPUT_WIDE.S */
3024 /*
3025 * 64-bit SPUT handler.
3026 */
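    /*
     * C sketch (illustrative names): both halves of the register pair are
     * copied into the resolved field's 64-bit value slot in one go:
     *
     *   memcpy(&f->value, &vregs[AA], sizeof(uint64_t));   // vAA/vAA+1 -> field
     */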
3027 /* sput-wide vAA, field@BBBB */
3028 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3029 FETCH(r1, 1) @ r1<- field ref BBBB
3030 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3031 mov r9, rINST, lsr #8 @ r9<- AA
3032 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3033 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
3034 cmp r0, #0 @ is resolved entry null?
3035 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve
3036.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
3037 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3038 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
3039 GET_INST_OPCODE(ip) @ extract opcode from rINST
3040 strd r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
3041 GOTO_OPCODE(ip) @ jump to next instruction
3042
3043/* ------------------------------ */
3044 .balign 64
3045.L_OP_SPUT_OBJECT: /* 0x69 */
3046/* File: armv5te/OP_SPUT_OBJECT.S */
3047/* File: armv5te/OP_SPUT.S */
3048 /*
3049 * General 32-bit SPUT handler.
3050 *
3051 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
3052 */
3053 /* op vAA, field@BBBB */
3054 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3055 FETCH(r1, 1) @ r1<- field ref BBBB
3056 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3057 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3058 cmp r0, #0 @ is resolved entry null?
3059 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve
3060.LOP_SPUT_OBJECT_finish: @ field ptr in r0
3061 mov r2, rINST, lsr #8 @ r2<- AA
3062 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3063 GET_VREG(r1, r2) @ r1<- fp[AA]
3064 GET_INST_OPCODE(ip) @ extract opcode from rINST
3065 str r1, [r0, #offStaticField_value] @ field<- vAA
3066 GOTO_OPCODE(ip) @ jump to next instruction
3067
3068
3069/* ------------------------------ */
3070 .balign 64
3071.L_OP_SPUT_BOOLEAN: /* 0x6a */
3072/* File: armv5te/OP_SPUT_BOOLEAN.S */
3073/* File: armv5te/OP_SPUT.S */
3074 /*
3075 * General 32-bit SPUT handler.
3076 *
3077 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
3078 */
3079 /* op vAA, field@BBBB */
3080 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3081 FETCH(r1, 1) @ r1<- field ref BBBB
3082 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3083 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3084 cmp r0, #0 @ is resolved entry null?
3085 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve
3086.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
3087 mov r2, rINST, lsr #8 @ r2<- AA
3088 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3089 GET_VREG(r1, r2) @ r1<- fp[AA]
3090 GET_INST_OPCODE(ip) @ extract opcode from rINST
3091 str r1, [r0, #offStaticField_value] @ field<- vAA
3092 GOTO_OPCODE(ip) @ jump to next instruction
3093
3094
3095/* ------------------------------ */
3096 .balign 64
3097.L_OP_SPUT_BYTE: /* 0x6b */
3098/* File: armv5te/OP_SPUT_BYTE.S */
3099/* File: armv5te/OP_SPUT.S */
3100 /*
3101 * General 32-bit SPUT handler.
3102 *
3103 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
3104 */
3105 /* op vAA, field@BBBB */
3106 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3107 FETCH(r1, 1) @ r1<- field ref BBBB
3108 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3109 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3110 cmp r0, #0 @ is resolved entry null?
3111 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve
3112.LOP_SPUT_BYTE_finish: @ field ptr in r0
3113 mov r2, rINST, lsr #8 @ r2<- AA
3114 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3115 GET_VREG(r1, r2) @ r1<- fp[AA]
3116 GET_INST_OPCODE(ip) @ extract opcode from rINST
3117 str r1, [r0, #offStaticField_value] @ field<- vAA
3118 GOTO_OPCODE(ip) @ jump to next instruction
3119
3120
3121/* ------------------------------ */
3122 .balign 64
3123.L_OP_SPUT_CHAR: /* 0x6c */
3124/* File: armv5te/OP_SPUT_CHAR.S */
3125/* File: armv5te/OP_SPUT.S */
3126 /*
3127 * General 32-bit SPUT handler.
3128 *
3129 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
3130 */
3131 /* op vAA, field@BBBB */
3132 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3133 FETCH(r1, 1) @ r1<- field ref BBBB
3134 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3135 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3136 cmp r0, #0 @ is resolved entry null?
3137 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve
3138.LOP_SPUT_CHAR_finish: @ field ptr in r0
3139 mov r2, rINST, lsr #8 @ r2<- AA
3140 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3141 GET_VREG(r1, r2) @ r1<- fp[AA]
3142 GET_INST_OPCODE(ip) @ extract opcode from rINST
3143 str r1, [r0, #offStaticField_value] @ field<- vAA
3144 GOTO_OPCODE(ip) @ jump to next instruction
3145
3146
3147/* ------------------------------ */
3148 .balign 64
3149.L_OP_SPUT_SHORT: /* 0x6d */
3150/* File: armv5te/OP_SPUT_SHORT.S */
3151/* File: armv5te/OP_SPUT.S */
3152 /*
3153 * General 32-bit SPUT handler.
3154 *
3155 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
3156 */
3157 /* op vAA, field@BBBB */
3158 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
3159 FETCH(r1, 1) @ r1<- field ref BBBB
3160 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
3161 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr
3162 cmp r0, #0 @ is resolved entry null?
3163 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve
3164.LOP_SPUT_SHORT_finish: @ field ptr in r0
3165 mov r2, rINST, lsr #8 @ r2<- AA
3166 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
3167 GET_VREG(r1, r2) @ r1<- fp[AA]
3168 GET_INST_OPCODE(ip) @ extract opcode from rINST
3169 str r1, [r0, #offStaticField_value] @ field<- vAA
3170 GOTO_OPCODE(ip) @ jump to next instruction
3171
3172
3173/* ------------------------------ */
3174 .balign 64
3175.L_OP_INVOKE_VIRTUAL: /* 0x6e */
3176/* File: armv5te/OP_INVOKE_VIRTUAL.S */
3177 /*
3178 * Handle a virtual method call.
3179 *
3180 * for: invoke-virtual, invoke-virtual/range
3181 */
3182 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3183 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3184 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3185 FETCH(r1, 1) @ r1<- BBBB
3186 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3187 FETCH(r10, 2) @ r10<- GFED or CCCC
3188 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
3189 .if (!0)
3190 and r10, r10, #15 @ r10<- D (or stays CCCC)
3191 .endif
3192 cmp r0, #0 @ already resolved?
3193 EXPORT_PC() @ must export for invoke
3194 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on
3195 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
3196 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
3197 mov r2, #METHOD_VIRTUAL @ resolver method type
3198 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
3199 cmp r0, #0 @ got null?
3200 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue
3201 b common_exceptionThrown @ yes, handle exception
3202
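/*
 * The invoke handlers above and below share this shape (rough C sketch;
 * the .L_..._continue/_resolve continuations live later in this file):
 *
 *     Method* base = pDvmDex->pResMethods[BBBB];
 *     EXPORT_PC();                       // resolution or the invoke may throw
 *     if (base == NULL) {
 *         base = dvmResolveMethod(curMethod->clazz, BBBB, METHOD_VIRTUAL);
 *         if (base == NULL) goto common_exceptionThrown;
 *     }
 *     // the continuation picks the actual callee and branches to the
 *     // common_invokeMethod* code with the argument registers decoded
 */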
3203/* ------------------------------ */
3204 .balign 64
3205.L_OP_INVOKE_SUPER: /* 0x6f */
3206/* File: armv5te/OP_INVOKE_SUPER.S */
3207 /*
3208 * Handle a "super" method call.
3209 *
3210 * for: invoke-super, invoke-super/range
3211 */
3212 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3213 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3214 FETCH(r10, 2) @ r10<- GFED or CCCC
3215 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3216 .if (!0)
3217 and r10, r10, #15 @ r10<- D (or stays CCCC)
3218 .endif
3219 FETCH(r1, 1) @ r1<- BBBB
3220 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3221 GET_VREG(r2, r10) @ r2<- "this" ptr
3222 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
3223 cmp r2, #0 @ null "this"?
3224 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
3225 beq common_errNullObject @ null "this", throw exception
3226 cmp r0, #0 @ already resolved?
3227 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
3228 EXPORT_PC() @ must export for invoke
3229 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on
3230 b .LOP_INVOKE_SUPER_resolve @ do resolve now
3231
3232/* ------------------------------ */
3233 .balign 64
3234.L_OP_INVOKE_DIRECT: /* 0x70 */
3235/* File: armv5te/OP_INVOKE_DIRECT.S */
3236 /*
3237 * Handle a direct method call.
3238 *
3239 * (We could defer the "is 'this' pointer null" test to the common
3240 * method invocation code, and use a flag to indicate that static
3241 * calls don't count. If we do this as part of copying the arguments
3242 * out we could avoid loading the first arg twice.)
3243 *
3244 * for: invoke-direct, invoke-direct/range
3245 */
3246 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3247 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3248 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3249 FETCH(r1, 1) @ r1<- BBBB
3250 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3251 FETCH(r10, 2) @ r10<- GFED or CCCC
3252 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3253 .if (!0)
3254 and r10, r10, #15 @ r10<- D (or stays CCCC)
3255 .endif
3256 cmp r0, #0 @ already resolved?
3257 EXPORT_PC() @ must export for invoke
3258 GET_VREG(r2, r10) @ r2<- "this" ptr
3259 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now
3260.LOP_INVOKE_DIRECT_finish:
3261 cmp r2, #0 @ null "this" ref?
3262 bne common_invokeMethodNoRange @ no, continue on
3263 b common_errNullObject @ yes, throw exception
3264
3265/* ------------------------------ */
3266 .balign 64
3267.L_OP_INVOKE_STATIC: /* 0x71 */
3268/* File: armv5te/OP_INVOKE_STATIC.S */
3269 /*
3270 * Handle a static method call.
3271 *
3272 * for: invoke-static, invoke-static/range
3273 */
3274 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3275 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3276 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3277 FETCH(r1, 1) @ r1<- BBBB
3278 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3279 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3280 cmp r0, #0 @ already resolved?
3281 EXPORT_PC() @ must export for invoke
3282 bne common_invokeMethodNoRange @ yes, continue on
32830: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
3284 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
3285 mov r2, #METHOD_STATIC @ resolver method type
3286 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
3287 cmp r0, #0 @ got null?
3288 bne common_invokeMethodNoRange @ no, continue
3289 b common_exceptionThrown @ yes, handle exception
3290
3291
3292/* ------------------------------ */
3293 .balign 64
3294.L_OP_INVOKE_INTERFACE: /* 0x72 */
3295/* File: armv5te/OP_INVOKE_INTERFACE.S */
3296 /*
3297 * Handle an interface method call.
3298 *
3299 * for: invoke-interface, invoke-interface/range
3300 */
3301 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3302 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3303 FETCH(r2, 2) @ r2<- FEDC or CCCC
3304 FETCH(r1, 1) @ r1<- BBBB
3305 .if (!0)
3306 and r2, r2, #15 @ r2<- C (or stays CCCC)
3307 .endif
3308 EXPORT_PC() @ must export for invoke
3309 GET_VREG(r0, r2) @ r0<- first arg ("this")
3310 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex
3311 cmp r0, #0 @ null obj?
3312 ldr r2, [rGLUE, #offGlue_method] @ r2<- method
3313 beq common_errNullObject @ yes, fail
3314 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz
3315 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
3316 cmp r0, #0 @ failed?
3317 beq common_exceptionThrown @ yes, handle exception
3318 b common_invokeMethodNoRange @ jump to common handler
3319
3320
3321/* ------------------------------ */
3322 .balign 64
3323.L_OP_UNUSED_73: /* 0x73 */
3324/* File: armv5te/OP_UNUSED_73.S */
3325/* File: armv5te/unused.S */
3326 bl common_abort
3327
3328
3329
3330/* ------------------------------ */
3331 .balign 64
3332.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
3333/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
3334/* File: armv5te/OP_INVOKE_VIRTUAL.S */
3335 /*
3336 * Handle a virtual method call.
3337 *
3338 * for: invoke-virtual, invoke-virtual/range
3339 */
3340 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3341 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3342 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3343 FETCH(r1, 1) @ r1<- BBBB
3344 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3345 FETCH(r10, 2) @ r10<- GFED or CCCC
3346 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
3347 .if (!1)
3348 and r10, r10, #15 @ r10<- D (or stays CCCC)
3349 .endif
3350 cmp r0, #0 @ already resolved?
3351 EXPORT_PC() @ must export for invoke
3352 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on
3353 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
3354 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
3355 mov r2, #METHOD_VIRTUAL @ resolver method type
3356 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
3357 cmp r0, #0 @ got null?
3358 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue
3359 b common_exceptionThrown @ yes, handle exception
3360
3361
3362/* ------------------------------ */
3363 .balign 64
3364.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
3365/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
3366/* File: armv5te/OP_INVOKE_SUPER.S */
3367 /*
3368 * Handle a "super" method call.
3369 *
3370 * for: invoke-super, invoke-super/range
3371 */
3372 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3373 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3374 FETCH(r10, 2) @ r10<- GFED or CCCC
3375 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3376 .if (!1)
3377 and r10, r10, #15 @ r10<- D (or stays CCCC)
3378 .endif
3379 FETCH(r1, 1) @ r1<- BBBB
3380 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3381 GET_VREG(r2, r10) @ r2<- "this" ptr
3382 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
3383 cmp r2, #0 @ null "this"?
3384 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
3385 beq common_errNullObject @ null "this", throw exception
3386 cmp r0, #0 @ already resolved?
3387 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
3388 EXPORT_PC() @ must export for invoke
3389 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
3390 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now
3391
3392
3393/* ------------------------------ */
3394 .balign 64
3395.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
3396/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
3397/* File: armv5te/OP_INVOKE_DIRECT.S */
3398 /*
3399 * Handle a direct method call.
3400 *
3401 * (We could defer the "is 'this' pointer null" test to the common
3402 * method invocation code, and use a flag to indicate that static
3403 * calls don't count. If we do this as part of copying the arguments
3404 * out we could avoid loading the first arg twice.)
3405 *
3406 * for: invoke-direct, invoke-direct/range
3407 */
3408 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3409 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3410 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3411 FETCH(r1, 1) @ r1<- BBBB
3412 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3413 FETCH(r10, 2) @ r10<- GFED or CCCC
3414 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3415 .if (!1)
3416 and r10, r10, #15 @ r10<- D (or stays CCCC)
3417 .endif
3418 cmp r0, #0 @ already resolved?
3419 EXPORT_PC() @ must export for invoke
3420 GET_VREG(r2, r10) @ r2<- "this" ptr
3421 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
3422.LOP_INVOKE_DIRECT_RANGE_finish:
3423 cmp r2, #0 @ null "this" ref?
3424 bne common_invokeMethodRange @ no, continue on
3425 b common_errNullObject @ yes, throw exception
3426
3427
3428/* ------------------------------ */
3429 .balign 64
3430.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
3431/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
3432/* File: armv5te/OP_INVOKE_STATIC.S */
3433 /*
3434 * Handle a static method call.
3435 *
3436 * for: invoke-static, invoke-static/range
3437 */
3438 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3439 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3440 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3441 FETCH(r1, 1) @ r1<- BBBB
3442 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3443 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3444 cmp r0, #0 @ already resolved?
3445 EXPORT_PC() @ must export for invoke
3446 bne common_invokeMethodRange @ yes, continue on
34470: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
3448 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
3449 mov r2, #METHOD_STATIC @ resolver method type
3450 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
3451 cmp r0, #0 @ got null?
3452 bne common_invokeMethodRange @ no, continue
3453 b common_exceptionThrown @ yes, handle exception
3454
3455
3456
3457/* ------------------------------ */
3458 .balign 64
3459.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
3460/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
3461/* File: armv5te/OP_INVOKE_INTERFACE.S */
3462 /*
3463 * Handle an interface method call.
3464 *
3465 * for: invoke-interface, invoke-interface/range
3466 */
3467 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3468 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3469 FETCH(r2, 2) @ r2<- FEDC or CCCC
3470 FETCH(r1, 1) @ r1<- BBBB
3471 .if (!1)
3472 and r2, r2, #15 @ r2<- C (or stays CCCC)
3473 .endif
3474 EXPORT_PC() @ must export for invoke
3475 GET_VREG(r0, r2) @ r0<- first arg ("this")
3476 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex
3477 cmp r0, #0 @ null obj?
3478 ldr r2, [rGLUE, #offGlue_method] @ r2<- method
3479 beq common_errNullObject @ yes, fail
3480 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz
3481 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
3482 cmp r0, #0 @ failed?
3483 beq common_exceptionThrown @ yes, handle exception
3484 b common_invokeMethodRange @ jump to common handler
3485
3486
3487
3488/* ------------------------------ */
3489 .balign 64
3490.L_OP_UNUSED_79: /* 0x79 */
3491/* File: armv5te/OP_UNUSED_79.S */
3492/* File: armv5te/unused.S */
3493 bl common_abort
3494
3495
3496
3497/* ------------------------------ */
3498 .balign 64
3499.L_OP_UNUSED_7A: /* 0x7a */
3500/* File: armv5te/OP_UNUSED_7A.S */
3501/* File: armv5te/unused.S */
3502 bl common_abort
3503
3504
3505
3506/* ------------------------------ */
3507 .balign 64
3508.L_OP_NEG_INT: /* 0x7b */
3509/* File: armv6t2/OP_NEG_INT.S */
3510/* File: armv6t2/unop.S */
3511 /*
3512 * Generic 32-bit unary operation. Provide an "instr" line that
3513 * specifies an instruction that performs "result = op r0".
3514 * This could be an ARM instruction or a function call.
3515 *
3516 * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
3517 * int-to-byte, int-to-char, int-to-short
3518 */
3519 /* unop vA, vB */
3520 mov r3, rINST, lsr #12 @ r3<- B
3521 ubfx r9, rINST, #8, #4 @ r9<- A
3522 GET_VREG(r0, r3) @ r0<- vB
3523 @ optional op; may set condition codes
3524 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3525 rsb r0, r0, #0 @ r0<- op, r0-r3 changed
3526 GET_INST_OPCODE(ip) @ extract opcode from rINST
3527 SET_VREG(r0, r9) @ vAA<- r0
3528 GOTO_OPCODE(ip) @ jump to next instruction
3529 /* 8-9 instructions */
3530
3531
3532/* ------------------------------ */
3533 .balign 64
3534.L_OP_NOT_INT: /* 0x7c */
3535/* File: armv6t2/OP_NOT_INT.S */
3536/* File: armv6t2/unop.S */
3537 /*
3538 * Generic 32-bit unary operation. Provide an "instr" line that
3539 * specifies an instruction that performs "result = op r0".
3540 * This could be an ARM instruction or a function call.
3541 *
3542 * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
3543 * int-to-byte, int-to-char, int-to-short
3544 */
3545 /* unop vA, vB */
3546 mov r3, rINST, lsr #12 @ r3<- B
3547 ubfx r9, rINST, #8, #4 @ r9<- A
3548 GET_VREG(r0, r3) @ r0<- vB
3549 @ optional op; may set condition codes
3550 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3551 mvn r0, r0 @ r0<- op, r0-r3 changed
3552 GET_INST_OPCODE(ip) @ extract opcode from rINST
3553 SET_VREG(r0, r9) @ vAA<- r0
3554 GOTO_OPCODE(ip) @ jump to next instruction
3555 /* 8-9 instructions */
3556
3557
3558/* ------------------------------ */
3559 .balign 64
3560.L_OP_NEG_LONG: /* 0x7d */
3561/* File: armv6t2/OP_NEG_LONG.S */
3562/* File: armv6t2/unopWide.S */
3563 /*
3564 * Generic 64-bit unary operation. Provide an "instr" line that
3565 * specifies an instruction that performs "result = op r0/r1".
3566 * This could be an ARM instruction or a function call.
3567 *
3568 * For: neg-long, not-long, neg-double, long-to-double, double-to-long
3569 */
3570 /* unop vA, vB */
3571 mov r3, rINST, lsr #12 @ r3<- B
3572 ubfx r9, rINST, #8, #4 @ r9<- A
3573 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
3574 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3575 ldmia r3, {r0-r1} @ r0/r1<- vAA
3576 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3577 rsbs r0, r0, #0 @ optional op; may set condition codes
3578 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
3579 GET_INST_OPCODE(ip) @ extract opcode from rINST
3580 stmia r9, {r0-r1} @ vAA<- r0/r1
3581 GOTO_OPCODE(ip) @ jump to next instruction
3582 /* 10-11 instructions */
3583
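/*
 * In C, the rsbs/rsc pair above is a two-word two's-complement negation
 * (illustrative sketch; the function name is ad hoc):
 *
 *     #include <stdint.h>
 *     static void neg_long(uint32_t lo_in, uint32_t hi_in,
 *                          uint32_t* lo_out, uint32_t* hi_out)
 *     {
 *         *lo_out = 0u - lo_in;                  // rsbs r0, r0, #0
 *         *hi_out = 0u - hi_in - (lo_in != 0);   // rsc  r1, r1, #0 (borrow)
 *     }
 */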
3584
3585
3586/* ------------------------------ */
3587 .balign 64
3588.L_OP_NOT_LONG: /* 0x7e */
3589/* File: armv6t2/OP_NOT_LONG.S */
3590/* File: armv6t2/unopWide.S */
3591 /*
3592 * Generic 64-bit unary operation. Provide an "instr" line that
3593 * specifies an instruction that performs "result = op r0/r1".
3594 * This could be an ARM instruction or a function call.
3595 *
3596 * For: neg-long, not-long, neg-double, long-to-double, double-to-long
3597 */
3598 /* unop vA, vB */
3599 mov r3, rINST, lsr #12 @ r3<- B
3600 ubfx r9, rINST, #8, #4 @ r9<- A
3601 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
3602 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3603 ldmia r3, {r0-r1} @ r0/r1<- vAA
3604 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3605 mvn r0, r0 @ optional op; may set condition codes
3606 mvn r1, r1 @ r0/r1<- op, r2-r3 changed
3607 GET_INST_OPCODE(ip) @ extract opcode from rINST
3608 stmia r9, {r0-r1} @ vAA<- r0/r1
3609 GOTO_OPCODE(ip) @ jump to next instruction
3610 /* 10-11 instructions */
3611
3612
3613
3614/* ------------------------------ */
3615 .balign 64
3616.L_OP_NEG_FLOAT: /* 0x7f */
3617/* File: armv6t2/OP_NEG_FLOAT.S */
3618/* File: armv6t2/unop.S */
3619 /*
3620 * Generic 32-bit unary operation. Provide an "instr" line that
3621 * specifies an instruction that performs "result = op r0".
3622 * This could be an ARM instruction or a function call.
3623 *
3624 * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
3625 * int-to-byte, int-to-char, int-to-short
3626 */
3627 /* unop vA, vB */
3628 mov r3, rINST, lsr #12 @ r3<- B
3629 ubfx r9, rINST, #8, #4 @ r9<- A
3630 GET_VREG(r0, r3) @ r0<- vB
3631 @ optional op; may set condition codes
3632 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3633 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
3634 GET_INST_OPCODE(ip) @ extract opcode from rINST
3635 SET_VREG(r0, r9) @ vAA<- r0
3636 GOTO_OPCODE(ip) @ jump to next instruction
3637 /* 8-9 instructions */
3638
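/*
 * neg-float just flips the IEEE-754 sign bit of the raw value; adding
 * 0x80000000 is equivalent to xor-ing it, because the carry out of bit 31
 * is discarded (neg-double below does the same to the high word).
 * Illustrative C sketch (ad hoc name):
 *
 *     #include <stdint.h>
 *     static uint32_t neg_float_bits(uint32_t bits)
 *     {
 *         return bits ^ 0x80000000u;   // same result as bits + 0x80000000u
 *     }
 */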
3639
3640/* ------------------------------ */
3641 .balign 64
3642.L_OP_NEG_DOUBLE: /* 0x80 */
3643/* File: armv6t2/OP_NEG_DOUBLE.S */
3644/* File: armv6t2/unopWide.S */
3645 /*
3646 * Generic 64-bit unary operation. Provide an "instr" line that
3647 * specifies an instruction that performs "result = op r0/r1".
3648 * This could be an ARM instruction or a function call.
3649 *
3650 * For: neg-long, not-long, neg-double, long-to-double, double-to-long
3651 */
3652 /* unop vA, vB */
3653 mov r3, rINST, lsr #12 @ r3<- B
3654 ubfx r9, rINST, #8, #4 @ r9<- A
3655 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
3656 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3657 ldmia r3, {r0-r1} @ r0/r1<- vAA
3658 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3659 @ optional op; may set condition codes
3660 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
3661 GET_INST_OPCODE(ip) @ extract opcode from rINST
3662 stmia r9, {r0-r1} @ vAA<- r0/r1
3663 GOTO_OPCODE(ip) @ jump to next instruction
3664 /* 10-11 instructions */
3665
3666
3667
3668/* ------------------------------ */
3669 .balign 64
3670.L_OP_INT_TO_LONG: /* 0x81 */
3671/* File: armv6t2/OP_INT_TO_LONG.S */
3672/* File: armv6t2/unopWider.S */
3673 /*
3674 * Generic 32bit-to-64bit unary operation. Provide an "instr" line
3675 * that specifies an instruction that performs "result = op r0", where
3676 * "result" is a 64-bit quantity in r0/r1.
3677 *
3678 * For: int-to-long, int-to-double, float-to-long, float-to-double
3679 */
3680 /* unop vA, vB */
3681 mov r3, rINST, lsr #12 @ r3<- B
3682 ubfx r9, rINST, #8, #4 @ r9<- A
3683 GET_VREG(r0, r3) @ r0<- vB
3684 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3685 @ optional op; may set condition codes
3686 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3687 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
3688 GET_INST_OPCODE(ip) @ extract opcode from rINST
3689 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
3690 GOTO_OPCODE(ip) @ jump to next instruction
3691 /* 9-10 instructions */
3692
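/*
 * The widening above fills the high word with copies of the sign bit
 * ("mov r1, r0, asr #31"). Illustrative C sketch (ad hoc name):
 *
 *     #include <stdint.h>
 *     static void int_to_long(int32_t v, uint32_t* lo, uint32_t* hi)
 *     {
 *         *lo = (uint32_t)v;
 *         *hi = (v < 0) ? 0xffffffffu : 0u;   // what "asr #31" produces
 *     }
 */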
3693
3694/* ------------------------------ */
3695 .balign 64
3696.L_OP_INT_TO_FLOAT: /* 0x82 */
3697/* File: arm-vfp/OP_INT_TO_FLOAT.S */
3698/* File: arm-vfp/funop.S */
3699 /*
3700 * Generic 32-bit unary floating-point operation. Provide an "instr"
3701 * line that specifies an instruction that performs "s1 = op s0".
3702 *
3703 * for: int-to-float, float-to-int
3704 */
3705 /* unop vA, vB */
3706 mov r3, rINST, lsr #12 @ r3<- B
3707 mov r9, rINST, lsr #8 @ r9<- A+
3708 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
3709 flds s0, [r3] @ s0<- vB
3710 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3711 and r9, r9, #15 @ r9<- A
3712 fsitos s1, s0 @ s1<- op
3713 GET_INST_OPCODE(ip) @ extract opcode from rINST
3714 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
3715 fsts s1, [r9] @ vA<- s1
3716 GOTO_OPCODE(ip) @ jump to next instruction
3717
3718
3719/* ------------------------------ */
3720 .balign 64
3721.L_OP_INT_TO_DOUBLE: /* 0x83 */
3722/* File: arm-vfp/OP_INT_TO_DOUBLE.S */
3723/* File: arm-vfp/funopWider.S */
3724 /*
3725 * Generic 32bit-to-64bit floating point unary operation. Provide an
3726 * "instr" line that specifies an instruction that performs "d0 = op s0".
3727 *
3728 * For: int-to-double, float-to-double
3729 */
3730 /* unop vA, vB */
3731 mov r3, rINST, lsr #12 @ r3<- B
3732 mov r9, rINST, lsr #8 @ r9<- A+
3733 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
3734 flds s0, [r3] @ s0<- vB
3735 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3736 and r9, r9, #15 @ r9<- A
3737 fsitod d0, s0 @ d0<- op
3738 GET_INST_OPCODE(ip) @ extract opcode from rINST
3739 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
3740 fstd d0, [r9] @ vA<- d0
3741 GOTO_OPCODE(ip) @ jump to next instruction
3742
3743
3744/* ------------------------------ */
3745 .balign 64
3746.L_OP_LONG_TO_INT: /* 0x84 */
3747/* File: armv5te/OP_LONG_TO_INT.S */
3748/* we ignore the high word, making this equivalent to a 32-bit reg move */
3749/* File: armv5te/OP_MOVE.S */
3750 /* for move, move-object, long-to-int */
3751 /* op vA, vB */
3752 mov r1, rINST, lsr #12 @ r1<- B from 15:12
3753 mov r0, rINST, lsr #8 @ r0<- A from 11:8
3754 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3755 GET_VREG(r2, r1) @ r2<- fp[B]
3756 and r0, r0, #15
3757 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
3758 SET_VREG(r2, r0) @ fp[A]<- r2
3759 GOTO_OPCODE(ip) @ execute next instruction
3760
3761
3762
3763/* ------------------------------ */
3764 .balign 64
3765.L_OP_LONG_TO_FLOAT: /* 0x85 */
3766/* File: armv6t2/OP_LONG_TO_FLOAT.S */
3767/* File: armv6t2/unopNarrower.S */
3768 /*
3769 * Generic 64bit-to-32bit unary operation. Provide an "instr" line
3770 * that specifies an instruction that performs "result = op r0/r1", where
3771 * "result" is a 32-bit quantity in r0.
3772 *
3773 * For: long-to-float, double-to-int, double-to-float
3774 *
3775 * (This would work for long-to-int, but that instruction is actually
3776 * an exact match for OP_MOVE.)
3777 */
3778 /* unop vA, vB */
3779 mov r3, rINST, lsr #12 @ r3<- B
3780 ubfx r9, rINST, #8, #4 @ r9<- A
3781 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
3782 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
3783 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3784 @ optional op; may set condition codes
3785 bl __aeabi_l2f @ r0<- op, r0-r3 changed
3786 GET_INST_OPCODE(ip) @ extract opcode from rINST
3787 SET_VREG(r0, r9) @ vA<- r0
3788 GOTO_OPCODE(ip) @ jump to next instruction
3789 /* 9-10 instructions */
3790
3791
3792/* ------------------------------ */
3793 .balign 64
3794.L_OP_LONG_TO_DOUBLE: /* 0x86 */
3795/* File: armv6t2/OP_LONG_TO_DOUBLE.S */
3796/* File: armv6t2/unopWide.S */
3797 /*
3798 * Generic 64-bit unary operation. Provide an "instr" line that
3799 * specifies an instruction that performs "result = op r0/r1".
3800 * This could be an ARM instruction or a function call.
3801 *
3802 * For: neg-long, not-long, neg-double, long-to-double, double-to-long
3803 */
3804 /* unop vA, vB */
3805 mov r3, rINST, lsr #12 @ r3<- B
3806 ubfx r9, rINST, #8, #4 @ r9<- A
3807 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
3808 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3809 ldmia r3, {r0-r1} @ r0/r1<- vAA
3810 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3811 @ optional op; may set condition codes
3812 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed
3813 GET_INST_OPCODE(ip) @ extract opcode from rINST
3814 stmia r9, {r0-r1} @ vAA<- r0/r1
3815 GOTO_OPCODE(ip) @ jump to next instruction
3816 /* 10-11 instructions */
3817
3818
3819
3820/* ------------------------------ */
3821 .balign 64
3822.L_OP_FLOAT_TO_INT: /* 0x87 */
3823/* File: arm-vfp/OP_FLOAT_TO_INT.S */
3824/* File: arm-vfp/funop.S */
3825 /*
3826 * Generic 32-bit unary floating-point operation. Provide an "instr"
3827 * line that specifies an instruction that performs "s1 = op s0".
3828 *
3829 * for: int-to-float, float-to-int
3830 */
3831 /* unop vA, vB */
3832 mov r3, rINST, lsr #12 @ r3<- B
3833 mov r9, rINST, lsr #8 @ r9<- A+
3834 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
3835 flds s0, [r3] @ s0<- vB
3836 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3837 and r9, r9, #15 @ r9<- A
3838 ftosizs s1, s0 @ s1<- op
3839 GET_INST_OPCODE(ip) @ extract opcode from rINST
3840 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
3841 fsts s1, [r9] @ vA<- s1
3842 GOTO_OPCODE(ip) @ jump to next instruction
3843
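/*
 * ftosizs converts with round-toward-zero and saturates out-of-range
 * values (NaN becomes 0), which is what the Dalvik float-to-int rules
 * call for. A plain C cast is undefined on overflow, so an explicit
 * sketch of the same behavior looks like this (ad hoc name):
 *
 *     #include <stdint.h>
 *     static int32_t float_to_int(float f)
 *     {
 *         if (f != f)              return 0;           // NaN
 *         if (f >= 2147483648.0f)  return INT32_MAX;   // >= 2^31
 *         if (f <= -2147483648.0f) return INT32_MIN;   // <= -2^31
 *         return (int32_t)f;                           // truncates toward 0
 *     }
 */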
3844
3845/* ------------------------------ */
3846 .balign 64
3847.L_OP_FLOAT_TO_LONG: /* 0x88 */
3848/* File: armv6t2/OP_FLOAT_TO_LONG.S */
3849@include "armv6t2/unopWider.S" {"instr":"bl __aeabi_f2lz"}
3850/* File: armv6t2/unopWider.S */
3851 /*
3852 * Generic 32bit-to-64bit unary operation. Provide an "instr" line
3853 * that specifies an instruction that performs "result = op r0", where
3854 * "result" is a 64-bit quantity in r0/r1.
3855 *
3856 * For: int-to-long, int-to-double, float-to-long, float-to-double
3857 */
3858 /* unop vA, vB */
3859 mov r3, rINST, lsr #12 @ r3<- B
3860 ubfx r9, rINST, #8, #4 @ r9<- A
3861 GET_VREG(r0, r3) @ r0<- vB
3862 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3863 @ optional op; may set condition codes
3864 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3865 bl f2l_doconv @ r0<- op, r0-r3 changed
3866 GET_INST_OPCODE(ip) @ extract opcode from rINST
3867 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
3868 GOTO_OPCODE(ip) @ jump to next instruction
3869 /* 9-10 instructions */
3870
3871
3872
3873/* ------------------------------ */
3874 .balign 64
3875.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
3876/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */
3877/* File: arm-vfp/funopWider.S */
3878 /*
3879 * Generic 32bit-to-64bit floating point unary operation. Provide an
3880 * "instr" line that specifies an instruction that performs "d0 = op s0".
3881 *
3882 * For: int-to-double, float-to-double
3883 */
3884 /* unop vA, vB */
3885 mov r3, rINST, lsr #12 @ r3<- B
3886 mov r9, rINST, lsr #8 @ r9<- A+
3887 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
3888 flds s0, [r3] @ s0<- vB
3889 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3890 and r9, r9, #15 @ r9<- A
3891 fcvtds d0, s0 @ d0<- op
3892 GET_INST_OPCODE(ip) @ extract opcode from rINST
3893 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
3894 fstd d0, [r9] @ vA<- d0
3895 GOTO_OPCODE(ip) @ jump to next instruction
3896
3897
3898/* ------------------------------ */
3899 .balign 64
3900.L_OP_DOUBLE_TO_INT: /* 0x8a */
3901/* File: arm-vfp/OP_DOUBLE_TO_INT.S */
3902/* File: arm-vfp/funopNarrower.S */
3903 /*
3904 * Generic 64bit-to-32bit unary floating point operation. Provide an
3905 * "instr" line that specifies an instruction that performs "s0 = op d0".
3906 *
3907 * For: double-to-int, double-to-float
3908 */
3909 /* unop vA, vB */
3910 mov r3, rINST, lsr #12 @ r3<- B
3911 mov r9, rINST, lsr #8 @ r9<- A+
3912 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
3913 fldd d0, [r3] @ d0<- vB
3914 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3915 and r9, r9, #15 @ r9<- A
3916 ftosizd s0, d0 @ s0<- op
3917 GET_INST_OPCODE(ip) @ extract opcode from rINST
3918 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
3919 fsts s0, [r9] @ vA<- s0
3920 GOTO_OPCODE(ip) @ jump to next instruction
3921
3922
3923/* ------------------------------ */
3924 .balign 64
3925.L_OP_DOUBLE_TO_LONG: /* 0x8b */
3926/* File: armv6t2/OP_DOUBLE_TO_LONG.S */
3927@include "armv6t2/unopWide.S" {"instr":"bl __aeabi_d2lz"}
3928/* File: armv6t2/unopWide.S */
3929 /*
3930 * Generic 64-bit unary operation. Provide an "instr" line that
3931 * specifies an instruction that performs "result = op r0/r1".
3932 * This could be an ARM instruction or a function call.
3933 *
3934 * For: neg-long, not-long, neg-double, long-to-double, double-to-long
3935 */
3936 /* unop vA, vB */
3937 mov r3, rINST, lsr #12 @ r3<- B
3938 ubfx r9, rINST, #8, #4 @ r9<- A
3939 add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
3940 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3941 ldmia r3, {r0-r1} @ r0/r1<- vAA
3942 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3943 @ optional op; may set condition codes
3944 bl d2l_doconv @ r0/r1<- op, r2-r3 changed
3945 GET_INST_OPCODE(ip) @ extract opcode from rINST
3946 stmia r9, {r0-r1} @ vAA<- r0/r1
3947 GOTO_OPCODE(ip) @ jump to next instruction
3948 /* 10-11 instructions */
3949
3950
3951
3952
3953/* ------------------------------ */
3954 .balign 64
3955.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
3956/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */
3957/* File: arm-vfp/funopNarrower.S */
3958 /*
3959 * Generic 64bit-to-32bit unary floating point operation. Provide an
3960 * "instr" line that specifies an instruction that performs "s0 = op d0".
3961 *
3962 * For: double-to-int, double-to-float
3963 */
3964 /* unop vA, vB */
3965 mov r3, rINST, lsr #12 @ r3<- B
3966 mov r9, rINST, lsr #8 @ r9<- A+
3967 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
3968 fldd d0, [r3] @ d0<- vB
3969 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3970 and r9, r9, #15 @ r9<- A
3971 fcvtsd s0, d0 @ s0<- op
3972 GET_INST_OPCODE(ip) @ extract opcode from rINST
3973 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
3974 fsts s0, [r9] @ vA<- s0
3975 GOTO_OPCODE(ip) @ jump to next instruction
3976
3977
3978/* ------------------------------ */
3979 .balign 64
3980.L_OP_INT_TO_BYTE: /* 0x8d */
3981/* File: armv6t2/OP_INT_TO_BYTE.S */
3982/* File: armv6t2/unop.S */
3983 /*
3984 * Generic 32-bit unary operation. Provide an "instr" line that
3985 * specifies an instruction that performs "result = op r0".
3986 * This could be an ARM instruction or a function call.
3987 *
3988 * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
3989 * int-to-byte, int-to-char, int-to-short
3990 */
3991 /* unop vA, vB */
3992 mov r3, rINST, lsr #12 @ r3<- B
3993 ubfx r9, rINST, #8, #4 @ r9<- A
3994 GET_VREG(r0, r3) @ r0<- vB
3995 @ optional op; may set condition codes
3996 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
3997 sxtb r0, r0 @ r0<- op, r0-r3 changed
3998 GET_INST_OPCODE(ip) @ extract opcode from rINST
3999 SET_VREG(r0, r9) @ vAA<- r0
4000 GOTO_OPCODE(ip) @ jump to next instruction
4001 /* 8-9 instructions */
4002
4003
4004/* ------------------------------ */
4005 .balign 64
4006.L_OP_INT_TO_CHAR: /* 0x8e */
4007/* File: armv6t2/OP_INT_TO_CHAR.S */
4008/* File: armv6t2/unop.S */
4009 /*
4010 * Generic 32-bit unary operation. Provide an "instr" line that
4011 * specifies an instruction that performs "result = op r0".
4012 * This could be an ARM instruction or a function call.
4013 *
4014 * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
4015 * int-to-byte, int-to-char, int-to-short
4016 */
4017 /* unop vA, vB */
4018 mov r3, rINST, lsr #12 @ r3<- B
4019 ubfx r9, rINST, #8, #4 @ r9<- A
4020 GET_VREG(r0, r3) @ r0<- vB
4021 @ optional op; may set condition codes
4022 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
4023 uxth r0, r0 @ r0<- op, r0-r3 changed
4024 GET_INST_OPCODE(ip) @ extract opcode from rINST
4025 SET_VREG(r0, r9) @ vAA<- r0
4026 GOTO_OPCODE(ip) @ jump to next instruction
4027 /* 8-9 instructions */
4028
4029
4030/* ------------------------------ */
4031 .balign 64
4032.L_OP_INT_TO_SHORT: /* 0x8f */
4033/* File: armv6t2/OP_INT_TO_SHORT.S */
4034/* File: armv6t2/unop.S */
4035 /*
4036 * Generic 32-bit unary operation. Provide an "instr" line that
4037 * specifies an instruction that performs "result = op r0".
4038 * This could be an ARM instruction or a function call.
4039 *
4040 * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
4041 * int-to-byte, int-to-char, int-to-short
4042 */
4043 /* unop vA, vB */
4044 mov r3, rINST, lsr #12 @ r3<- B
4045 ubfx r9, rINST, #8, #4 @ r9<- A
4046 GET_VREG(r0, r3) @ r0<- vB
4047 @ optional op; may set condition codes
4048 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
4049 sxth r0, r0 @ r0<- op, r0-r3 changed
4050 GET_INST_OPCODE(ip) @ extract opcode from rINST
4051 SET_VREG(r0, r9) @ vAA<- r0
4052 GOTO_OPCODE(ip) @ jump to next instruction
4053 /* 8-9 instructions */
4054
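/*
 * The three truncating conversions above are plain C casts widened back
 * to 32 bits: sxtb ~ (int8_t), uxth ~ (uint16_t), sxth ~ (int16_t).
 * Illustrative sketch (ad hoc names):
 *
 *     #include <stdint.h>
 *     static int32_t int_to_byte (int32_t v) { return (int8_t)v;   }
 *     static int32_t int_to_char (int32_t v) { return (uint16_t)v; }
 *     static int32_t int_to_short(int32_t v) { return (int16_t)v;  }
 */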
4055
4056/* ------------------------------ */
4057 .balign 64
4058.L_OP_ADD_INT: /* 0x90 */
4059/* File: armv5te/OP_ADD_INT.S */
4060/* File: armv5te/binop.S */
4061 /*
4062 * Generic 32-bit binary operation. Provide an "instr" line that
4063 * specifies an instruction that performs "result = r0 op r1".
4064 * This could be an ARM instruction or a function call. (If the result
4065 * comes back in a register other than r0, you can override "result".)
4066 *
4067 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4068 * vCC (r1). Useful for integer division and modulus. Note that we
4069 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4070 * handles it correctly.
4071 *
4072 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4073 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4074 * mul-float, div-float, rem-float
4075 */
4076 /* binop vAA, vBB, vCC */
4077 FETCH(r0, 1) @ r0<- CCBB
4078 mov r9, rINST, lsr #8 @ r9<- AA
4079 mov r3, r0, lsr #8 @ r3<- CC
4080 and r2, r0, #255 @ r2<- BB
4081 GET_VREG(r1, r3) @ r1<- vCC
4082 GET_VREG(r0, r2) @ r0<- vBB
4083 .if 0
4084 cmp r1, #0 @ is second operand zero?
4085 beq common_errDivideByZero
4086 .endif
4087
4088 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4089 @ optional op; may set condition codes
4090 add r0, r0, r1 @ r0<- op, r0-r3 changed
4091 GET_INST_OPCODE(ip) @ extract opcode from rINST
4092 SET_VREG(r0, r9) @ vAA<- r0
4093 GOTO_OPCODE(ip) @ jump to next instruction
4094 /* 11-14 instructions */
4095
4096
4097
4098/* ------------------------------ */
4099 .balign 64
4100.L_OP_SUB_INT: /* 0x91 */
4101/* File: armv5te/OP_SUB_INT.S */
4102/* File: armv5te/binop.S */
4103 /*
4104 * Generic 32-bit binary operation. Provide an "instr" line that
4105 * specifies an instruction that performs "result = r0 op r1".
4106 * This could be an ARM instruction or a function call. (If the result
4107 * comes back in a register other than r0, you can override "result".)
4108 *
4109 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4110 * vCC (r1). Useful for integer division and modulus. Note that we
4111 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4112 * handles it correctly.
4113 *
4114 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4115 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4116 * mul-float, div-float, rem-float
4117 */
4118 /* binop vAA, vBB, vCC */
4119 FETCH(r0, 1) @ r0<- CCBB
4120 mov r9, rINST, lsr #8 @ r9<- AA
4121 mov r3, r0, lsr #8 @ r3<- CC
4122 and r2, r0, #255 @ r2<- BB
4123 GET_VREG(r1, r3) @ r1<- vCC
4124 GET_VREG(r0, r2) @ r0<- vBB
4125 .if 0
4126 cmp r1, #0 @ is second operand zero?
4127 beq common_errDivideByZero
4128 .endif
4129
4130 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4131 @ optional op; may set condition codes
4132 sub r0, r0, r1 @ r0<- op, r0-r3 changed
4133 GET_INST_OPCODE(ip) @ extract opcode from rINST
4134 SET_VREG(r0, r9) @ vAA<- r0
4135 GOTO_OPCODE(ip) @ jump to next instruction
4136 /* 11-14 instructions */
4137
4138
4139
4140/* ------------------------------ */
4141 .balign 64
4142.L_OP_MUL_INT: /* 0x92 */
4143/* File: armv5te/OP_MUL_INT.S */
4144/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
4145/* File: armv5te/binop.S */
4146 /*
4147 * Generic 32-bit binary operation. Provide an "instr" line that
4148 * specifies an instruction that performs "result = r0 op r1".
4149 * This could be an ARM instruction or a function call. (If the result
4150 * comes back in a register other than r0, you can override "result".)
4151 *
4152 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4153 * vCC (r1). Useful for integer division and modulus. Note that we
4154 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4155 * handles it correctly.
4156 *
4157 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4158 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4159 * mul-float, div-float, rem-float
4160 */
4161 /* binop vAA, vBB, vCC */
4162 FETCH(r0, 1) @ r0<- CCBB
4163 mov r9, rINST, lsr #8 @ r9<- AA
4164 mov r3, r0, lsr #8 @ r3<- CC
4165 and r2, r0, #255 @ r2<- BB
4166 GET_VREG(r1, r3) @ r1<- vCC
4167 GET_VREG(r0, r2) @ r0<- vBB
4168 .if 0
4169 cmp r1, #0 @ is second operand zero?
4170 beq common_errDivideByZero
4171 .endif
4172
4173 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4174 @ optional op; may set condition codes
4175 mul r0, r1, r0 @ r0<- op, r0-r3 changed
4176 GET_INST_OPCODE(ip) @ extract opcode from rINST
4177 SET_VREG(r0, r9) @ vAA<- r0
4178 GOTO_OPCODE(ip) @ jump to next instruction
4179 /* 11-14 instructions */
4180
4181
4182
4183/* ------------------------------ */
4184 .balign 64
4185.L_OP_DIV_INT: /* 0x93 */
4186/* File: armv5te/OP_DIV_INT.S */
4187/* File: armv5te/binop.S */
4188 /*
4189 * Generic 32-bit binary operation. Provide an "instr" line that
4190 * specifies an instruction that performs "result = r0 op r1".
4191 * This could be an ARM instruction or a function call. (If the result
4192 * comes back in a register other than r0, you can override "result".)
4193 *
4194 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4195 * vCC (r1). Useful for integer division and modulus. Note that we
4196 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4197 * handles it correctly.
4198 *
4199 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4200 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4201 * mul-float, div-float, rem-float
4202 */
4203 /* binop vAA, vBB, vCC */
4204 FETCH(r0, 1) @ r0<- CCBB
4205 mov r9, rINST, lsr #8 @ r9<- AA
4206 mov r3, r0, lsr #8 @ r3<- CC
4207 and r2, r0, #255 @ r2<- BB
4208 GET_VREG(r1, r3) @ r1<- vCC
4209 GET_VREG(r0, r2) @ r0<- vBB
4210 .if 1
4211 cmp r1, #0 @ is second operand zero?
4212 beq common_errDivideByZero
4213 .endif
4214
4215 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4216 @ optional op; may set condition codes
4217 bl __aeabi_idiv @ r0<- op, r0-r3 changed
4218 GET_INST_OPCODE(ip) @ extract opcode from rINST
4219 SET_VREG(r0, r9) @ vAA<- r0
4220 GOTO_OPCODE(ip) @ jump to next instruction
4221 /* 11-14 instructions */
4222
4223
4224
4225/* ------------------------------ */
4226 .balign 64
4227.L_OP_REM_INT: /* 0x94 */
4228/* File: armv5te/OP_REM_INT.S */
4229/* idivmod returns quotient in r0 and remainder in r1 */
4230/* File: armv5te/binop.S */
4231 /*
4232 * Generic 32-bit binary operation. Provide an "instr" line that
4233 * specifies an instruction that performs "result = r0 op r1".
4234 * This could be an ARM instruction or a function call. (If the result
4235 * comes back in a register other than r0, you can override "result".)
4236 *
4237 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4238 * vCC (r1). Useful for integer division and modulus. Note that we
4239 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4240 * handles it correctly.
4241 *
4242 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4243 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4244 * mul-float, div-float, rem-float
4245 */
4246 /* binop vAA, vBB, vCC */
4247 FETCH(r0, 1) @ r0<- CCBB
4248 mov r9, rINST, lsr #8 @ r9<- AA
4249 mov r3, r0, lsr #8 @ r3<- CC
4250 and r2, r0, #255 @ r2<- BB
4251 GET_VREG(r1, r3) @ r1<- vCC
4252 GET_VREG(r0, r2) @ r0<- vBB
4253 .if 1
4254 cmp r1, #0 @ is second operand zero?
4255 beq common_errDivideByZero
4256 .endif
4257
4258 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4259 @ optional op; may set condition codes
4260 bl __aeabi_idivmod @ r1<- op, r0-r3 changed
4261 GET_INST_OPCODE(ip) @ extract opcode from rINST
4262 SET_VREG(r1, r9) @ vAA<- r1
4263 GOTO_OPCODE(ip) @ jump to next instruction
4264 /* 11-14 instructions */
4265
4266
4267
4268/* ------------------------------ */
4269 .balign 64
4270.L_OP_AND_INT: /* 0x95 */
4271/* File: armv5te/OP_AND_INT.S */
4272/* File: armv5te/binop.S */
4273 /*
4274 * Generic 32-bit binary operation. Provide an "instr" line that
4275 * specifies an instruction that performs "result = r0 op r1".
4276 * This could be an ARM instruction or a function call. (If the result
4277 * comes back in a register other than r0, you can override "result".)
4278 *
4279 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4280 * vCC (r1). Useful for integer division and modulus. Note that we
4281 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4282 * handles it correctly.
4283 *
4284 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4285 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4286 * mul-float, div-float, rem-float
4287 */
4288 /* binop vAA, vBB, vCC */
4289 FETCH(r0, 1) @ r0<- CCBB
4290 mov r9, rINST, lsr #8 @ r9<- AA
4291 mov r3, r0, lsr #8 @ r3<- CC
4292 and r2, r0, #255 @ r2<- BB
4293 GET_VREG(r1, r3) @ r1<- vCC
4294 GET_VREG(r0, r2) @ r0<- vBB
4295 .if 0
4296 cmp r1, #0 @ is second operand zero?
4297 beq common_errDivideByZero
4298 .endif
4299
4300 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4301 @ optional op; may set condition codes
4302 and r0, r0, r1 @ r0<- op, r0-r3 changed
4303 GET_INST_OPCODE(ip) @ extract opcode from rINST
4304 SET_VREG(r0, r9) @ vAA<- r0
4305 GOTO_OPCODE(ip) @ jump to next instruction
4306 /* 11-14 instructions */
4307
4308
4309
4310/* ------------------------------ */
4311 .balign 64
4312.L_OP_OR_INT: /* 0x96 */
4313/* File: armv5te/OP_OR_INT.S */
4314/* File: armv5te/binop.S */
4315 /*
4316 * Generic 32-bit binary operation. Provide an "instr" line that
4317 * specifies an instruction that performs "result = r0 op r1".
4318 * This could be an ARM instruction or a function call. (If the result
4319 * comes back in a register other than r0, you can override "result".)
4320 *
4321 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4322 * vCC (r1). Useful for integer division and modulus. Note that we
4323 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4324 * handles it correctly.
4325 *
4326 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4327 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4328 * mul-float, div-float, rem-float
4329 */
4330 /* binop vAA, vBB, vCC */
4331 FETCH(r0, 1) @ r0<- CCBB
4332 mov r9, rINST, lsr #8 @ r9<- AA
4333 mov r3, r0, lsr #8 @ r3<- CC
4334 and r2, r0, #255 @ r2<- BB
4335 GET_VREG(r1, r3) @ r1<- vCC
4336 GET_VREG(r0, r2) @ r0<- vBB
4337 .if 0
4338 cmp r1, #0 @ is second operand zero?
4339 beq common_errDivideByZero
4340 .endif
4341
4342 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4343 @ optional op; may set condition codes
4344 orr r0, r0, r1 @ r0<- op, r0-r3 changed
4345 GET_INST_OPCODE(ip) @ extract opcode from rINST
4346 SET_VREG(r0, r9) @ vAA<- r0
4347 GOTO_OPCODE(ip) @ jump to next instruction
4348 /* 11-14 instructions */
4349
4350
4351
4352/* ------------------------------ */
4353 .balign 64
4354.L_OP_XOR_INT: /* 0x97 */
4355/* File: armv5te/OP_XOR_INT.S */
4356/* File: armv5te/binop.S */
4357 /*
4358 * Generic 32-bit binary operation. Provide an "instr" line that
4359 * specifies an instruction that performs "result = r0 op r1".
4360 * This could be an ARM instruction or a function call. (If the result
4361 * comes back in a register other than r0, you can override "result".)
4362 *
4363 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4364 * vCC (r1). Useful for integer division and modulus. Note that we
4365 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4366 * handles it correctly.
4367 *
4368 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4369 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4370 * mul-float, div-float, rem-float
4371 */
4372 /* binop vAA, vBB, vCC */
4373 FETCH(r0, 1) @ r0<- CCBB
4374 mov r9, rINST, lsr #8 @ r9<- AA
4375 mov r3, r0, lsr #8 @ r3<- CC
4376 and r2, r0, #255 @ r2<- BB
4377 GET_VREG(r1, r3) @ r1<- vCC
4378 GET_VREG(r0, r2) @ r0<- vBB
4379 .if 0
4380 cmp r1, #0 @ is second operand zero?
4381 beq common_errDivideByZero
4382 .endif
4383
4384 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4385 @ optional op; may set condition codes
4386 eor r0, r0, r1 @ r0<- op, r0-r3 changed
4387 GET_INST_OPCODE(ip) @ extract opcode from rINST
4388 SET_VREG(r0, r9) @ vAA<- r0
4389 GOTO_OPCODE(ip) @ jump to next instruction
4390 /* 11-14 instructions */
4391
4392
4393
4394/* ------------------------------ */
4395 .balign 64
4396.L_OP_SHL_INT: /* 0x98 */
4397/* File: armv5te/OP_SHL_INT.S */
4398/* File: armv5te/binop.S */
4399 /*
4400 * Generic 32-bit binary operation. Provide an "instr" line that
4401 * specifies an instruction that performs "result = r0 op r1".
4402 * This could be an ARM instruction or a function call. (If the result
4403 * comes back in a register other than r0, you can override "result".)
4404 *
4405 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4406 * vCC (r1). Useful for integer division and modulus. Note that we
4407 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4408 * handles it correctly.
4409 *
4410 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4411 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4412 * mul-float, div-float, rem-float
4413 */
4414 /* binop vAA, vBB, vCC */
4415 FETCH(r0, 1) @ r0<- CCBB
4416 mov r9, rINST, lsr #8 @ r9<- AA
4417 mov r3, r0, lsr #8 @ r3<- CC
4418 and r2, r0, #255 @ r2<- BB
4419 GET_VREG(r1, r3) @ r1<- vCC
4420 GET_VREG(r0, r2) @ r0<- vBB
4421 .if 0
4422 cmp r1, #0 @ is second operand zero?
4423 beq common_errDivideByZero
4424 .endif
4425
4426 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4427 and r1, r1, #31 @ optional op; may set condition codes
4428 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
4429 GET_INST_OPCODE(ip) @ extract opcode from rINST
4430 SET_VREG(r0, r9) @ vAA<- r0
4431 GOTO_OPCODE(ip) @ jump to next instruction
4432 /* 11-14 instructions */
4433
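/*
 * The "and r1, r1, #31" above implements the Dalvik rule that shift
 * instructions use only the low five bits of the count. Illustrative
 * C sketch (ad hoc name):
 *
 *     #include <stdint.h>
 *     static int32_t shl_int(int32_t v, int32_t count)
 *     {
 *         return (int32_t)((uint32_t)v << (count & 31));
 *     }
 */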
4434
4435
4436/* ------------------------------ */
4437 .balign 64
4438.L_OP_SHR_INT: /* 0x99 */
4439/* File: armv5te/OP_SHR_INT.S */
4440/* File: armv5te/binop.S */
4441 /*
4442 * Generic 32-bit binary operation. Provide an "instr" line that
4443 * specifies an instruction that performs "result = r0 op r1".
4444 * This could be an ARM instruction or a function call. (If the result
4445 * comes back in a register other than r0, you can override "result".)
4446 *
4447 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4448 * vCC (r1). Useful for integer division and modulus. Note that we
4449 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4450 * handles it correctly.
4451 *
4452 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4453 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4454 * mul-float, div-float, rem-float
4455 */
4456 /* binop vAA, vBB, vCC */
4457 FETCH(r0, 1) @ r0<- CCBB
4458 mov r9, rINST, lsr #8 @ r9<- AA
4459 mov r3, r0, lsr #8 @ r3<- CC
4460 and r2, r0, #255 @ r2<- BB
4461 GET_VREG(r1, r3) @ r1<- vCC
4462 GET_VREG(r0, r2) @ r0<- vBB
4463 .if 0
4464 cmp r1, #0 @ is second operand zero?
4465 beq common_errDivideByZero
4466 .endif
4467
4468 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4469 and r1, r1, #31 @ optional op; may set condition codes
4470 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
4471 GET_INST_OPCODE(ip) @ extract opcode from rINST
4472 SET_VREG(r0, r9) @ vAA<- r0
4473 GOTO_OPCODE(ip) @ jump to next instruction
4474 /* 11-14 instructions */
4475
4476
4477
4478/* ------------------------------ */
4479 .balign 64
4480.L_OP_USHR_INT: /* 0x9a */
4481/* File: armv5te/OP_USHR_INT.S */
4482/* File: armv5te/binop.S */
4483 /*
4484 * Generic 32-bit binary operation. Provide an "instr" line that
4485 * specifies an instruction that performs "result = r0 op r1".
4486 * This could be an ARM instruction or a function call. (If the result
4487 * comes back in a register other than r0, you can override "result".)
4488 *
4489 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4490 * vCC (r1). Useful for integer division and modulus. Note that we
4491 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
4492 * handles it correctly.
4493 *
4494 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
4495 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
4496 * mul-float, div-float, rem-float
4497 */
4498 /* binop vAA, vBB, vCC */
4499 FETCH(r0, 1) @ r0<- CCBB
4500 mov r9, rINST, lsr #8 @ r9<- AA
4501 mov r3, r0, lsr #8 @ r3<- CC
4502 and r2, r0, #255 @ r2<- BB
4503 GET_VREG(r1, r3) @ r1<- vCC
4504 GET_VREG(r0, r2) @ r0<- vBB
4505 .if 0
4506 cmp r1, #0 @ is second operand zero?
4507 beq common_errDivideByZero
4508 .endif
4509
4510 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4511 and r1, r1, #31 @ optional op; may set condition codes
4512 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
4513 GET_INST_OPCODE(ip) @ extract opcode from rINST
4514 SET_VREG(r0, r9) @ vAA<- r0
4515 GOTO_OPCODE(ip) @ jump to next instruction
4516 /* 11-14 instructions */
4517
4518
4519
4520/* ------------------------------ */
4521 .balign 64
4522.L_OP_ADD_LONG: /* 0x9b */
4523/* File: armv5te/OP_ADD_LONG.S */
4524/* File: armv5te/binopWide.S */
4525 /*
4526 * Generic 64-bit binary operation. Provide an "instr" line that
4527 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4528 * This could be an ARM instruction or a function call. (If the result
4529 * comes back in a register other than r0, you can override "result".)
4530 *
4531 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4532 * vCC (r1). Useful for integer division and modulus.
4533 *
4534 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4535 * xor-long, add-double, sub-double, mul-double, div-double,
4536 * rem-double
4537 *
4538 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4539 */
4540 /* binop vAA, vBB, vCC */
4541 FETCH(r0, 1) @ r0<- CCBB
4542 mov r9, rINST, lsr #8 @ r9<- AA
4543 and r2, r0, #255 @ r2<- BB
4544 mov r3, r0, lsr #8 @ r3<- CC
4545 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4546 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4547 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4548 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4549 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4550 .if 0
4551 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4552 beq common_errDivideByZero
4553 .endif
4554 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4555
4556 adds r0, r0, r2 @ optional op; may set condition codes
4557 adc r1, r1, r3 @ result<- op, r0-r3 changed
4558 GET_INST_OPCODE(ip) @ extract opcode from rINST
4559 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4560 GOTO_OPCODE(ip) @ jump to next instruction
4561 /* 14-17 instructions */
4562
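/*
 * add-long is a 32-bit add plus an add-with-carry ("adds"/"adc").
 * Illustrative C sketch (ad hoc name):
 *
 *     #include <stdint.h>
 *     static void add_long(uint32_t lo1, uint32_t hi1,
 *                          uint32_t lo2, uint32_t hi2,
 *                          uint32_t* lo_out, uint32_t* hi_out)
 *     {
 *         *lo_out = lo1 + lo2;
 *         *hi_out = hi1 + hi2 + (*lo_out < lo1);   // carry from low word
 *     }
 */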
4563
4564
4565/* ------------------------------ */
4566 .balign 64
4567.L_OP_SUB_LONG: /* 0x9c */
4568/* File: armv5te/OP_SUB_LONG.S */
4569/* File: armv5te/binopWide.S */
4570 /*
4571 * Generic 64-bit binary operation. Provide an "instr" line that
4572 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4573 * This could be an ARM instruction or a function call. (If the result
4574 * comes back in a register other than r0, you can override "result".)
4575 *
4576 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4577 * vCC (r1). Useful for integer division and modulus.
4578 *
4579 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4580 * xor-long, add-double, sub-double, mul-double, div-double,
4581 * rem-double
4582 *
4583 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4584 */
4585 /* binop vAA, vBB, vCC */
4586 FETCH(r0, 1) @ r0<- CCBB
4587 mov r9, rINST, lsr #8 @ r9<- AA
4588 and r2, r0, #255 @ r2<- BB
4589 mov r3, r0, lsr #8 @ r3<- CC
4590 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4591 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4592 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4593 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4594 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4595 .if 0
4596 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4597 beq common_errDivideByZero
4598 .endif
4599 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4600
4601 subs r0, r0, r2 @ optional op; may set condition codes
4602 sbc r1, r1, r3 @ result<- op, r0-r3 changed
4603 GET_INST_OPCODE(ip) @ extract opcode from rINST
4604 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4605 GOTO_OPCODE(ip) @ jump to next instruction
4606 /* 14-17 instructions */
4607
4608
4609
4610/* ------------------------------ */
4611 .balign 64
4612.L_OP_MUL_LONG: /* 0x9d */
4613/* File: armv5te/OP_MUL_LONG.S */
4614 /*
4615 * Signed 64-bit integer multiply.
4616 *
4617 * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
4618 * WX
4619 * x YZ
4620 * --------
4621 * ZW ZX
4622 * YW YX
4623 *
4624 * The low word of the result holds ZX, the high word holds
4625 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
4626 * it doesn't fit in the low 64 bits.
4627 *
4628 * Unlike most ARM math operations, multiply instructions have
4629 * restrictions on using the same register more than once (Rd and Rm
4630 * cannot be the same).
4631 */
4632 /* mul-long vAA, vBB, vCC */
4633 FETCH(r0, 1) @ r0<- CCBB
4634 and r2, r0, #255 @ r2<- BB
4635 mov r3, r0, lsr #8 @ r3<- CC
4636 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4637 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4638 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4639 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4640 mul ip, r2, r1 @ ip<- ZxW
4641 umull r9, r10, r2, r0 @ r9/r10 <- ZxX
4642 mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
4643 mov r0, rINST, lsr #8 @ r0<- AA
4644 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
4645 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
4646 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4647 b .LOP_MUL_LONG_finish
4648
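/*
 * In C, the mul/umull/mla sequence above computes the low 64 bits of the
 * product from 32-bit pieces: with vBB = W:X and vCC = Y:Z, the result is
 * Z*X + 2^32 * ((Z*W + Y*X) mod 2^32). Illustrative sketch (ad hoc name):
 *
 *     #include <stdint.h>
 *     static uint64_t mul_long(uint32_t w, uint32_t x, uint32_t y, uint32_t z)
 *     {
 *         uint64_t zx = (uint64_t)z * x;        // umull r9, r10, r2, r0
 *         uint32_t hi = z * w + y * x;          // mul ip / mla r2
 *         return zx + ((uint64_t)hi << 32);     // add r10, r2, r10
 *     }
 */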
4649/* ------------------------------ */
4650 .balign 64
4651.L_OP_DIV_LONG: /* 0x9e */
4652/* File: armv5te/OP_DIV_LONG.S */
4653/* File: armv5te/binopWide.S */
4654 /*
4655 * Generic 64-bit binary operation. Provide an "instr" line that
4656 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4657 * This could be an ARM instruction or a function call. (If the result
4658 * comes back in a register other than r0, you can override "result".)
4659 *
4660 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4661 * vCC (r1). Useful for integer division and modulus.
4662 *
4663 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4664 * xor-long, add-double, sub-double, mul-double, div-double,
4665 * rem-double
4666 *
4667 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4668 */
4669 /* binop vAA, vBB, vCC */
4670 FETCH(r0, 1) @ r0<- CCBB
4671 mov r9, rINST, lsr #8 @ r9<- AA
4672 and r2, r0, #255 @ r2<- BB
4673 mov r3, r0, lsr #8 @ r3<- CC
4674 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4675 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4676 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4677 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4678 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4679 .if 1
4680 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4681 beq common_errDivideByZero
4682 .endif
4683 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4684
4685 @ optional op; may set condition codes
4686 bl __aeabi_ldivmod @ result<- op, r0-r3 changed
4687 GET_INST_OPCODE(ip) @ extract opcode from rINST
4688 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4689 GOTO_OPCODE(ip) @ jump to next instruction
4690 /* 14-17 instructions */
4691
4692
4693
4694/* ------------------------------ */
4695 .balign 64
4696.L_OP_REM_LONG: /* 0x9f */
4697/* File: armv5te/OP_REM_LONG.S */
4698/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
4699/* File: armv5te/binopWide.S */
4700 /*
4701 * Generic 64-bit binary operation. Provide an "instr" line that
4702 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4703 * This could be an ARM instruction or a function call. (If the result
4704 * comes back in a register other than r0, you can override "result".)
4705 *
4706 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4707 * vCC (r2-r3). Useful for integer division and modulus.
4708 *
4709 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4710 * xor-long, add-double, sub-double, mul-double, div-double,
4711 * rem-double
4712 *
4713 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4714 */
4715 /* binop vAA, vBB, vCC */
4716 FETCH(r0, 1) @ r0<- CCBB
4717 mov r9, rINST, lsr #8 @ r9<- AA
4718 and r2, r0, #255 @ r2<- BB
4719 mov r3, r0, lsr #8 @ r3<- CC
4720 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4721 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4722 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4723 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4724 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4725 .if 1
4726 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4727 beq common_errDivideByZero
4728 .endif
4729 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4730
4731 @ optional op; may set condition codes
4732 bl __aeabi_ldivmod @ result<- op, r0-r3 changed
4733 GET_INST_OPCODE(ip) @ extract opcode from rINST
4734 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
4735 GOTO_OPCODE(ip) @ jump to next instruction
4736 /* 14-17 instructions */
4737
4738
4739
4740/* ------------------------------ */
4741 .balign 64
4742.L_OP_AND_LONG: /* 0xa0 */
4743/* File: armv5te/OP_AND_LONG.S */
4744/* File: armv5te/binopWide.S */
4745 /*
4746 * Generic 64-bit binary operation. Provide an "instr" line that
4747 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4748 * This could be an ARM instruction or a function call. (If the result
4749 * comes back in a register other than r0, you can override "result".)
4750 *
4751 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4752 * vCC (r2-r3). Useful for integer division and modulus.
4753 *
4754 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4755 * xor-long, add-double, sub-double, mul-double, div-double,
4756 * rem-double
4757 *
4758 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4759 */
4760 /* binop vAA, vBB, vCC */
4761 FETCH(r0, 1) @ r0<- CCBB
4762 mov r9, rINST, lsr #8 @ r9<- AA
4763 and r2, r0, #255 @ r2<- BB
4764 mov r3, r0, lsr #8 @ r3<- CC
4765 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4766 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4767 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4768 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4769 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4770 .if 0
4771 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4772 beq common_errDivideByZero
4773 .endif
4774 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4775
4776 and r0, r0, r2 @ optional op; may set condition codes
4777 and r1, r1, r3 @ result<- op, r0-r3 changed
4778 GET_INST_OPCODE(ip) @ extract opcode from rINST
4779 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4780 GOTO_OPCODE(ip) @ jump to next instruction
4781 /* 14-17 instructions */
4782
4783
4784
4785/* ------------------------------ */
4786 .balign 64
4787.L_OP_OR_LONG: /* 0xa1 */
4788/* File: armv5te/OP_OR_LONG.S */
4789/* File: armv5te/binopWide.S */
4790 /*
4791 * Generic 64-bit binary operation. Provide an "instr" line that
4792 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4793 * This could be an ARM instruction or a function call. (If the result
4794 * comes back in a register other than r0, you can override "result".)
4795 *
4796 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4797 * vCC (r2-r3). Useful for integer division and modulus.
4798 *
4799 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4800 * xor-long, add-double, sub-double, mul-double, div-double,
4801 * rem-double
4802 *
4803 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4804 */
4805 /* binop vAA, vBB, vCC */
4806 FETCH(r0, 1) @ r0<- CCBB
4807 mov r9, rINST, lsr #8 @ r9<- AA
4808 and r2, r0, #255 @ r2<- BB
4809 mov r3, r0, lsr #8 @ r3<- CC
4810 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4811 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4812 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4813 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4814 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4815 .if 0
4816 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4817 beq common_errDivideByZero
4818 .endif
4819 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4820
4821 orr r0, r0, r2 @ optional op; may set condition codes
4822 orr r1, r1, r3 @ result<- op, r0-r3 changed
4823 GET_INST_OPCODE(ip) @ extract opcode from rINST
4824 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4825 GOTO_OPCODE(ip) @ jump to next instruction
4826 /* 14-17 instructions */
4827
4828
4829
4830/* ------------------------------ */
4831 .balign 64
4832.L_OP_XOR_LONG: /* 0xa2 */
4833/* File: armv5te/OP_XOR_LONG.S */
4834/* File: armv5te/binopWide.S */
4835 /*
4836 * Generic 64-bit binary operation. Provide an "instr" line that
4837 * specifies an instruction that performs "result = r0-r1 op r2-r3".
4838 * This could be an ARM instruction or a function call. (If the result
4839 * comes back in a register other than r0, you can override "result".)
4840 *
4841 * If "chkzero" is set to 1, we perform a divide-by-zero check on
4842 * vCC (r2-r3). Useful for integer division and modulus.
4843 *
4844 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
4845 * xor-long, add-double, sub-double, mul-double, div-double,
4846 * rem-double
4847 *
4848 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
4849 */
4850 /* binop vAA, vBB, vCC */
4851 FETCH(r0, 1) @ r0<- CCBB
4852 mov r9, rINST, lsr #8 @ r9<- AA
4853 and r2, r0, #255 @ r2<- BB
4854 mov r3, r0, lsr #8 @ r3<- CC
4855 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4856 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
4857 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
4858 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
4859 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
4860 .if 0
4861 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
4862 beq common_errDivideByZero
4863 .endif
4864 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4865
4866 eor r0, r0, r2 @ optional op; may set condition codes
4867 eor r1, r1, r3 @ result<- op, r0-r3 changed
4868 GET_INST_OPCODE(ip) @ extract opcode from rINST
4869 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4870 GOTO_OPCODE(ip) @ jump to next instruction
4871 /* 14-17 instructions */
4872
4873
4874
4875/* ------------------------------ */
4876 .balign 64
4877.L_OP_SHL_LONG: /* 0xa3 */
4878/* File: armv5te/OP_SHL_LONG.S */
4879 /*
4880 * Long integer shift. This is different from the generic 32/64-bit
4881 * binary operations because vAA/vBB are 64-bit but vCC (the shift
4882 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
4883 * 6 bits of the shift distance.
4884 */
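    /*
     * A C sketch of the 64-bit left shift built from 32-bit halves
     * (illustration only; names are ours). The "shift >= 32" case matches
     * the movpl path below:
     *
     *     #include <stdint.h>
     *
     *     uint64_t shl_long(uint32_t lo, uint32_t hi, uint32_t shift)
     *     {
     *         shift &= 63;                        // Dalvik masks to 6 bits
     *         if (shift >= 32) {
     *             hi = lo << (shift - 32);        // low word fills high word
     *             lo = 0;
     *         } else if (shift != 0) {
     *             hi = (hi << shift) | (lo >> (32 - shift));
     *             lo <<= shift;
     *         }
     *         return ((uint64_t)hi << 32) | lo;
     *     }
     */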
4885 /* shl-long vAA, vBB, vCC */
4886 FETCH(r0, 1) @ r0<- CCBB
4887 mov r9, rINST, lsr #8 @ r9<- AA
4888 and r3, r0, #255 @ r3<- BB
4889 mov r0, r0, lsr #8 @ r0<- CC
4890 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
4891 GET_VREG(r2, r0) @ r2<- vCC
4892 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
4893 and r2, r2, #63 @ r2<- r2 & 0x3f
4894 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4895
4896 mov r1, r1, asl r2 @ r1<- r1 << r2
4897 rsb r3, r2, #32 @ r3<- 32 - r2
4898 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
4899 subs ip, r2, #32 @ ip<- r2 - 32
4900 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
4901 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4902 b .LOP_SHL_LONG_finish
4903
4904/* ------------------------------ */
4905 .balign 64
4906.L_OP_SHR_LONG: /* 0xa4 */
4907/* File: armv5te/OP_SHR_LONG.S */
4908 /*
4909 * Long integer shift. This is different from the generic 32/64-bit
4910 * binary operations because vAA/vBB are 64-bit but vCC (the shift
4911 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
4912 * 6 bits of the shift distance.
4913 */
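    /*
     * The signed variant, sketched in C for illustration (names are ours;
     * assumes '>>' on a negative int is an arithmetic shift, as gcc/clang
     * provide on ARM). ushr-long is the same shape with logical shifts:
     *
     *     #include <stdint.h>
     *
     *     int64_t shr_long(uint32_t lo, int32_t hi, uint32_t shift)
     *     {
     *         shift &= 63;
     *         if (shift >= 32) {
     *             lo = (uint32_t)(hi >> (shift - 32));
     *             hi >>= 31;                       // all sign bits
     *         } else if (shift != 0) {
     *             lo = (lo >> shift) | ((uint32_t)hi << (32 - shift));
     *             hi >>= shift;
     *         }
     *         return (int64_t)(((uint64_t)(uint32_t)hi << 32) | lo);
     *     }
     */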
4914 /* shr-long vAA, vBB, vCC */
4915 FETCH(r0, 1) @ r0<- CCBB
4916 mov r9, rINST, lsr #8 @ r9<- AA
4917 and r3, r0, #255 @ r3<- BB
4918 mov r0, r0, lsr #8 @ r0<- CC
4919 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
4920 GET_VREG(r2, r0) @ r2<- vCC
4921 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
4922 and r2, r2, #63 @ r2<- r2 & 0x3f
4923 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4924
4925 mov r0, r0, lsr r2 @ r0<- r0 >> r2
4926 rsb r3, r2, #32 @ r3<- 32 - r2
4927 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
4928 subs ip, r2, #32 @ ip<- r2 - 32
4929 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
4930 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4931 b .LOP_SHR_LONG_finish
4932
4933/* ------------------------------ */
4934 .balign 64
4935.L_OP_USHR_LONG: /* 0xa5 */
4936/* File: armv5te/OP_USHR_LONG.S */
4937 /*
4938 * Long integer shift. This is different from the generic 32/64-bit
4939 * binary operations because vAA/vBB are 64-bit but vCC (the shift
4940 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
4941 * 6 bits of the shift distance.
4942 */
4943 /* ushr-long vAA, vBB, vCC */
4944 FETCH(r0, 1) @ r0<- CCBB
4945 mov r9, rINST, lsr #8 @ r9<- AA
4946 and r3, r0, #255 @ r3<- BB
4947 mov r0, r0, lsr #8 @ r0<- CC
4948 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
4949 GET_VREG(r2, r0) @ r2<- vCC
4950 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
4951 and r2, r2, #63 @ r2<- r2 & 0x3f
4952 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4953
4954 mov r0, r0, lsr r2 @ r0<- r0 >> r2
4955 rsb r3, r2, #32 @ r3<- 32 - r2
4956 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
4957 subs ip, r2, #32 @ ip<- r2 - 32
4958 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
4959 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4960 b .LOP_USHR_LONG_finish
4961
4962/* ------------------------------ */
4963 .balign 64
4964.L_OP_ADD_FLOAT: /* 0xa6 */
4965/* File: arm-vfp/OP_ADD_FLOAT.S */
4966/* File: arm-vfp/fbinop.S */
4967 /*
4968 * Generic 32-bit floating-point operation. Provide an "instr" line that
4969 * specifies an instruction that performs "s2 = s0 op s1". Because we
4970 * use the "softfp" ABI, this must be an instruction, not a function call.
4971 *
4972 * For: add-float, sub-float, mul-float, div-float
4973 */
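    /*
     * At the Dalvik level this is just "reinterpret the 32-bit vregs as
     * floats, operate, write back". A hedged C sketch (names are ours;
     * "fp" is the interpreted frame pointer used throughout this file):
     *
     *     #include <stdint.h>
     *     #include <string.h>
     *
     *     void add_float(uint32_t* fp, int AA, int BB, int CC)
     *     {
     *         float b, c, a;
     *         memcpy(&b, &fp[BB], 4);    // flds s0, [&vBB]
     *         memcpy(&c, &fp[CC], 4);    // flds s1, [&vCC]
     *         a = b + c;                 // fadds s2, s0, s1
     *         memcpy(&fp[AA], &a, 4);    // fsts s2, [&vAA]
     *     }
     */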
4974 /* floatop vAA, vBB, vCC */
4975 FETCH(r0, 1) @ r0<- CCBB
4976 mov r9, rINST, lsr #8 @ r9<- AA
4977 mov r3, r0, lsr #8 @ r3<- CC
4978 and r2, r0, #255 @ r2<- BB
4979 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
4980 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
4981 flds s1, [r3] @ s1<- vCC
4982 flds s0, [r2] @ s0<- vBB
4983
4984 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
4985 fadds s2, s0, s1 @ s2<- op
4986 GET_INST_OPCODE(ip) @ extract opcode from rINST
4987 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
4988 fsts s2, [r9] @ vAA<- s2
4989 GOTO_OPCODE(ip) @ jump to next instruction
4990
4991
4992/* ------------------------------ */
4993 .balign 64
4994.L_OP_SUB_FLOAT: /* 0xa7 */
4995/* File: arm-vfp/OP_SUB_FLOAT.S */
4996/* File: arm-vfp/fbinop.S */
4997 /*
4998 * Generic 32-bit floating-point operation. Provide an "instr" line that
4999 * specifies an instruction that performs "s2 = s0 op s1". Because we
5000 * use the "softfp" ABI, this must be an instruction, not a function call.
5001 *
5002 * For: add-float, sub-float, mul-float, div-float
5003 */
5004 /* floatop vAA, vBB, vCC */
5005 FETCH(r0, 1) @ r0<- CCBB
5006 mov r9, rINST, lsr #8 @ r9<- AA
5007 mov r3, r0, lsr #8 @ r3<- CC
5008 and r2, r0, #255 @ r2<- BB
5009 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5010 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5011 flds s1, [r3] @ s1<- vCC
5012 flds s0, [r2] @ s0<- vBB
5013
5014 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5015 fsubs s2, s0, s1 @ s2<- op
5016 GET_INST_OPCODE(ip) @ extract opcode from rINST
5017 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5018 fsts s2, [r9] @ vAA<- s2
5019 GOTO_OPCODE(ip) @ jump to next instruction
5020
5021
5022/* ------------------------------ */
5023 .balign 64
5024.L_OP_MUL_FLOAT: /* 0xa8 */
5025/* File: arm-vfp/OP_MUL_FLOAT.S */
5026/* File: arm-vfp/fbinop.S */
5027 /*
5028 * Generic 32-bit floating-point operation. Provide an "instr" line that
5029 * specifies an instruction that performs "s2 = s0 op s1". Because we
5030 * use the "softfp" ABI, this must be an instruction, not a function call.
5031 *
5032 * For: add-float, sub-float, mul-float, div-float
5033 */
5034 /* floatop vAA, vBB, vCC */
5035 FETCH(r0, 1) @ r0<- CCBB
5036 mov r9, rINST, lsr #8 @ r9<- AA
5037 mov r3, r0, lsr #8 @ r3<- CC
5038 and r2, r0, #255 @ r2<- BB
5039 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5040 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5041 flds s1, [r3] @ s1<- vCC
5042 flds s0, [r2] @ s0<- vBB
5043
5044 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5045 fmuls s2, s0, s1 @ s2<- op
5046 GET_INST_OPCODE(ip) @ extract opcode from rINST
5047 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5048 fsts s2, [r9] @ vAA<- s2
5049 GOTO_OPCODE(ip) @ jump to next instruction
5050
5051
5052/* ------------------------------ */
5053 .balign 64
5054.L_OP_DIV_FLOAT: /* 0xa9 */
5055/* File: arm-vfp/OP_DIV_FLOAT.S */
5056/* File: arm-vfp/fbinop.S */
5057 /*
5058 * Generic 32-bit floating-point operation. Provide an "instr" line that
5059 * specifies an instruction that performs "s2 = s0 op s1". Because we
5060 * use the "softfp" ABI, this must be an instruction, not a function call.
5061 *
5062 * For: add-float, sub-float, mul-float, div-float
5063 */
5064 /* floatop vAA, vBB, vCC */
5065 FETCH(r0, 1) @ r0<- CCBB
5066 mov r9, rINST, lsr #8 @ r9<- AA
5067 mov r3, r0, lsr #8 @ r3<- CC
5068 and r2, r0, #255 @ r2<- BB
5069 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5070 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5071 flds s1, [r3] @ s1<- vCC
5072 flds s0, [r2] @ s0<- vBB
5073
5074 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5075 fdivs s2, s0, s1 @ s2<- op
5076 GET_INST_OPCODE(ip) @ extract opcode from rINST
5077 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5078 fsts s2, [r9] @ vAA<- s2
5079 GOTO_OPCODE(ip) @ jump to next instruction
5080
5081
5082/* ------------------------------ */
5083 .balign 64
5084.L_OP_REM_FLOAT: /* 0xaa */
5085/* File: armv5te/OP_REM_FLOAT.S */
5086/* EABI doesn't define a float remainder function, but libm does */
5087/* File: armv5te/binop.S */
5088 /*
5089 * Generic 32-bit binary operation. Provide an "instr" line that
5090 * specifies an instruction that performs "result = r0 op r1".
5091 * This could be an ARM instruction or a function call. (If the result
5092 * comes back in a register other than r0, you can override "result".)
5093 *
5094 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5095 * vCC (r1). Useful for integer division and modulus. Note that we
5096 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
5097 * handles it correctly.
5098 *
5099 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
5100 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
5101 * mul-float, div-float, rem-float
5102 */
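    /*
     * rem-float follows C fmodf() semantics: the result takes the sign of
     * the dividend, e.g. fmodf(-5.5f, 3.0f) == -2.5f. A sketch of the call
     * made below (softfp: vBB arrives in r0, vCC in r1, result in r0):
     *
     *     #include <math.h>
     *
     *     float rem_float(float vBB, float vCC)
     *     {
     *         return fmodf(vBB, vCC);    // bl fmodf
     *     }
     */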
5103 /* binop vAA, vBB, vCC */
5104 FETCH(r0, 1) @ r0<- CCBB
5105 mov r9, rINST, lsr #8 @ r9<- AA
5106 mov r3, r0, lsr #8 @ r3<- CC
5107 and r2, r0, #255 @ r2<- BB
5108 GET_VREG(r1, r3) @ r1<- vCC
5109 GET_VREG(r0, r2) @ r0<- vBB
5110 .if 0
5111 cmp r1, #0 @ is second operand zero?
5112 beq common_errDivideByZero
5113 .endif
5114
5115 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5116 @ optional op; may set condition codes
5117 bl fmodf @ r0<- op, r0-r3 changed
5118 GET_INST_OPCODE(ip) @ extract opcode from rINST
5119 SET_VREG(r0, r9) @ vAA<- r0
5120 GOTO_OPCODE(ip) @ jump to next instruction
5121 /* 11-14 instructions */
5122
5123
5124
5125/* ------------------------------ */
5126 .balign 64
5127.L_OP_ADD_DOUBLE: /* 0xab */
5128/* File: arm-vfp/OP_ADD_DOUBLE.S */
5129/* File: arm-vfp/fbinopWide.S */
5130 /*
5131 * Generic 64-bit double-precision floating point binary operation.
5132 * Provide an "instr" line that specifies an instruction that performs
5133 * "d2 = d0 op d1".
5134 *
5135 * for: add-double, sub-double, mul-double, div-double
5136 */
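    /*
     * Same pattern as the single-precision case, except a double occupies
     * a vreg pair, which fldd/fstd move in one go. Hedged C sketch (names
     * are ours):
     *
     *     #include <stdint.h>
     *     #include <string.h>
     *
     *     void add_double(uint32_t* fp, int AA, int BB, int CC)
     *     {
     *         double b, c, a;
     *         memcpy(&b, &fp[BB], 8);    // fldd d0, [&vBB]  (vBB/vBB+1)
     *         memcpy(&c, &fp[CC], 8);    // fldd d1, [&vCC]  (vCC/vCC+1)
     *         a = b + c;                 // faddd d2, d0, d1
     *         memcpy(&fp[AA], &a, 8);    // fstd d2, [&vAA]  (vAA/vAA+1)
     *     }
     */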
5137 /* doubleop vAA, vBB, vCC */
5138 FETCH(r0, 1) @ r0<- CCBB
5139 mov r9, rINST, lsr #8 @ r9<- AA
5140 mov r3, r0, lsr #8 @ r3<- CC
5141 and r2, r0, #255 @ r2<- BB
5142 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5143 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5144 fldd d1, [r3] @ d1<- vCC
5145 fldd d0, [r2] @ d0<- vBB
5146
5147 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5148 faddd d2, d0, d1 @ d2<- op
5149 GET_INST_OPCODE(ip) @ extract opcode from rINST
5150 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5151 fstd d2, [r9] @ vAA<- d2
5152 GOTO_OPCODE(ip) @ jump to next instruction
5153
5154
5155/* ------------------------------ */
5156 .balign 64
5157.L_OP_SUB_DOUBLE: /* 0xac */
5158/* File: arm-vfp/OP_SUB_DOUBLE.S */
5159/* File: arm-vfp/fbinopWide.S */
5160 /*
5161 * Generic 64-bit double-precision floating point binary operation.
5162 * Provide an "instr" line that specifies an instruction that performs
5163 * "d2 = d0 op d1".
5164 *
5165 * for: add-double, sub-double, mul-double, div-double
5166 */
5167 /* doubleop vAA, vBB, vCC */
5168 FETCH(r0, 1) @ r0<- CCBB
5169 mov r9, rINST, lsr #8 @ r9<- AA
5170 mov r3, r0, lsr #8 @ r3<- CC
5171 and r2, r0, #255 @ r2<- BB
5172 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5173 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5174 fldd d1, [r3] @ d1<- vCC
5175 fldd d0, [r2] @ d0<- vBB
5176
5177 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5178 fsubd d2, d0, d1 @ d2<- op
5179 GET_INST_OPCODE(ip) @ extract opcode from rINST
5180 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5181 fstd d2, [r9] @ vAA<- d2
5182 GOTO_OPCODE(ip) @ jump to next instruction
5183
5184
5185/* ------------------------------ */
5186 .balign 64
5187.L_OP_MUL_DOUBLE: /* 0xad */
5188/* File: arm-vfp/OP_MUL_DOUBLE.S */
5189/* File: arm-vfp/fbinopWide.S */
5190 /*
5191 * Generic 64-bit double-precision floating point binary operation.
5192 * Provide an "instr" line that specifies an instruction that performs
5193 * "d2 = d0 op d1".
5194 *
5195 * for: add-double, sub-double, mul-double, div-double
5196 */
5197 /* doubleop vAA, vBB, vCC */
5198 FETCH(r0, 1) @ r0<- CCBB
5199 mov r9, rINST, lsr #8 @ r9<- AA
5200 mov r3, r0, lsr #8 @ r3<- CC
5201 and r2, r0, #255 @ r2<- BB
5202 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5203 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5204 fldd d1, [r3] @ d1<- vCC
5205 fldd d0, [r2] @ d0<- vBB
5206
5207 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5208 fmuld d2, d0, d1 @ d2<- op
5209 GET_INST_OPCODE(ip) @ extract opcode from rINST
5210 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5211 fstd d2, [r9] @ vAA<- d2
5212 GOTO_OPCODE(ip) @ jump to next instruction
5213
5214
5215/* ------------------------------ */
5216 .balign 64
5217.L_OP_DIV_DOUBLE: /* 0xae */
5218/* File: arm-vfp/OP_DIV_DOUBLE.S */
5219/* File: arm-vfp/fbinopWide.S */
5220 /*
5221 * Generic 64-bit double-precision floating point binary operation.
5222 * Provide an "instr" line that specifies an instruction that performs
5223 * "d2 = d0 op d1".
5224 *
5225 * for: add-double, sub-double, mul-double, div-double
5226 */
5227 /* doubleop vAA, vBB, vCC */
5228 FETCH(r0, 1) @ r0<- CCBB
5229 mov r9, rINST, lsr #8 @ r9<- AA
5230 mov r3, r0, lsr #8 @ r3<- CC
5231 and r2, r0, #255 @ r2<- BB
5232 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
5233 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
5234 fldd d1, [r3] @ d1<- vCC
5235 fldd d0, [r2] @ d0<- vBB
5236
5237 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5238 fdivd d2, d0, d1 @ d2<- op
5239 GET_INST_OPCODE(ip) @ extract opcode from rINST
5240 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
5241 fstd d2, [r9] @ vAA<- d2
5242 GOTO_OPCODE(ip) @ jump to next instruction
5243
5244
5245/* ------------------------------ */
5246 .balign 64
5247.L_OP_REM_DOUBLE: /* 0xaf */
5248/* File: armv5te/OP_REM_DOUBLE.S */
5249/* EABI doesn't define a double remainder function, but libm does */
5250/* File: armv5te/binopWide.S */
5251 /*
5252 * Generic 64-bit binary operation. Provide an "instr" line that
5253 * specifies an instruction that performs "result = r0-r1 op r2-r3".
5254 * This could be an ARM instruction or a function call. (If the result
5255 * comes back in a register other than r0, you can override "result".)
5256 *
5257 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5258 * vCC (r2-r3). Useful for integer division and modulus.
5259 *
5260 * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
5261 * xor-long, add-double, sub-double, mul-double, div-double,
5262 * rem-double
5263 *
5264 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
5265 */
5266 /* binop vAA, vBB, vCC */
5267 FETCH(r0, 1) @ r0<- CCBB
5268 mov r9, rINST, lsr #8 @ r9<- AA
5269 and r2, r0, #255 @ r2<- BB
5270 mov r3, r0, lsr #8 @ r3<- CC
5271 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5272 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
5273 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
5274 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
5275 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
5276 .if 0
5277 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5278 beq common_errDivideByZero
5279 .endif
5280 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5281
5282 @ optional op; may set condition codes
5283 bl fmod @ result<- op, r0-r3 changed
5284 GET_INST_OPCODE(ip) @ extract opcode from rINST
5285 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5286 GOTO_OPCODE(ip) @ jump to next instruction
5287 /* 14-17 instructions */
5288
5289
5290
5291/* ------------------------------ */
5292 .balign 64
5293.L_OP_ADD_INT_2ADDR: /* 0xb0 */
5294/* File: armv6t2/OP_ADD_INT_2ADDR.S */
5295/* File: armv6t2/binop2addr.S */
5296 /*
5297 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5298 * that specifies an instruction that performs "result = r0 op r1".
5299 * This could be an ARM instruction or a function call. (If the result
5300 * comes back in a register other than r0, you can override "result".)
5301 *
5302 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5303 * vB (r1). Useful for integer division and modulus.
5304 *
5305 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5306 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5307 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5308 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5309 */
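    /*
     * The /2addr encoding packs both registers into the first code unit:
     * vA (destination and first operand) in bits 8-11, vB in bits 12-15.
     * A hedged C sketch of add-int/2addr (names are ours):
     *
     *     #include <stdint.h>
     *
     *     void add_int_2addr(uint32_t* fp, uint16_t inst)
     *     {
     *         unsigned A = (inst >> 8) & 0xf;   // ubfx r9, rINST, #8, #4
     *         unsigned B = inst >> 12;          // mov r3, rINST, lsr #12
     *         fp[A] = fp[A] + fp[B];            // add r0, r0, r1; SET_VREG
     *     }
     */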
5310 /* binop/2addr vA, vB */
5311 mov r3, rINST, lsr #12 @ r3<- B
5312 ubfx r9, rINST, #8, #4 @ r9<- A
5313 GET_VREG(r1, r3) @ r1<- vB
5314 GET_VREG(r0, r9) @ r0<- vA
5315 .if 0
5316 cmp r1, #0 @ is second operand zero?
5317 beq common_errDivideByZero
5318 .endif
5319 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5320
5321 @ optional op; may set condition codes
5322 add r0, r0, r1 @ r0<- op, r0-r3 changed
5323 GET_INST_OPCODE(ip) @ extract opcode from rINST
5324 SET_VREG(r0, r9) @ vAA<- r0
5325 GOTO_OPCODE(ip) @ jump to next instruction
5326 /* 10-13 instructions */
5327
5328
5329
5330/* ------------------------------ */
5331 .balign 64
5332.L_OP_SUB_INT_2ADDR: /* 0xb1 */
5333/* File: armv6t2/OP_SUB_INT_2ADDR.S */
5334/* File: armv6t2/binop2addr.S */
5335 /*
5336 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5337 * that specifies an instruction that performs "result = r0 op r1".
5338 * This could be an ARM instruction or a function call. (If the result
5339 * comes back in a register other than r0, you can override "result".)
5340 *
5341 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5342 * vB (r1). Useful for integer division and modulus.
5343 *
5344 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5345 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5346 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5347 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5348 */
5349 /* binop/2addr vA, vB */
5350 mov r3, rINST, lsr #12 @ r3<- B
5351 ubfx r9, rINST, #8, #4 @ r9<- A
5352 GET_VREG(r1, r3) @ r1<- vB
5353 GET_VREG(r0, r9) @ r0<- vA
5354 .if 0
5355 cmp r1, #0 @ is second operand zero?
5356 beq common_errDivideByZero
5357 .endif
5358 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5359
5360 @ optional op; may set condition codes
5361 sub r0, r0, r1 @ r0<- op, r0-r3 changed
5362 GET_INST_OPCODE(ip) @ extract opcode from rINST
5363 SET_VREG(r0, r9) @ vAA<- r0
5364 GOTO_OPCODE(ip) @ jump to next instruction
5365 /* 10-13 instructions */
5366
5367
5368
5369/* ------------------------------ */
5370 .balign 64
5371.L_OP_MUL_INT_2ADDR: /* 0xb2 */
5372/* File: armv6t2/OP_MUL_INT_2ADDR.S */
5373/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
5374/* File: armv6t2/binop2addr.S */
5375 /*
5376 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5377 * that specifies an instruction that performs "result = r0 op r1".
5378 * This could be an ARM instruction or a function call. (If the result
5379 * comes back in a register other than r0, you can override "result".)
5380 *
5381 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5382 * vB (r1). Useful for integer division and modulus.
5383 *
5384 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5385 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5386 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5387 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5388 */
5389 /* binop/2addr vA, vB */
5390 mov r3, rINST, lsr #12 @ r3<- B
5391 ubfx r9, rINST, #8, #4 @ r9<- A
5392 GET_VREG(r1, r3) @ r1<- vB
5393 GET_VREG(r0, r9) @ r0<- vA
5394 .if 0
5395 cmp r1, #0 @ is second operand zero?
5396 beq common_errDivideByZero
5397 .endif
5398 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5399
5400 @ optional op; may set condition codes
5401 mul r0, r1, r0 @ r0<- op, r0-r3 changed
5402 GET_INST_OPCODE(ip) @ extract opcode from rINST
5403 SET_VREG(r0, r9) @ vAA<- r0
5404 GOTO_OPCODE(ip) @ jump to next instruction
5405 /* 10-13 instructions */
5406
5407
5408
5409/* ------------------------------ */
5410 .balign 64
5411.L_OP_DIV_INT_2ADDR: /* 0xb3 */
5412/* File: armv6t2/OP_DIV_INT_2ADDR.S */
5413/* File: armv6t2/binop2addr.S */
5414 /*
5415 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5416 * that specifies an instruction that performs "result = r0 op r1".
5417 * This could be an ARM instruction or a function call. (If the result
5418 * comes back in a register other than r0, you can override "result".)
5419 *
5420 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5421 * vB (r1). Useful for integer division and modulus.
5422 *
5423 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5424 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5425 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5426 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5427 */
5428 /* binop/2addr vA, vB */
5429 mov r3, rINST, lsr #12 @ r3<- B
5430 ubfx r9, rINST, #8, #4 @ r9<- A
5431 GET_VREG(r1, r3) @ r1<- vB
5432 GET_VREG(r0, r9) @ r0<- vA
5433 .if 1
5434 cmp r1, #0 @ is second operand zero?
5435 beq common_errDivideByZero
5436 .endif
5437 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5438
5439 @ optional op; may set condition codes
5440 bl __aeabi_idiv @ r0<- op, r0-r3 changed
5441 GET_INST_OPCODE(ip) @ extract opcode from rINST
5442 SET_VREG(r0, r9) @ vAA<- r0
5443 GOTO_OPCODE(ip) @ jump to next instruction
5444 /* 10-13 instructions */
5445
5446
5447
5448/* ------------------------------ */
5449 .balign 64
5450.L_OP_REM_INT_2ADDR: /* 0xb4 */
5451/* File: armv6t2/OP_REM_INT_2ADDR.S */
5452/* idivmod returns quotient in r0 and remainder in r1 */
5453/* File: armv6t2/binop2addr.S */
5454 /*
5455 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5456 * that specifies an instruction that performs "result = r0 op r1".
5457 * This could be an ARM instruction or a function call. (If the result
5458 * comes back in a register other than r0, you can override "result".)
5459 *
5460 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5461 * vB (r1). Useful for integer division and modulus.
5462 *
5463 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5464 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5465 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5466 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5467 */
5468 /* binop/2addr vA, vB */
5469 mov r3, rINST, lsr #12 @ r3<- B
5470 ubfx r9, rINST, #8, #4 @ r9<- A
5471 GET_VREG(r1, r3) @ r1<- vB
5472 GET_VREG(r0, r9) @ r0<- vA
5473 .if 1
5474 cmp r1, #0 @ is second operand zero?
5475 beq common_errDivideByZero
5476 .endif
5477 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5478
5479 @ optional op; may set condition codes
5480 bl __aeabi_idivmod @ r1<- op, r0-r3 changed
5481 GET_INST_OPCODE(ip) @ extract opcode from rINST
5482 SET_VREG(r1, r9) @ vAA<- r1
5483 GOTO_OPCODE(ip) @ jump to next instruction
5484 /* 10-13 instructions */
5485
5486
5487
5488/* ------------------------------ */
5489 .balign 64
5490.L_OP_AND_INT_2ADDR: /* 0xb5 */
5491/* File: armv6t2/OP_AND_INT_2ADDR.S */
5492/* File: armv6t2/binop2addr.S */
5493 /*
5494 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5495 * that specifies an instruction that performs "result = r0 op r1".
5496 * This could be an ARM instruction or a function call. (If the result
5497 * comes back in a register other than r0, you can override "result".)
5498 *
5499 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5500 * vB (r1). Useful for integer division and modulus.
5501 *
5502 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5503 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5504 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5505 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5506 */
5507 /* binop/2addr vA, vB */
5508 mov r3, rINST, lsr #12 @ r3<- B
5509 ubfx r9, rINST, #8, #4 @ r9<- A
5510 GET_VREG(r1, r3) @ r1<- vB
5511 GET_VREG(r0, r9) @ r0<- vA
5512 .if 0
5513 cmp r1, #0 @ is second operand zero?
5514 beq common_errDivideByZero
5515 .endif
5516 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5517
5518 @ optional op; may set condition codes
5519 and r0, r0, r1 @ r0<- op, r0-r3 changed
5520 GET_INST_OPCODE(ip) @ extract opcode from rINST
5521 SET_VREG(r0, r9) @ vAA<- r0
5522 GOTO_OPCODE(ip) @ jump to next instruction
5523 /* 10-13 instructions */
5524
5525
5526
5527/* ------------------------------ */
5528 .balign 64
5529.L_OP_OR_INT_2ADDR: /* 0xb6 */
5530/* File: armv6t2/OP_OR_INT_2ADDR.S */
5531/* File: armv6t2/binop2addr.S */
5532 /*
5533 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5534 * that specifies an instruction that performs "result = r0 op r1".
5535 * This could be an ARM instruction or a function call. (If the result
5536 * comes back in a register other than r0, you can override "result".)
5537 *
5538 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5539 * vB (r1). Useful for integer division and modulus.
5540 *
5541 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5542 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5543 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5544 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5545 */
5546 /* binop/2addr vA, vB */
5547 mov r3, rINST, lsr #12 @ r3<- B
5548 ubfx r9, rINST, #8, #4 @ r9<- A
5549 GET_VREG(r1, r3) @ r1<- vB
5550 GET_VREG(r0, r9) @ r0<- vA
5551 .if 0
5552 cmp r1, #0 @ is second operand zero?
5553 beq common_errDivideByZero
5554 .endif
5555 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5556
5557 @ optional op; may set condition codes
5558 orr r0, r0, r1 @ r0<- op, r0-r3 changed
5559 GET_INST_OPCODE(ip) @ extract opcode from rINST
5560 SET_VREG(r0, r9) @ vAA<- r0
5561 GOTO_OPCODE(ip) @ jump to next instruction
5562 /* 10-13 instructions */
5563
5564
5565
5566/* ------------------------------ */
5567 .balign 64
5568.L_OP_XOR_INT_2ADDR: /* 0xb7 */
5569/* File: armv6t2/OP_XOR_INT_2ADDR.S */
5570/* File: armv6t2/binop2addr.S */
5571 /*
5572 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5573 * that specifies an instruction that performs "result = r0 op r1".
5574 * This could be an ARM instruction or a function call. (If the result
5575 * comes back in a register other than r0, you can override "result".)
5576 *
5577 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5578 * vB (r1). Useful for integer division and modulus.
5579 *
5580 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5581 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5582 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5583 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5584 */
5585 /* binop/2addr vA, vB */
5586 mov r3, rINST, lsr #12 @ r3<- B
5587 ubfx r9, rINST, #8, #4 @ r9<- A
5588 GET_VREG(r1, r3) @ r1<- vB
5589 GET_VREG(r0, r9) @ r0<- vA
5590 .if 0
5591 cmp r1, #0 @ is second operand zero?
5592 beq common_errDivideByZero
5593 .endif
5594 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5595
5596 @ optional op; may set condition codes
5597 eor r0, r0, r1 @ r0<- op, r0-r3 changed
5598 GET_INST_OPCODE(ip) @ extract opcode from rINST
5599 SET_VREG(r0, r9) @ vAA<- r0
5600 GOTO_OPCODE(ip) @ jump to next instruction
5601 /* 10-13 instructions */
5602
5603
5604
5605/* ------------------------------ */
5606 .balign 64
5607.L_OP_SHL_INT_2ADDR: /* 0xb8 */
5608/* File: armv6t2/OP_SHL_INT_2ADDR.S */
5609/* File: armv6t2/binop2addr.S */
5610 /*
5611 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5612 * that specifies an instruction that performs "result = r0 op r1".
5613 * This could be an ARM instruction or a function call. (If the result
5614 * comes back in a register other than r0, you can override "result".)
5615 *
5616 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5617 * vB (r1). Useful for integer division and modulus.
5618 *
5619 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5620 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5621 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5622 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5623 */
5624 /* binop/2addr vA, vB */
5625 mov r3, rINST, lsr #12 @ r3<- B
5626 ubfx r9, rINST, #8, #4 @ r9<- A
5627 GET_VREG(r1, r3) @ r1<- vB
5628 GET_VREG(r0, r9) @ r0<- vA
5629 .if 0
5630 cmp r1, #0 @ is second operand zero?
5631 beq common_errDivideByZero
5632 .endif
5633 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5634
5635 and r1, r1, #31 @ optional op; may set condition codes
5636 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
5637 GET_INST_OPCODE(ip) @ extract opcode from rINST
5638 SET_VREG(r0, r9) @ vAA<- r0
5639 GOTO_OPCODE(ip) @ jump to next instruction
5640 /* 10-13 instructions */
5641
5642
5643
5644/* ------------------------------ */
5645 .balign 64
5646.L_OP_SHR_INT_2ADDR: /* 0xb9 */
5647/* File: armv6t2/OP_SHR_INT_2ADDR.S */
5648/* File: armv6t2/binop2addr.S */
5649 /*
5650 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5651 * that specifies an instruction that performs "result = r0 op r1".
5652 * This could be an ARM instruction or a function call. (If the result
5653 * comes back in a register other than r0, you can override "result".)
5654 *
5655 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5656 * vB (r1). Useful for integer division and modulus.
5657 *
5658 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5659 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5660 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5661 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5662 */
5663 /* binop/2addr vA, vB */
5664 mov r3, rINST, lsr #12 @ r3<- B
5665 ubfx r9, rINST, #8, #4 @ r9<- A
5666 GET_VREG(r1, r3) @ r1<- vB
5667 GET_VREG(r0, r9) @ r0<- vA
5668 .if 0
5669 cmp r1, #0 @ is second operand zero?
5670 beq common_errDivideByZero
5671 .endif
5672 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5673
5674 and r1, r1, #31 @ optional op; may set condition codes
5675 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
5676 GET_INST_OPCODE(ip) @ extract opcode from rINST
5677 SET_VREG(r0, r9) @ vAA<- r0
5678 GOTO_OPCODE(ip) @ jump to next instruction
5679 /* 10-13 instructions */
5680
5681
5682
5683/* ------------------------------ */
5684 .balign 64
5685.L_OP_USHR_INT_2ADDR: /* 0xba */
5686/* File: armv6t2/OP_USHR_INT_2ADDR.S */
5687/* File: armv6t2/binop2addr.S */
5688 /*
5689 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
5690 * that specifies an instruction that performs "result = r0 op r1".
5691 * This could be an ARM instruction or a function call. (If the result
5692 * comes back in a register other than r0, you can override "result".)
5693 *
5694 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5695 * vB (r1). Useful for integer division and modulus.
5696 *
5697 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
5698 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
5699 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
5700 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
5701 */
5702 /* binop/2addr vA, vB */
5703 mov r3, rINST, lsr #12 @ r3<- B
5704 ubfx r9, rINST, #8, #4 @ r9<- A
5705 GET_VREG(r1, r3) @ r1<- vB
5706 GET_VREG(r0, r9) @ r0<- vA
5707 .if 0
5708 cmp r1, #0 @ is second operand zero?
5709 beq common_errDivideByZero
5710 .endif
5711 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5712
5713 and r1, r1, #31 @ optional op; may set condition codes
5714 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
5715 GET_INST_OPCODE(ip) @ extract opcode from rINST
5716 SET_VREG(r0, r9) @ vAA<- r0
5717 GOTO_OPCODE(ip) @ jump to next instruction
5718 /* 10-13 instructions */
5719
5720
5721
5722/* ------------------------------ */
5723 .balign 64
5724.L_OP_ADD_LONG_2ADDR: /* 0xbb */
5725/* File: armv6t2/OP_ADD_LONG_2ADDR.S */
5726/* File: armv6t2/binopWide2addr.S */
5727 /*
5728 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
5729 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
5730 * This could be an ARM instruction or a function call. (If the result
5731 * comes back in a register other than r0, you can override "result".)
5732 *
5733 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5734 * vB (r2-r3). Useful for integer division and modulus.
5735 *
5736 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
5737 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
5738 * sub-double/2addr, mul-double/2addr, div-double/2addr,
5739 * rem-double/2addr
5740 */
5741 /* binop/2addr vA, vB */
5742 mov r1, rINST, lsr #12 @ r1<- B
5743 ubfx r9, rINST, #8, #4 @ r9<- A
5744 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5745 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
5746 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5747 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
5748 .if 0
5749 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5750 beq common_errDivideByZero
5751 .endif
5752 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5753
5754 adds r0, r0, r2 @ optional op; may set condition codes
5755 adc r1, r1, r3 @ result<- op, r0-r3 changed
5756 GET_INST_OPCODE(ip) @ extract opcode from rINST
5757 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5758 GOTO_OPCODE(ip) @ jump to next instruction
5759 /* 12-15 instructions */
5760
5761
5762
5763/* ------------------------------ */
5764 .balign 64
5765.L_OP_SUB_LONG_2ADDR: /* 0xbc */
5766/* File: armv6t2/OP_SUB_LONG_2ADDR.S */
5767/* File: armv6t2/binopWide2addr.S */
5768 /*
5769 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
5770 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
5771 * This could be an ARM instruction or a function call. (If the result
5772 * comes back in a register other than r0, you can override "result".)
5773 *
5774 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5775 * vB (r2-r3). Useful for integer division and modulus.
5776 *
5777 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
5778 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
5779 * sub-double/2addr, mul-double/2addr, div-double/2addr,
5780 * rem-double/2addr
5781 */
5782 /* binop/2addr vA, vB */
5783 mov r1, rINST, lsr #12 @ r1<- B
5784 ubfx r9, rINST, #8, #4 @ r9<- A
5785 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5786 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
5787 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5788 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
5789 .if 0
5790 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5791 beq common_errDivideByZero
5792 .endif
5793 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5794
5795 subs r0, r0, r2 @ optional op; may set condition codes
5796 sbc r1, r1, r3 @ result<- op, r0-r3 changed
5797 GET_INST_OPCODE(ip) @ extract opcode from rINST
5798 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5799 GOTO_OPCODE(ip) @ jump to next instruction
5800 /* 12-15 instructions */
5801
5802
5803
5804/* ------------------------------ */
5805 .balign 64
5806.L_OP_MUL_LONG_2ADDR: /* 0xbd */
5807/* File: armv6t2/OP_MUL_LONG_2ADDR.S */
5808 /*
5809 * Signed 64-bit integer multiply, "/2addr" version.
5810 *
5811 * See OP_MUL_LONG for an explanation.
5812 *
5813 * We get a little tight on registers, so to avoid looking up &fp[A]
5814 * again we stuff it into rINST.
5815 */
5816 /* mul-long/2addr vA, vB */
5817 mov r1, rINST, lsr #12 @ r1<- B
5818 ubfx r9, rINST, #8, #4 @ r9<- A
5819 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5820 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
5821 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5822 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
5823 mul ip, r2, r1 @ ip<- ZxW
5824 umull r9, r10, r2, r0 @ r9/r10 <- ZxX
5825 mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
5826 mov r0, rINST @ r0<- &fp[A] (free up rINST)
5827 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5828 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
5829 GET_INST_OPCODE(ip) @ extract opcode from rINST
5830 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
5831 GOTO_OPCODE(ip) @ jump to next instruction
5832
5833
5834/* ------------------------------ */
5835 .balign 64
5836.L_OP_DIV_LONG_2ADDR: /* 0xbe */
5837/* File: armv6t2/OP_DIV_LONG_2ADDR.S */
5838/* File: armv6t2/binopWide2addr.S */
5839 /*
5840 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
5841 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
5842 * This could be an ARM instruction or a function call. (If the result
5843 * comes back in a register other than r0, you can override "result".)
5844 *
5845 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5846 * vB (r2-r3). Useful for integer division and modulus.
5847 *
5848 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
5849 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
5850 * sub-double/2addr, mul-double/2addr, div-double/2addr,
5851 * rem-double/2addr
5852 */
5853 /* binop/2addr vA, vB */
5854 mov r1, rINST, lsr #12 @ r1<- B
5855 ubfx r9, rINST, #8, #4 @ r9<- A
5856 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5857 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
5858 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5859 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
5860 .if 1
5861 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5862 beq common_errDivideByZero
5863 .endif
5864 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5865
5866 @ optional op; may set condition codes
5867 bl __aeabi_ldivmod @ result<- op, r0-r3 changed
5868 GET_INST_OPCODE(ip) @ extract opcode from rINST
5869 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5870 GOTO_OPCODE(ip) @ jump to next instruction
5871 /* 12-15 instructions */
5872
5873
5874
5875/* ------------------------------ */
5876 .balign 64
5877.L_OP_REM_LONG_2ADDR: /* 0xbf */
5878/* File: armv6t2/OP_REM_LONG_2ADDR.S */
5879/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
5880/* File: armv6t2/binopWide2addr.S */
5881 /*
5882 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
5883 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
5884 * This could be an ARM instruction or a function call. (If the result
5885 * comes back in a register other than r0, you can override "result".)
5886 *
5887 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5888 * vB (r2-r3). Useful for integer division and modulus.
5889 *
5890 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
5891 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
5892 * sub-double/2addr, mul-double/2addr, div-double/2addr,
5893 * rem-double/2addr
5894 */
5895 /* binop/2addr vA, vB */
5896 mov r1, rINST, lsr #12 @ r1<- B
5897 ubfx r9, rINST, #8, #4 @ r9<- A
5898 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5899 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
5900 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5901 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
5902 .if 1
5903 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5904 beq common_errDivideByZero
5905 .endif
5906 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5907
5908 @ optional op; may set condition codes
5909 bl __aeabi_ldivmod @ result<- op, r0-r3 changed
5910 GET_INST_OPCODE(ip) @ extract opcode from rINST
5911 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
5912 GOTO_OPCODE(ip) @ jump to next instruction
5913 /* 12-15 instructions */
5914
5915
5916
5917/* ------------------------------ */
5918 .balign 64
5919.L_OP_AND_LONG_2ADDR: /* 0xc0 */
5920/* File: armv6t2/OP_AND_LONG_2ADDR.S */
5921/* File: armv6t2/binopWide2addr.S */
5922 /*
5923 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
5924 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
5925 * This could be an ARM instruction or a function call. (If the result
5926 * comes back in a register other than r0, you can override "result".)
5927 *
5928 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5929 * vB (r2-r3). Useful for integer division and modulus.
5930 *
5931 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
5932 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
5933 * sub-double/2addr, mul-double/2addr, div-double/2addr,
5934 * rem-double/2addr
5935 */
5936 /* binop/2addr vA, vB */
5937 mov r1, rINST, lsr #12 @ r1<- B
5938 ubfx r9, rINST, #8, #4 @ r9<- A
5939 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5940 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
5941 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5942 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
5943 .if 0
5944 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5945 beq common_errDivideByZero
5946 .endif
5947 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5948
5949 and r0, r0, r2 @ optional op; may set condition codes
5950 and r1, r1, r3 @ result<- op, r0-r3 changed
5951 GET_INST_OPCODE(ip) @ extract opcode from rINST
5952 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5953 GOTO_OPCODE(ip) @ jump to next instruction
5954 /* 12-15 instructions */
5955
5956
5957
5958/* ------------------------------ */
5959 .balign 64
5960.L_OP_OR_LONG_2ADDR: /* 0xc1 */
5961/* File: armv6t2/OP_OR_LONG_2ADDR.S */
5962/* File: armv6t2/binopWide2addr.S */
5963 /*
5964 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
5965 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
5966 * This could be an ARM instruction or a function call. (If the result
5967 * comes back in a register other than r0, you can override "result".)
5968 *
5969 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5970 * vB (r2-r3). Useful for integer division and modulus.
5971 *
5972 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
5973 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
5974 * sub-double/2addr, mul-double/2addr, div-double/2addr,
5975 * rem-double/2addr
5976 */
5977 /* binop/2addr vA, vB */
5978 mov r1, rINST, lsr #12 @ r1<- B
5979 ubfx r9, rINST, #8, #4 @ r9<- A
5980 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
5981 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
5982 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
5983 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
5984 .if 0
5985 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
5986 beq common_errDivideByZero
5987 .endif
5988 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
5989
5990 orr r0, r0, r2 @ optional op; may set condition codes
5991 orr r1, r1, r3 @ result<- op, r0-r3 changed
5992 GET_INST_OPCODE(ip) @ extract opcode from rINST
5993 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5994 GOTO_OPCODE(ip) @ jump to next instruction
5995 /* 12-15 instructions */
5996
5997
5998
5999/* ------------------------------ */
6000 .balign 64
6001.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
6002/* File: armv6t2/OP_XOR_LONG_2ADDR.S */
6003/* File: armv6t2/binopWide2addr.S */
6004 /*
6005 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
6006 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
6007 * This could be an ARM instruction or a function call. (If the result
6008 * comes back in a register other than r0, you can override "result".)
6009 *
6010 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6011 * vB (r2-r3). Useful for integer division and modulus.
6012 *
6013 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
6014 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
6015 * sub-double/2addr, mul-double/2addr, div-double/2addr,
6016 * rem-double/2addr
6017 */
6018 /* binop/2addr vA, vB */
6019 mov r1, rINST, lsr #12 @ r1<- B
6020 ubfx r9, rINST, #8, #4 @ r9<- A
6021 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
6022 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6023 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
6024 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6025 .if 0
6026 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
6027 beq common_errDivideByZero
6028 .endif
6029 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6030
6031 eor r0, r0, r2 @ optional op; may set condition codes
6032 eor r1, r1, r3 @ result<- op, r0-r3 changed
6033 GET_INST_OPCODE(ip) @ extract opcode from rINST
6034 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6035 GOTO_OPCODE(ip) @ jump to next instruction
6036 /* 12-15 instructions */
6037
6038
6039
6040/* ------------------------------ */
6041 .balign 64
6042.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
6043/* File: armv6t2/OP_SHL_LONG_2ADDR.S */
6044 /*
6045 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
6046 * 32-bit shift distance.
6047 */
6048 /* shl-long/2addr vA, vB */
6049 mov r3, rINST, lsr #12 @ r3<- B
6050 ubfx r9, rINST, #8, #4 @ r9<- A
6051 GET_VREG(r2, r3) @ r2<- vB
6052 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6053 and r2, r2, #63 @ r2<- r2 & 0x3f
6054 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6055
6056 mov r1, r1, asl r2 @ r1<- r1 << r2
6057 rsb r3, r2, #32 @ r3<- 32 - r2
6058 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
6059 subs ip, r2, #32 @ ip<- r2 - 32
6060 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6061 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
6062 mov r0, r0, asl r2 @ r0<- r0 << r2
6063 b .LOP_SHL_LONG_2ADDR_finish
6064
6065/* ------------------------------ */
6066 .balign 64
6067.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
6068/* File: armv6t2/OP_SHR_LONG_2ADDR.S */
6069 /*
6070 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
6071 * 32-bit shift distance.
6072 */
6073 /* shr-long/2addr vA, vB */
6074 mov r3, rINST, lsr #12 @ r3<- B
6075 ubfx r9, rINST, #8, #4 @ r9<- A
6076 GET_VREG(r2, r3) @ r2<- vB
6077 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6078 and r2, r2, #63 @ r2<- r2 & 0x3f
6079 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6080
6081 mov r0, r0, lsr r2 @ r0<- r0 >> r2
6082 rsb r3, r2, #32 @ r3<- 32 - r2
6083 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
6084 subs ip, r2, #32 @ ip<- r2 - 32
6085 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6086 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
6087 mov r1, r1, asr r2 @ r1<- r1 >> r2
6088 b .LOP_SHR_LONG_2ADDR_finish
6089
6090/* ------------------------------ */
6091 .balign 64
6092.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
6093/* File: armv6t2/OP_USHR_LONG_2ADDR.S */
6094 /*
6095 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
6096 * 32-bit shift distance.
6097 */
6098 /* ushr-long/2addr vA, vB */
6099 mov r3, rINST, lsr #12 @ r3<- B
6100 ubfx r9, rINST, #8, #4 @ r9<- A
6101 GET_VREG(r2, r3) @ r2<- vB
6102 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6103 and r2, r2, #63 @ r2<- r2 & 0x3f
6104 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6105
6106 mov r0, r0, lsr r2 @ r0<- r0 >> r2
6107 rsb r3, r2, #32 @ r3<- 32 - r2
6108 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
6109 subs ip, r2, #32 @ ip<- r2 - 32
6110 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6111 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
6112 mov r1, r1, lsr r2 @ r1<- r1 >>> r2
6113 b .LOP_USHR_LONG_2ADDR_finish
6114
6115/* ------------------------------ */
6116 .balign 64
6117.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
6118/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */
6119/* File: arm-vfp/fbinop2addr.S */
6120 /*
6121 * Generic 32-bit floating point "/2addr" binary operation. Provide
6122 * an "instr" line that specifies an instruction that performs
6123 * "s2 = s0 op s1".
6124 *
6125 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
6126 */
6127 /* binop/2addr vA, vB */
6128 mov r3, rINST, lsr #12 @ r3<- B
6129 mov r9, rINST, lsr #8 @ r9<- A+
6130 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6131 and r9, r9, #15 @ r9<- A
6132 flds s1, [r3] @ s1<- vB
6133 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6134 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6135 flds s0, [r9] @ s0<- vA
6136
6137 fadds s2, s0, s1 @ s2<- op
6138 GET_INST_OPCODE(ip) @ extract opcode from rINST
6139 fsts s2, [r9] @ vAA<- s2
6140 GOTO_OPCODE(ip) @ jump to next instruction
6141
6142
6143/* ------------------------------ */
6144 .balign 64
6145.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
6146/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */
6147/* File: arm-vfp/fbinop2addr.S */
6148 /*
6149 * Generic 32-bit floating point "/2addr" binary operation. Provide
6150 * an "instr" line that specifies an instruction that performs
6151 * "s2 = s0 op s1".
6152 *
6153 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
6154 */
6155 /* binop/2addr vA, vB */
6156 mov r3, rINST, lsr #12 @ r3<- B
6157 mov r9, rINST, lsr #8 @ r9<- A+
6158 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6159 and r9, r9, #15 @ r9<- A
6160 flds s1, [r3] @ s1<- vB
6161 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6162 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6163 flds s0, [r9] @ s0<- vA
6164
6165 fsubs s2, s0, s1 @ s2<- op
6166 GET_INST_OPCODE(ip) @ extract opcode from rINST
6167 fsts s2, [r9] @ vAA<- s2
6168 GOTO_OPCODE(ip) @ jump to next instruction
6169
6170
6171/* ------------------------------ */
6172 .balign 64
6173.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
6174/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */
6175/* File: arm-vfp/fbinop2addr.S */
6176 /*
6177 * Generic 32-bit floating point "/2addr" binary operation. Provide
6178 * an "instr" line that specifies an instruction that performs
6179 * "s2 = s0 op s1".
6180 *
6181 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
6182 */
6183 /* binop/2addr vA, vB */
6184 mov r3, rINST, lsr #12 @ r3<- B
6185 mov r9, rINST, lsr #8 @ r9<- A+
6186 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6187 and r9, r9, #15 @ r9<- A
6188 flds s1, [r3] @ s1<- vB
6189 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6190 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6191 flds s0, [r9] @ s0<- vA
6192
6193 fmuls s2, s0, s1 @ s2<- op
6194 GET_INST_OPCODE(ip) @ extract opcode from rINST
6195 fsts s2, [r9] @ vAA<- s2
6196 GOTO_OPCODE(ip) @ jump to next instruction
6197
6198
6199/* ------------------------------ */
6200 .balign 64
6201.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
6202/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */
6203/* File: arm-vfp/fbinop2addr.S */
6204 /*
6205 * Generic 32-bit floating point "/2addr" binary operation. Provide
6206 * an "instr" line that specifies an instruction that performs
6207 * "s2 = s0 op s1".
6208 *
6209 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
6210 */
6211 /* binop/2addr vA, vB */
6212 mov r3, rINST, lsr #12 @ r3<- B
6213 mov r9, rINST, lsr #8 @ r9<- A+
6214 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6215 and r9, r9, #15 @ r9<- A
6216 flds s1, [r3] @ s1<- vB
6217 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6218 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6219 flds s0, [r9] @ s0<- vA
6220
6221 fdivs s2, s0, s1 @ s2<- op
6222 GET_INST_OPCODE(ip) @ extract opcode from rINST
6223 fsts s2, [r9] @ vAA<- s2
6224 GOTO_OPCODE(ip) @ jump to next instruction
6225
6226
6227/* ------------------------------ */
6228 .balign 64
6229.L_OP_REM_FLOAT_2ADDR: /* 0xca */
6230/* File: armv6t2/OP_REM_FLOAT_2ADDR.S */
6231/* EABI doesn't define a float remainder function, but libm does */
6232/* File: armv6t2/binop2addr.S */
6233 /*
6234 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
6235 * that specifies an instruction that performs "result = r0 op r1".
6236 * This could be an ARM instruction or a function call. (If the result
6237 * comes back in a register other than r0, you can override "result".)
6238 *
6239 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6240 * vCC (r1). Useful for integer division and modulus.
6241 *
6242 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
6243 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
6244 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
6245 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
6246 */
6247 /* binop/2addr vA, vB */
6248 mov r3, rINST, lsr #12 @ r3<- B
6249 ubfx r9, rINST, #8, #4 @ r9<- A
6250 GET_VREG(r1, r3) @ r1<- vB
6251 GET_VREG(r0, r9) @ r0<- vA
6252 .if 0
6253 cmp r1, #0 @ is second operand zero?
6254 beq common_errDivideByZero
6255 .endif
6256 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6257
6258 @ optional op; may set condition codes
6259 bl fmodf @ r0<- op, r0-r3 changed
6260 GET_INST_OPCODE(ip) @ extract opcode from rINST
6261 SET_VREG(r0, r9) @ vAA<- r0
6262 GOTO_OPCODE(ip) @ jump to next instruction
6263 /* 10-13 instructions */
6264
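/*
 * Illustrative sketch, not part of the generated output: there is no
 * single VFP instruction for float remainder, so the template above
 * falls back to libm's fmodf().  In C terms (names assumed):
 *
 *     float a = *(float*) &fp[A];      // vA, passed in r0
 *     float b = *(float*) &fp[B];      // vB, passed in r1
 *     *(float*) &fp[A] = fmodf(a, b);  // result comes back in r0
 */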
6265
6266
6267/* ------------------------------ */
6268 .balign 64
6269.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
6270/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */
6271/* File: arm-vfp/fbinopWide2addr.S */
6272 /*
6273 * Generic 64-bit floating point "/2addr" binary operation. Provide
6274 * an "instr" line that specifies an instruction that performs
6275 * "d2 = d0 op d1".
6276 *
6277 * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
6278 * div-double/2addr
6279 */
6280 /* binop/2addr vA, vB */
6281 mov r3, rINST, lsr #12 @ r3<- B
6282 mov r9, rINST, lsr #8 @ r9<- A+
6283 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6284 and r9, r9, #15 @ r9<- A
6285 fldd d1, [r3] @ d1<- vB
6286 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6287 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6288 fldd d0, [r9] @ d0<- vA
6289
6290 faddd d2, d0, d1 @ d2<- op
6291 GET_INST_OPCODE(ip) @ extract opcode from rINST
6292 fstd d2, [r9] @ vAA<- d2
6293 GOTO_OPCODE(ip) @ jump to next instruction
6294
6295
6296/* ------------------------------ */
6297 .balign 64
6298.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
6299/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */
6300/* File: arm-vfp/fbinopWide2addr.S */
6301 /*
6302 * Generic 64-bit floating point "/2addr" binary operation. Provide
6303 * an "instr" line that specifies an instruction that performs
6304 * "d2 = d0 op d1".
6305 *
6306 * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
6307 * div-double/2addr
6308 */
6309 /* binop/2addr vA, vB */
6310 mov r3, rINST, lsr #12 @ r3<- B
6311 mov r9, rINST, lsr #8 @ r9<- A+
6312 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6313 and r9, r9, #15 @ r9<- A
6314 fldd d1, [r3] @ d1<- vB
6315 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6316 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6317 fldd d0, [r9] @ d0<- vA
6318
6319 fsubd d2, d0, d1 @ d2<- op
6320 GET_INST_OPCODE(ip) @ extract opcode from rINST
6321 fstd d2, [r9] @ vAA<- d2
6322 GOTO_OPCODE(ip) @ jump to next instruction
6323
6324
6325/* ------------------------------ */
6326 .balign 64
6327.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
6328/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */
6329/* File: arm-vfp/fbinopWide2addr.S */
6330 /*
6331 * Generic 64-bit floating point "/2addr" binary operation. Provide
6332 * an "instr" line that specifies an instruction that performs
6333 * "d2 = d0 op d1".
6334 *
6335 * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
6336 * div-double/2addr
6337 */
6338 /* binop/2addr vA, vB */
6339 mov r3, rINST, lsr #12 @ r3<- B
6340 mov r9, rINST, lsr #8 @ r9<- A+
6341 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6342 and r9, r9, #15 @ r9<- A
6343 fldd d1, [r3] @ d1<- vB
6344 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6345 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6346 fldd d0, [r9] @ d0<- vA
6347
6348 fmuld d2, d0, d1 @ d2<- op
6349 GET_INST_OPCODE(ip) @ extract opcode from rINST
6350 fstd d2, [r9] @ vAA<- d2
6351 GOTO_OPCODE(ip) @ jump to next instruction
6352
6353
6354/* ------------------------------ */
6355 .balign 64
6356.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
6357/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */
6358/* File: arm-vfp/fbinopWide2addr.S */
6359 /*
6360 * Generic 64-bit floating point "/2addr" binary operation. Provide
6361 * an "instr" line that specifies an instruction that performs
6362 * "d2 = d0 op d1".
6363 *
6364 * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
6365 * div-double/2addr
6366 */
6367 /* binop/2addr vA, vB */
6368 mov r3, rINST, lsr #12 @ r3<- B
6369 mov r9, rINST, lsr #8 @ r9<- A+
6370 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
6371 and r9, r9, #15 @ r9<- A
6372 fldd d1, [r3] @ d1<- vB
6373 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
6374 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6375 fldd d0, [r9] @ d0<- vA
6376
6377 fdivd d2, d0, d1 @ d2<- op
6378 GET_INST_OPCODE(ip) @ extract opcode from rINST
6379 fstd d2, [r9] @ vAA<- d2
6380 GOTO_OPCODE(ip) @ jump to next instruction
6381
6382
6383/* ------------------------------ */
6384 .balign 64
6385.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
6386/* File: armv6t2/OP_REM_DOUBLE_2ADDR.S */
6387/* EABI doesn't define a double remainder function, but libm does */
6388/* File: armv6t2/binopWide2addr.S */
6389 /*
6390 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
6391 * that specifies an instruction that performs "result = r0-r1 op r2-r3".
6392 * This could be an ARM instruction or a function call. (If the result
6393 * comes back in a register other than r0, you can override "result".)
6394 *
6395 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6396 * vCC (r1). Useful for integer division and modulus.
6397 *
6398 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
6399 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
6400 * sub-double/2addr, mul-double/2addr, div-double/2addr,
6401 * rem-double/2addr
6402 */
6403 /* binop/2addr vA, vB */
6404 mov r1, rINST, lsr #12 @ r1<- B
6405 ubfx r9, rINST, #8, #4 @ r9<- A
6406 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
6407 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6408 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
6409 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6410 .if 0
6411 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
6412 beq common_errDivideByZero
6413 .endif
6414 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6415
6416 @ optional op; may set condition codes
6417 bl fmod @ result<- op, r0-r3 changed
6418 GET_INST_OPCODE(ip) @ extract opcode from rINST
6419 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6420 GOTO_OPCODE(ip) @ jump to next instruction
6421 /* 12-15 instructions */
6422
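/*
 * Illustrative sketch, not part of the generated output: rem-double/2addr
 * likewise calls libm, with the 64-bit operands split across register
 * pairs (vA in r0/r1, vB in r2/r3, result back in r0/r1):
 *
 *     double a, b;
 *     memcpy(&a, &fp[A], sizeof(double));   // vA/vA+1
 *     memcpy(&b, &fp[B], sizeof(double));   // vB/vB+1
 *     a = fmod(a, b);
 *     memcpy(&fp[A], &a, sizeof(double));   // write back vA/vA+1
 */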
6423
6424
6425/* ------------------------------ */
6426 .balign 64
6427.L_OP_ADD_INT_LIT16: /* 0xd0 */
6428/* File: armv6t2/OP_ADD_INT_LIT16.S */
6429/* File: armv6t2/binopLit16.S */
6430 /*
6431 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6432 * that specifies an instruction that performs "result = r0 op r1".
6433 * This could be an ARM instruction or a function call. (If the result
6434 * comes back in a register other than r0, you can override "result".)
6435 *
6436 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6437 * vCC (r1). Useful for integer division and modulus.
6438 *
6439 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6440 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6441 */
6442 /* binop/lit16 vA, vB, #+CCCC */
6443 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6444 mov r2, rINST, lsr #12 @ r2<- B
6445 ubfx r9, rINST, #8, #4 @ r9<- A
6446 GET_VREG(r0, r2) @ r0<- vB
6447 .if 0
6448 cmp r1, #0 @ is second operand zero?
6449 beq common_errDivideByZero
6450 .endif
6451 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6452
6453 add r0, r0, r1 @ r0<- op, r0-r3 changed
6454 GET_INST_OPCODE(ip) @ extract opcode from rINST
6455 SET_VREG(r0, r9) @ vAA<- r0
6456 GOTO_OPCODE(ip) @ jump to next instruction
6457 /* 10-13 instructions */
6458
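/*
 * Illustrative sketch, not part of the generated output: the lit16
 * template computes vA = vB op ssssCCCC, where CCCC is the second code
 * unit sign-extended to 32 bits.  For add-int/lit16 that is simply:
 *
 *     int32_t vB  = (int32_t) fp[B];
 *     int32_t lit = (int16_t) CCCC;    // sign-extended literal
 *     fp[A] = (uint32_t)(vB + lit);
 */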
6459
6460
6461/* ------------------------------ */
6462 .balign 64
6463.L_OP_RSUB_INT: /* 0xd1 */
6464/* File: armv6t2/OP_RSUB_INT.S */
6465/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
6466/* File: armv6t2/binopLit16.S */
6467 /*
6468 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6469 * that specifies an instruction that performs "result = r0 op r1".
6470 * This could be an ARM instruction or a function call. (If the result
6471 * comes back in a register other than r0, you can override "result".)
6472 *
6473 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6474 * vCC (r1). Useful for integer division and modulus.
6475 *
6476 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6477 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6478 */
6479 /* binop/lit16 vA, vB, #+CCCC */
6480 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6481 mov r2, rINST, lsr #12 @ r2<- B
6482 ubfx r9, rINST, #8, #4 @ r9<- A
6483 GET_VREG(r0, r2) @ r0<- vB
6484 .if 0
6485 cmp r1, #0 @ is second operand zero?
6486 beq common_errDivideByZero
6487 .endif
6488 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6489
6490 rsb r0, r0, r1 @ r0<- op, r0-r3 changed
6491 GET_INST_OPCODE(ip) @ extract opcode from rINST
6492 SET_VREG(r0, r9) @ vAA<- r0
6493 GOTO_OPCODE(ip) @ jump to next instruction
6494 /* 10-13 instructions */
6495
6496
6497
6498/* ------------------------------ */
6499 .balign 64
6500.L_OP_MUL_INT_LIT16: /* 0xd2 */
6501/* File: armv6t2/OP_MUL_INT_LIT16.S */
6502/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
6503/* File: armv6t2/binopLit16.S */
6504 /*
6505 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6506 * that specifies an instruction that performs "result = r0 op r1".
6507 * This could be an ARM instruction or a function call. (If the result
6508 * comes back in a register other than r0, you can override "result".)
6509 *
6510 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6511 * vCC (r1). Useful for integer division and modulus.
6512 *
6513 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6514 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6515 */
6516 /* binop/lit16 vA, vB, #+CCCC */
6517 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6518 mov r2, rINST, lsr #12 @ r2<- B
6519 ubfx r9, rINST, #8, #4 @ r9<- A
6520 GET_VREG(r0, r2) @ r0<- vB
6521 .if 0
6522 cmp r1, #0 @ is second operand zero?
6523 beq common_errDivideByZero
6524 .endif
6525 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6526
6527 mul r0, r1, r0 @ r0<- op, r0-r3 changed
6528 GET_INST_OPCODE(ip) @ extract opcode from rINST
6529 SET_VREG(r0, r9) @ vAA<- r0
6530 GOTO_OPCODE(ip) @ jump to next instruction
6531 /* 10-13 instructions */
6532
6533
6534
6535/* ------------------------------ */
6536 .balign 64
6537.L_OP_DIV_INT_LIT16: /* 0xd3 */
6538/* File: armv6t2/OP_DIV_INT_LIT16.S */
6539/* File: armv6t2/binopLit16.S */
6540 /*
6541 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6542 * that specifies an instruction that performs "result = r0 op r1".
6543 * This could be an ARM instruction or a function call. (If the result
6544 * comes back in a register other than r0, you can override "result".)
6545 *
6546 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6547 * vCC (r1). Useful for integer division and modulus.
6548 *
6549 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6550 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6551 */
6552 /* binop/lit16 vA, vB, #+CCCC */
6553 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6554 mov r2, rINST, lsr #12 @ r2<- B
6555 ubfx r9, rINST, #8, #4 @ r9<- A
6556 GET_VREG(r0, r2) @ r0<- vB
6557 .if 1
6558 cmp r1, #0 @ is second operand zero?
6559 beq common_errDivideByZero
6560 .endif
6561 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6562
6563 bl __aeabi_idiv @ r0<- op, r0-r3 changed
6564 GET_INST_OPCODE(ip) @ extract opcode from rINST
6565 SET_VREG(r0, r9) @ vAA<- r0
6566 GOTO_OPCODE(ip) @ jump to next instruction
6567 /* 10-13 instructions */
6568
6569
6570
6571/* ------------------------------ */
6572 .balign 64
6573.L_OP_REM_INT_LIT16: /* 0xd4 */
6574/* File: armv6t2/OP_REM_INT_LIT16.S */
6575/* idivmod returns quotient in r0 and remainder in r1 */
6576/* File: armv6t2/binopLit16.S */
6577 /*
6578 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6579 * that specifies an instruction that performs "result = r0 op r1".
6580 * This could be an ARM instruction or a function call. (If the result
6581 * comes back in a register other than r0, you can override "result".)
6582 *
6583 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6584 * vCC (r1). Useful for integer division and modulus.
6585 *
6586 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6587 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6588 */
6589 /* binop/lit16 vA, vB, #+CCCC */
6590 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6591 mov r2, rINST, lsr #12 @ r2<- B
6592 ubfx r9, rINST, #8, #4 @ r9<- A
6593 GET_VREG(r0, r2) @ r0<- vB
6594 .if 1
6595 cmp r1, #0 @ is second operand zero?
6596 beq common_errDivideByZero
6597 .endif
6598 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6599
6600 bl __aeabi_idivmod @ r1<- op, r0-r3 changed
6601 GET_INST_OPCODE(ip) @ extract opcode from rINST
6602 SET_VREG(r1, r9) @ vAA<- r1
6603 GOTO_OPCODE(ip) @ jump to next instruction
6604 /* 10-13 instructions */
6605
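/*
 * Illustrative sketch, not part of the generated output: div-int/lit16
 * and rem-int/lit16 lean on the EABI runtime helpers.  __aeabi_idiv
 * returns the quotient in r0; __aeabi_idivmod returns the quotient in r0
 * and the remainder in r1, which is why the handler above stores r1:
 *
 *     if (lit == 0) goto divideByZero;              // checked above
 *     fp[A] = (uint32_t)((int32_t) fp[B] % lit);    // remainder path
 */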
6606
6607
6608/* ------------------------------ */
6609 .balign 64
6610.L_OP_AND_INT_LIT16: /* 0xd5 */
6611/* File: armv6t2/OP_AND_INT_LIT16.S */
6612/* File: armv6t2/binopLit16.S */
6613 /*
6614 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6615 * that specifies an instruction that performs "result = r0 op r1".
6616 * This could be an ARM instruction or a function call. (If the result
6617 * comes back in a register other than r0, you can override "result".)
6618 *
6619 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6620 * vCC (r1). Useful for integer division and modulus.
6621 *
6622 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6623 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6624 */
6625 /* binop/lit16 vA, vB, #+CCCC */
6626 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6627 mov r2, rINST, lsr #12 @ r2<- B
6628 ubfx r9, rINST, #8, #4 @ r9<- A
6629 GET_VREG(r0, r2) @ r0<- vB
6630 .if 0
6631 cmp r1, #0 @ is second operand zero?
6632 beq common_errDivideByZero
6633 .endif
6634 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6635
6636 and r0, r0, r1 @ r0<- op, r0-r3 changed
6637 GET_INST_OPCODE(ip) @ extract opcode from rINST
6638 SET_VREG(r0, r9) @ vAA<- r0
6639 GOTO_OPCODE(ip) @ jump to next instruction
6640 /* 10-13 instructions */
6641
6642
6643
6644/* ------------------------------ */
6645 .balign 64
6646.L_OP_OR_INT_LIT16: /* 0xd6 */
6647/* File: armv6t2/OP_OR_INT_LIT16.S */
6648/* File: armv6t2/binopLit16.S */
6649 /*
6650 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6651 * that specifies an instruction that performs "result = r0 op r1".
6652 * This could be an ARM instruction or a function call. (If the result
6653 * comes back in a register other than r0, you can override "result".)
6654 *
6655 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6656 * vCC (r1). Useful for integer division and modulus.
6657 *
6658 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6659 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6660 */
6661 /* binop/lit16 vA, vB, #+CCCC */
6662 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6663 mov r2, rINST, lsr #12 @ r2<- B
6664 ubfx r9, rINST, #8, #4 @ r9<- A
6665 GET_VREG(r0, r2) @ r0<- vB
6666 .if 0
6667 cmp r1, #0 @ is second operand zero?
6668 beq common_errDivideByZero
6669 .endif
6670 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6671
6672 orr r0, r0, r1 @ r0<- op, r0-r3 changed
6673 GET_INST_OPCODE(ip) @ extract opcode from rINST
6674 SET_VREG(r0, r9) @ vAA<- r0
6675 GOTO_OPCODE(ip) @ jump to next instruction
6676 /* 10-13 instructions */
6677
6678
6679
6680/* ------------------------------ */
6681 .balign 64
6682.L_OP_XOR_INT_LIT16: /* 0xd7 */
6683/* File: armv6t2/OP_XOR_INT_LIT16.S */
6684/* File: armv6t2/binopLit16.S */
6685 /*
6686 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
6687 * that specifies an instruction that performs "result = r0 op r1".
6688 * This could be an ARM instruction or a function call. (If the result
6689 * comes back in a register other than r0, you can override "result".)
6690 *
6691 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6692 * vCC (r1). Useful for integer division and modulus.
6693 *
6694 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
6695 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
6696 */
6697 /* binop/lit16 vA, vB, #+CCCC */
6698 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
6699 mov r2, rINST, lsr #12 @ r2<- B
6700 ubfx r9, rINST, #8, #4 @ r9<- A
6701 GET_VREG(r0, r2) @ r0<- vB
6702 .if 0
6703 cmp r1, #0 @ is second operand zero?
6704 beq common_errDivideByZero
6705 .endif
6706 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6707
6708 eor r0, r0, r1 @ r0<- op, r0-r3 changed
6709 GET_INST_OPCODE(ip) @ extract opcode from rINST
6710 SET_VREG(r0, r9) @ vAA<- r0
6711 GOTO_OPCODE(ip) @ jump to next instruction
6712 /* 10-13 instructions */
6713
6714
6715
6716/* ------------------------------ */
6717 .balign 64
6718.L_OP_ADD_INT_LIT8: /* 0xd8 */
6719/* File: armv5te/OP_ADD_INT_LIT8.S */
6720/* File: armv5te/binopLit8.S */
6721 /*
6722 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6723 * that specifies an instruction that performs "result = r0 op r1".
6724 * This could be an ARM instruction or a function call. (If the result
6725 * comes back in a register other than r0, you can override "result".)
6726 *
6727 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6728 * vCC (r1). Useful for integer division and modulus.
6729 *
6730 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6731 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6732 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6733 */
6734 /* binop/lit8 vAA, vBB, #+CC */
6735 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6736 mov r9, rINST, lsr #8 @ r9<- AA
6737 and r2, r3, #255 @ r2<- BB
6738 GET_VREG(r0, r2) @ r0<- vBB
6739 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6740 .if 0
6741 @cmp r1, #0 @ is second operand zero?
6742 beq common_errDivideByZero
6743 .endif
6744 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6745
6746 @ optional op; may set condition codes
6747 add r0, r0, r1 @ r0<- op, r0-r3 changed
6748 GET_INST_OPCODE(ip) @ extract opcode from rINST
6749 SET_VREG(r0, r9) @ vAA<- r0
6750 GOTO_OPCODE(ip) @ jump to next instruction
6751 /* 10-12 instructions */
6752
6753
6754
6755/* ------------------------------ */
6756 .balign 64
6757.L_OP_RSUB_INT_LIT8: /* 0xd9 */
6758/* File: armv5te/OP_RSUB_INT_LIT8.S */
6759/* File: armv5te/binopLit8.S */
6760 /*
6761 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6762 * that specifies an instruction that performs "result = r0 op r1".
6763 * This could be an ARM instruction or a function call. (If the result
6764 * comes back in a register other than r0, you can override "result".)
6765 *
6766 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6767 * vCC (r1). Useful for integer division and modulus.
6768 *
6769 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6770 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6771 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6772 */
6773 /* binop/lit8 vAA, vBB, #+CC */
6774 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6775 mov r9, rINST, lsr #8 @ r9<- AA
6776 and r2, r3, #255 @ r2<- BB
6777 GET_VREG(r0, r2) @ r0<- vBB
6778 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6779 .if 0
6780 @cmp r1, #0 @ is second operand zero?
6781 beq common_errDivideByZero
6782 .endif
6783 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6784
6785 @ optional op; may set condition codes
6786 rsb r0, r0, r1 @ r0<- op, r0-r3 changed
6787 GET_INST_OPCODE(ip) @ extract opcode from rINST
6788 SET_VREG(r0, r9) @ vAA<- r0
6789 GOTO_OPCODE(ip) @ jump to next instruction
6790 /* 10-12 instructions */
6791
6792
6793
6794/* ------------------------------ */
6795 .balign 64
6796.L_OP_MUL_INT_LIT8: /* 0xda */
6797/* File: armv5te/OP_MUL_INT_LIT8.S */
6798/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
6799/* File: armv5te/binopLit8.S */
6800 /*
6801 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6802 * that specifies an instruction that performs "result = r0 op r1".
6803 * This could be an ARM instruction or a function call. (If the result
6804 * comes back in a register other than r0, you can override "result".)
6805 *
6806 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6807 * vCC (r1). Useful for integer division and modulus.
6808 *
6809 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6810 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6811 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6812 */
6813 /* binop/lit8 vAA, vBB, #+CC */
6814 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6815 mov r9, rINST, lsr #8 @ r9<- AA
6816 and r2, r3, #255 @ r2<- BB
6817 GET_VREG(r0, r2) @ r0<- vBB
6818 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6819 .if 0
6820 @cmp r1, #0 @ is second operand zero?
6821 beq common_errDivideByZero
6822 .endif
6823 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6824
6825 @ optional op; may set condition codes
6826 mul r0, r1, r0 @ r0<- op, r0-r3 changed
6827 GET_INST_OPCODE(ip) @ extract opcode from rINST
6828 SET_VREG(r0, r9) @ vAA<- r0
6829 GOTO_OPCODE(ip) @ jump to next instruction
6830 /* 10-12 instructions */
6831
6832
6833
6834/* ------------------------------ */
6835 .balign 64
6836.L_OP_DIV_INT_LIT8: /* 0xdb */
6837/* File: armv5te/OP_DIV_INT_LIT8.S */
6838/* File: armv5te/binopLit8.S */
6839 /*
6840 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6841 * that specifies an instruction that performs "result = r0 op r1".
6842 * This could be an ARM instruction or a function call. (If the result
6843 * comes back in a register other than r0, you can override "result".)
6844 *
6845 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6846 * vCC (r1). Useful for integer division and modulus.
6847 *
6848 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6849 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6850 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6851 */
6852 /* binop/lit8 vAA, vBB, #+CC */
6853 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6854 mov r9, rINST, lsr #8 @ r9<- AA
6855 and r2, r3, #255 @ r2<- BB
6856 GET_VREG(r0, r2) @ r0<- vBB
6857 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6858 .if 1
6859 @cmp r1, #0 @ is second operand zero?
6860 beq common_errDivideByZero
6861 .endif
6862 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6863
6864 @ optional op; may set condition codes
6865 bl __aeabi_idiv @ r0<- op, r0-r3 changed
6866 GET_INST_OPCODE(ip) @ extract opcode from rINST
6867 SET_VREG(r0, r9) @ vAA<- r0
6868 GOTO_OPCODE(ip) @ jump to next instruction
6869 /* 10-12 instructions */
6870
6871
6872
6873/* ------------------------------ */
6874 .balign 64
6875.L_OP_REM_INT_LIT8: /* 0xdc */
6876/* File: armv5te/OP_REM_INT_LIT8.S */
6877/* idivmod returns quotient in r0 and remainder in r1 */
6878/* File: armv5te/binopLit8.S */
6879 /*
6880 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6881 * that specifies an instruction that performs "result = r0 op r1".
6882 * This could be an ARM instruction or a function call. (If the result
6883 * comes back in a register other than r0, you can override "result".)
6884 *
6885 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6886 * vCC (r1). Useful for integer division and modulus.
6887 *
6888 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6889 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6890 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6891 */
6892 /* binop/lit8 vAA, vBB, #+CC */
6893 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6894 mov r9, rINST, lsr #8 @ r9<- AA
6895 and r2, r3, #255 @ r2<- BB
6896 GET_VREG(r0, r2) @ r0<- vBB
6897 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6898 .if 1
6899 @cmp r1, #0 @ is second operand zero?
6900 beq common_errDivideByZero
6901 .endif
6902 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6903
6904 @ optional op; may set condition codes
6905 bl __aeabi_idivmod @ r1<- op, r0-r3 changed
6906 GET_INST_OPCODE(ip) @ extract opcode from rINST
6907 SET_VREG(r1, r9) @ vAA<- r1
6908 GOTO_OPCODE(ip) @ jump to next instruction
6909 /* 10-12 instructions */
6910
6911
6912
6913/* ------------------------------ */
6914 .balign 64
6915.L_OP_AND_INT_LIT8: /* 0xdd */
6916/* File: armv5te/OP_AND_INT_LIT8.S */
6917/* File: armv5te/binopLit8.S */
6918 /*
6919 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6920 * that specifies an instruction that performs "result = r0 op r1".
6921 * This could be an ARM instruction or a function call. (If the result
6922 * comes back in a register other than r0, you can override "result".)
6923 *
6924 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6925 * vCC (r1). Useful for integer division and modulus.
6926 *
6927 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6928 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6929 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6930 */
6931 /* binop/lit8 vAA, vBB, #+CC */
6932 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6933 mov r9, rINST, lsr #8 @ r9<- AA
6934 and r2, r3, #255 @ r2<- BB
6935 GET_VREG(r0, r2) @ r0<- vBB
6936 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6937 .if 0
6938 @cmp r1, #0 @ is second operand zero?
6939 beq common_errDivideByZero
6940 .endif
6941 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6942
6943 @ optional op; may set condition codes
6944 and r0, r0, r1 @ r0<- op, r0-r3 changed
6945 GET_INST_OPCODE(ip) @ extract opcode from rINST
6946 SET_VREG(r0, r9) @ vAA<- r0
6947 GOTO_OPCODE(ip) @ jump to next instruction
6948 /* 10-12 instructions */
6949
6950
6951
6952/* ------------------------------ */
6953 .balign 64
6954.L_OP_OR_INT_LIT8: /* 0xde */
6955/* File: armv5te/OP_OR_INT_LIT8.S */
6956/* File: armv5te/binopLit8.S */
6957 /*
6958 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6959 * that specifies an instruction that performs "result = r0 op r1".
6960 * This could be an ARM instruction or a function call. (If the result
6961 * comes back in a register other than r0, you can override "result".)
6962 *
6963 * If "chkzero" is set to 1, we perform a divide-by-zero check on
6964 * vCC (r1). Useful for integer division and modulus.
6965 *
6966 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
6967 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
6968 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
6969 */
6970 /* binop/lit8 vAA, vBB, #+CC */
6971 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
6972 mov r9, rINST, lsr #8 @ r9<- AA
6973 and r2, r3, #255 @ r2<- BB
6974 GET_VREG(r0, r2) @ r0<- vBB
6975 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
6976 .if 0
6977 @cmp r1, #0 @ is second operand zero?
6978 beq common_errDivideByZero
6979 .endif
6980 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
6981
6982 @ optional op; may set condition codes
6983 orr r0, r0, r1 @ r0<- op, r0-r3 changed
6984 GET_INST_OPCODE(ip) @ extract opcode from rINST
6985 SET_VREG(r0, r9) @ vAA<- r0
6986 GOTO_OPCODE(ip) @ jump to next instruction
6987 /* 10-12 instructions */
6988
6989
6990
6991/* ------------------------------ */
6992 .balign 64
6993.L_OP_XOR_INT_LIT8: /* 0xdf */
6994/* File: armv5te/OP_XOR_INT_LIT8.S */
6995/* File: armv5te/binopLit8.S */
6996 /*
6997 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
6998 * that specifies an instruction that performs "result = r0 op r1".
6999 * This could be an ARM instruction or a function call. (If the result
7000 * comes back in a register other than r0, you can override "result".)
7001 *
7002 * If "chkzero" is set to 1, we perform a divide-by-zero check on
7003 * vCC (r1). Useful for integer division and modulus.
7004 *
7005 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
7006 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
7007 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
7008 */
7009 /* binop/lit8 vAA, vBB, #+CC */
7010 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
7011 mov r9, rINST, lsr #8 @ r9<- AA
7012 and r2, r3, #255 @ r2<- BB
7013 GET_VREG(r0, r2) @ r0<- vBB
7014 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
7015 .if 0
7016 @cmp r1, #0 @ is second operand zero?
7017 beq common_errDivideByZero
7018 .endif
7019 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7020
7021 @ optional op; may set condition codes
7022 eor r0, r0, r1 @ r0<- op, r0-r3 changed
7023 GET_INST_OPCODE(ip) @ extract opcode from rINST
7024 SET_VREG(r0, r9) @ vAA<- r0
7025 GOTO_OPCODE(ip) @ jump to next instruction
7026 /* 10-12 instructions */
7027
7028
7029
7030/* ------------------------------ */
7031 .balign 64
7032.L_OP_SHL_INT_LIT8: /* 0xe0 */
7033/* File: armv5te/OP_SHL_INT_LIT8.S */
7034/* File: armv5te/binopLit8.S */
7035 /*
7036 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
7037 * that specifies an instruction that performs "result = r0 op r1".
7038 * This could be an ARM instruction or a function call. (If the result
7039 * comes back in a register other than r0, you can override "result".)
7040 *
7041 * If "chkzero" is set to 1, we perform a divide-by-zero check on
7042 * vCC (r1). Useful for integer division and modulus.
7043 *
7044 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
7045 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
7046 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
7047 */
7048 /* binop/lit8 vAA, vBB, #+CC */
7049 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
7050 mov r9, rINST, lsr #8 @ r9<- AA
7051 and r2, r3, #255 @ r2<- BB
7052 GET_VREG(r0, r2) @ r0<- vBB
7053 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
7054 .if 0
7055 @cmp r1, #0 @ is second operand zero?
7056 beq common_errDivideByZero
7057 .endif
7058 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7059
7060 and r1, r1, #31 @ optional op; may set condition codes
7061 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
7062 GET_INST_OPCODE(ip) @ extract opcode from rINST
7063 SET_VREG(r0, r9) @ vAA<- r0
7064 GOTO_OPCODE(ip) @ jump to next instruction
7065 /* 10-12 instructions */
7066
7067
7068
7069/* ------------------------------ */
7070 .balign 64
7071.L_OP_SHR_INT_LIT8: /* 0xe1 */
7072/* File: armv5te/OP_SHR_INT_LIT8.S */
7073/* File: armv5te/binopLit8.S */
7074 /*
7075 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
7076 * that specifies an instruction that performs "result = r0 op r1".
7077 * This could be an ARM instruction or a function call. (If the result
7078 * comes back in a register other than r0, you can override "result".)
7079 *
7080 * If "chkzero" is set to 1, we perform a divide-by-zero check on
7081 * vCC (r1). Useful for integer division and modulus.
7082 *
7083 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
7084 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
7085 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
7086 */
7087 /* binop/lit8 vAA, vBB, #+CC */
7088 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
7089 mov r9, rINST, lsr #8 @ r9<- AA
7090 and r2, r3, #255 @ r2<- BB
7091 GET_VREG(r0, r2) @ r0<- vBB
7092 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
7093 .if 0
7094 @cmp r1, #0 @ is second operand zero?
7095 beq common_errDivideByZero
7096 .endif
7097 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7098
7099 and r1, r1, #31 @ optional op; may set condition codes
7100 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
7101 GET_INST_OPCODE(ip) @ extract opcode from rINST
7102 SET_VREG(r0, r9) @ vAA<- r0
7103 GOTO_OPCODE(ip) @ jump to next instruction
7104 /* 10-12 instructions */
7105
7106
7107
7108/* ------------------------------ */
7109 .balign 64
7110.L_OP_USHR_INT_LIT8: /* 0xe2 */
7111/* File: armv5te/OP_USHR_INT_LIT8.S */
7112/* File: armv5te/binopLit8.S */
7113 /*
7114 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
7115 * that specifies an instruction that performs "result = r0 op r1".
7116 * This could be an ARM instruction or a function call. (If the result
7117 * comes back in a register other than r0, you can override "result".)
7118 *
7119 * If "chkzero" is set to 1, we perform a divide-by-zero check on
7120 * vCC (r1). Useful for integer division and modulus.
7121 *
7122 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
7123 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
7124 * shl-int/lit8, shr-int/lit8, ushr-int/lit8
7125 */
7126 /* binop/lit8 vAA, vBB, #+CC */
7127 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
7128 mov r9, rINST, lsr #8 @ r9<- AA
7129 and r2, r3, #255 @ r2<- BB
7130 GET_VREG(r0, r2) @ r0<- vBB
7131 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
7132 .if 0
7133 @cmp r1, #0 @ is second operand zero?
7134 beq common_errDivideByZero
7135 .endif
7136 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7137
7138 and r1, r1, #31 @ optional op; may set condition codes
7139 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
7140 GET_INST_OPCODE(ip) @ extract opcode from rINST
7141 SET_VREG(r0, r9) @ vAA<- r0
7142 GOTO_OPCODE(ip) @ jump to next instruction
7143 /* 10-12 instructions */
7144
7145
7146
7147/* ------------------------------ */
7148 .balign 64
7149.L_OP_UNUSED_E3: /* 0xe3 */
7150/* File: armv5te/OP_UNUSED_E3.S */
7151/* File: armv5te/unused.S */
7152 bl common_abort
7153
7154
7155
7156/* ------------------------------ */
7157 .balign 64
7158.L_OP_UNUSED_E4: /* 0xe4 */
7159/* File: armv5te/OP_UNUSED_E4.S */
7160/* File: armv5te/unused.S */
7161 bl common_abort
7162
7163
7164
7165/* ------------------------------ */
7166 .balign 64
7167.L_OP_UNUSED_E5: /* 0xe5 */
7168/* File: armv5te/OP_UNUSED_E5.S */
7169/* File: armv5te/unused.S */
7170 bl common_abort
7171
7172
7173
7174/* ------------------------------ */
7175 .balign 64
7176.L_OP_UNUSED_E6: /* 0xe6 */
7177/* File: armv5te/OP_UNUSED_E6.S */
7178/* File: armv5te/unused.S */
7179 bl common_abort
7180
7181
7182
7183/* ------------------------------ */
7184 .balign 64
7185.L_OP_UNUSED_E7: /* 0xe7 */
7186/* File: armv5te/OP_UNUSED_E7.S */
7187/* File: armv5te/unused.S */
7188 bl common_abort
7189
7190
7191
7192/* ------------------------------ */
7193 .balign 64
7194.L_OP_UNUSED_E8: /* 0xe8 */
7195/* File: armv5te/OP_UNUSED_E8.S */
7196/* File: armv5te/unused.S */
7197 bl common_abort
7198
7199
7200
7201/* ------------------------------ */
7202 .balign 64
7203.L_OP_UNUSED_E9: /* 0xe9 */
7204/* File: armv5te/OP_UNUSED_E9.S */
7205/* File: armv5te/unused.S */
7206 bl common_abort
7207
7208
7209
7210/* ------------------------------ */
7211 .balign 64
7212.L_OP_UNUSED_EA: /* 0xea */
7213/* File: armv5te/OP_UNUSED_EA.S */
7214/* File: armv5te/unused.S */
7215 bl common_abort
7216
7217
7218
7219/* ------------------------------ */
7220 .balign 64
7221.L_OP_UNUSED_EB: /* 0xeb */
7222/* File: armv5te/OP_UNUSED_EB.S */
7223/* File: armv5te/unused.S */
7224 bl common_abort
7225
7226
7227
7228/* ------------------------------ */
7229 .balign 64
7230.L_OP_BREAKPOINT: /* 0xec */
7231/* File: armv5te/OP_BREAKPOINT.S */
7232/* File: armv5te/unused.S */
7233 bl common_abort
7234
7235
7236
7237/* ------------------------------ */
7238 .balign 64
7239.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
7240/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
7241 /*
7242 * Handle a throw-verification-error instruction. This throws an
7243 * exception for an error discovered during verification. The
7244 * exception is indicated by AA, with some detail provided by BBBB.
7245 */
7246 /* op AA, ref@BBBB */
7247 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
7248 FETCH(r2, 1) @ r2<- BBBB
7249 EXPORT_PC() @ export the PC
7250 mov r1, rINST, lsr #8 @ r1<- AA
7251 bl dvmThrowVerificationError @ always throws
7252 b common_exceptionThrown @ handle exception
7253
7254
7255/* ------------------------------ */
7256 .balign 64
7257.L_OP_EXECUTE_INLINE: /* 0xee */
7258/* File: armv5te/OP_EXECUTE_INLINE.S */
7259 /*
7260 * Execute a "native inline" instruction.
7261 *
7262 * We need to call an InlineOp4Func:
7263 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
7264 *
7265 * The first four args are in r0-r3, pointer to return value storage
7266 * is on the stack. The function's return value is a flag that tells
7267 * us if an exception was thrown.
7268 */
7269 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
7270 FETCH(r10, 1) @ r10<- BBBB
7271 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval
7272 EXPORT_PC() @ can throw
7273 sub sp, sp, #8 @ make room for arg, +64 bit align
7274 mov r0, rINST, lsr #12 @ r0<- B
7275 str r1, [sp] @ push &glue->retval
7276 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
7277 add sp, sp, #8 @ pop stack
7278 cmp r0, #0 @ test boolean result of inline
7279 beq common_exceptionThrown @ returned false, handle exception
7280 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
7281 GET_INST_OPCODE(ip) @ extract opcode from rINST
7282 GOTO_OPCODE(ip) @ jump to next instruction
7283
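/*
 * Illustrative sketch, not part of the generated output: the call set up
 * above, in C.  The first four args travel in r0-r3 and the JValue
 * pointer is the fifth argument, pushed on the 8-byte-aligned stack; a
 * false return means an exception is pending:
 *
 *     JValue result;
 *     if (!func(arg0, arg1, arg2, arg3, &result))
 *         goto exceptionThrown;
 */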
7284/* ------------------------------ */
7285 .balign 64
7286.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
7287/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
7288 /*
7289 * Execute a "native inline" instruction, using "/range" semantics.
7290 * Same idea as execute-inline, but we get the args differently.
7291 *
7292 * We need to call an InlineOp4Func:
7293 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
7294 *
7295 * The first four args are in r0-r3, pointer to return value storage
7296 * is on the stack. The function's return value is a flag that tells
7297 * us if an exception was thrown.
7298 */
7299 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
7300 FETCH(r10, 1) @ r10<- BBBB
7301 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval
7302 EXPORT_PC() @ can throw
7303 sub sp, sp, #8 @ make room for arg, +64 bit align
7304 mov r0, rINST, lsr #8 @ r0<- AA
7305 str r1, [sp] @ push &glue->retval
7306 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
7307 add sp, sp, #8 @ pop stack
7308 cmp r0, #0 @ test boolean result of inline
7309 beq common_exceptionThrown @ returned false, handle exception
7310 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
7311 GET_INST_OPCODE(ip) @ extract opcode from rINST
7312 GOTO_OPCODE(ip) @ jump to next instruction
7313
7314/* ------------------------------ */
7315 .balign 64
7316.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
7317/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
7318 /*
7319 * invoke-direct-empty is a no-op in a "standard" interpreter.
7320 */
7321 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
7322 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
7323 GOTO_OPCODE(ip) @ execute it
7324
7325/* ------------------------------ */
7326 .balign 64
7327.L_OP_UNUSED_F1: /* 0xf1 */
7328/* File: armv5te/OP_UNUSED_F1.S */
7329/* File: armv5te/unused.S */
7330 bl common_abort
7331
7332
7333
7334/* ------------------------------ */
7335 .balign 64
7336.L_OP_IGET_QUICK: /* 0xf2 */
7337/* File: armv6t2/OP_IGET_QUICK.S */
7338 /* For: iget-quick, iget-object-quick */
7339 /* op vA, vB, offset@CCCC */
7340 mov r2, rINST, lsr #12 @ r2<- B
7341 FETCH(r1, 1) @ r1<- field byte offset
7342 GET_VREG(r3, r2) @ r3<- object we're operating on
7343 ubfx r2, rINST, #8, #4 @ r2<- A
7344 cmp r3, #0 @ check object for null
7345 beq common_errNullObject @ object was null
7346 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
7347 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7348 GET_INST_OPCODE(ip) @ extract opcode from rINST
7349 SET_VREG(r0, r2) @ fp[A]<- r0
7350 GOTO_OPCODE(ip) @ jump to next instruction
7351
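/*
 * Illustrative sketch, not part of the generated output: a "quickened"
 * iget skips field resolution and uses the byte offset baked into the
 * second code unit (names assumed for the example):
 *
 *     Object* obj = (Object*) fp[B];
 *     if (obj == NULL) goto nullObject;
 *     fp[A] = *(u4*)((char*) obj + CCCC);   // always a 32-bit load
 */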
7352
7353/* ------------------------------ */
7354 .balign 64
7355.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
7356/* File: armv6t2/OP_IGET_WIDE_QUICK.S */
7357 /* iget-wide-quick vA, vB, offset@CCCC */
7358 mov r2, rINST, lsr #12 @ r2<- B
7359 FETCH(r1, 1) @ r1<- field byte offset
7360 GET_VREG(r3, r2) @ r3<- object we're operating on
7361 ubfx r2, rINST, #8, #4 @ r2<- A
7362 cmp r3, #0 @ check object for null
7363 beq common_errNullObject @ object was null
7364 ldrd r0, [r3, r1] @ r0<- obj.field (64 bits, aligned)
7365 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7366 add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
7367 GET_INST_OPCODE(ip) @ extract opcode from rINST
7368 stmia r3, {r0-r1} @ fp[A]<- r0/r1
7369 GOTO_OPCODE(ip) @ jump to next instruction
7370
7371
7372/* ------------------------------ */
7373 .balign 64
7374.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
7375/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
7376/* File: armv5te/OP_IGET_QUICK.S */
7377 /* For: iget-quick, iget-object-quick */
7378 /* op vA, vB, offset@CCCC */
7379 mov r2, rINST, lsr #12 @ r2<- B
7380 GET_VREG(r3, r2) @ r3<- object we're operating on
7381 FETCH(r1, 1) @ r1<- field byte offset
7382 cmp r3, #0 @ check object for null
7383 mov r2, rINST, lsr #8 @ r2<- A(+)
7384 beq common_errNullObject @ object was null
7385 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
7386 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7387 and r2, r2, #15
7388 GET_INST_OPCODE(ip) @ extract opcode from rINST
7389 SET_VREG(r0, r2) @ fp[A]<- r0
7390 GOTO_OPCODE(ip) @ jump to next instruction
7391
7392
7393
7394/* ------------------------------ */
7395 .balign 64
7396.L_OP_IPUT_QUICK: /* 0xf5 */
7397/* File: armv6t2/OP_IPUT_QUICK.S */
7398 /* For: iput-quick, iput-object-quick */
7399 /* op vA, vB, offset@CCCC */
7400 mov r2, rINST, lsr #12 @ r2<- B
7401 FETCH(r1, 1) @ r1<- field byte offset
7402 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
7403 ubfx r2, rINST, #8, #4 @ r2<- A
7404 cmp r3, #0 @ check object for null
7405 beq common_errNullObject @ object was null
7406 GET_VREG(r0, r2) @ r0<- fp[A]
7407 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7408 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
7409 GET_INST_OPCODE(ip) @ extract opcode from rINST
7410 GOTO_OPCODE(ip) @ jump to next instruction
7411
7412
7413/* ------------------------------ */
7414 .balign 64
7415.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
7416/* File: armv6t2/OP_IPUT_WIDE_QUICK.S */
7417 /* iput-wide-quick vA, vB, offset@CCCC */
7418 mov r1, rINST, lsr #12 @ r1<- B
7419 ubfx r0, rINST, #8, #4 @ r0<- A
7420 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer
7421 add r3, rFP, r0, lsl #2 @ r3<- &fp[A]
7422 cmp r2, #0 @ check object for null
7423 ldmia r3, {r0-r1} @ r0/r1<- fp[A]
7424 beq common_errNullObject @ object was null
7425 FETCH(r3, 1) @ r3<- field byte offset
7426 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7427 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1
7428 GET_INST_OPCODE(ip) @ extract opcode from rINST
7429 GOTO_OPCODE(ip) @ jump to next instruction
7430
7431
7432/* ------------------------------ */
7433 .balign 64
7434.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
7435/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
7436/* File: armv5te/OP_IPUT_QUICK.S */
7437 /* For: iput-quick, iput-object-quick */
7438 /* op vA, vB, offset@CCCC */
7439 mov r2, rINST, lsr #12 @ r2<- B
7440 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
7441 FETCH(r1, 1) @ r1<- field byte offset
7442 cmp r3, #0 @ check object for null
7443 mov r2, rINST, lsr #8 @ r2<- A(+)
7444 beq common_errNullObject @ object was null
7445 and r2, r2, #15
7446 GET_VREG(r0, r2) @ r0<- fp[A]
7447 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7448 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
7449 GET_INST_OPCODE(ip) @ extract opcode from rINST
7450 GOTO_OPCODE(ip) @ jump to next instruction
7451
7452
7453
7454/* ------------------------------ */
7455 .balign 64
7456.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
7457/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
7458 /*
7459 * Handle an optimized virtual method call.
7460 *
7461 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
7462 */
7463 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
7464 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
7465 FETCH(r3, 2) @ r3<- FEDC or CCCC
7466 FETCH(r1, 1) @ r1<- BBBB
7467 .if (!0)
7468 and r3, r3, #15 @ r3<- C (or stays CCCC)
7469 .endif
7470 GET_VREG(r2, r3) @ r2<- vC ("this" ptr)
7471 cmp r2, #0 @ is "this" null?
7472 beq common_errNullObject @ null "this", throw exception
7473 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz
7474 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
7475 EXPORT_PC() @ invoke must export
7476 ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
7477 bl common_invokeMethodNoRange @ continue on
7478
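/*
 * Illustrative sketch, not part of the generated output: the quickened
 * virtual invoke indexes the receiver's vtable directly with BBBB
 * instead of resolving a method reference:
 *
 *     Object* self = (Object*) fp[C];              // "this"
 *     if (self == NULL) goto nullObject;
 *     Method* meth = self->clazz->vtable[BBBB];    // handed to the
 *                                                  // common invoke path
 */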
7479/* ------------------------------ */
7480 .balign 64
7481.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
7482/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
7483/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
7484 /*
7485 * Handle an optimized virtual method call.
7486 *
7487 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
7488 */
7489 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
7490 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
7491 FETCH(r3, 2) @ r3<- FEDC or CCCC
7492 FETCH(r1, 1) @ r1<- BBBB
7493 .if (!1)
7494 and r3, r3, #15 @ r3<- C (or stays CCCC)
7495 .endif
7496 GET_VREG(r2, r3) @ r2<- vC ("this" ptr)
7497 cmp r2, #0 @ is "this" null?
7498 beq common_errNullObject @ null "this", throw exception
7499 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz
7500 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
7501 EXPORT_PC() @ invoke must export
7502 ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
7503 bl common_invokeMethodRange @ continue on
7504
7505
7506/* ------------------------------ */
7507 .balign 64
7508.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
7509/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
7510 /*
7511 * Handle an optimized "super" method call.
7512 *
7513 * for: [opt] invoke-super-quick, invoke-super-quick/range
7514 */
7515 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
7516 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
7517 FETCH(r10, 2) @ r10<- GFED or CCCC
7518 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
7519 .if (!0)
7520 and r10, r10, #15 @ r10<- D (or stays CCCC)
7521 .endif
7522 FETCH(r1, 1) @ r1<- BBBB
7523 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
7524 EXPORT_PC() @ must export for invoke
7525 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
7526 GET_VREG(r3, r10) @ r3<- "this"
7527 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
7528 cmp r3, #0 @ null "this" ref?
7529 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
7530 beq common_errNullObject @ "this" is null, throw exception
7531 bl common_invokeMethodNoRange @ continue on
7532
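/*
 * Illustrative sketch, not part of the generated output: the quickened
 * "super" invoke goes through the superclass vtable of the calling
 * method's class, so only a null check is needed on the receiver:
 *
 *     Method* meth = method->clazz->super->vtable[BBBB];
 *     if (fp[D] == 0) goto nullObject;     // "this" must not be null
 */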
7533
7534/* ------------------------------ */
7535 .balign 64
7536.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
7537/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
7538/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
7539 /*
7540 * Handle an optimized "super" method call.
7541 *
7542 * for: [opt] invoke-super-quick, invoke-super-quick/range
7543 */
7544 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
7545 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
7546 FETCH(r10, 2) @ r10<- GFED or CCCC
7547 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
7548 .if (!1)
7549 and r10, r10, #15 @ r10<- D (or stays CCCC)
7550 .endif
7551 FETCH(r1, 1) @ r1<- BBBB
7552 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
7553 EXPORT_PC() @ must export for invoke
7554 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
7555 GET_VREG(r3, r10) @ r3<- "this"
7556 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
7557 cmp r3, #0 @ null "this" ref?
7558 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
7559 beq common_errNullObject @ "this" is null, throw exception
7560 bl common_invokeMethodRange @ continue on
7561
7562
7563
7564/* ------------------------------ */
7565 .balign 64
7566.L_OP_UNUSED_FC: /* 0xfc */
7567/* File: armv5te/OP_UNUSED_FC.S */
7568/* File: armv5te/unused.S */
7569 bl common_abort
7570
7571
7572
7573/* ------------------------------ */
7574 .balign 64
7575.L_OP_UNUSED_FD: /* 0xfd */
7576/* File: armv5te/OP_UNUSED_FD.S */
7577/* File: armv5te/unused.S */
7578 bl common_abort
7579
7580
7581
7582/* ------------------------------ */
7583 .balign 64
7584.L_OP_UNUSED_FE: /* 0xfe */
7585/* File: armv5te/OP_UNUSED_FE.S */
7586/* File: armv5te/unused.S */
7587 bl common_abort
7588
7589
7590
7591/* ------------------------------ */
7592 .balign 64
7593.L_OP_UNUSED_FF: /* 0xff */
7594/* File: armv5te/OP_UNUSED_FF.S */
7595/* File: armv5te/unused.S */
7596 bl common_abort
7597
7598
7599
7600
7601 .balign 64
7602 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
7603 .global dvmAsmInstructionEnd
7604dvmAsmInstructionEnd:
7605
7606/*
7607 * ===========================================================================
7608 * Sister implementations
7609 * ===========================================================================
7610 */
7611 .global dvmAsmSisterStart
7612 .type dvmAsmSisterStart, %function
7613 .text
7614 .balign 4
7615dvmAsmSisterStart:
7616
7617/* continuation for OP_CONST_STRING */
7618
7619 /*
7620 * Continuation if the String has not yet been resolved.
7621 * r1: BBBB (String ref)
7622 * r9: target register
7623 */
7624.LOP_CONST_STRING_resolve:
7625 EXPORT_PC()
7626 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
7627 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
7628 bl dvmResolveString @ r0<- String reference
7629 cmp r0, #0 @ failed?
7630 beq common_exceptionThrown @ yup, handle the exception
7631 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7632 GET_INST_OPCODE(ip) @ extract opcode from rINST
7633 SET_VREG(r0, r9) @ vAA<- r0
7634 GOTO_OPCODE(ip) @ jump to next instruction
7635
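/*
 * Illustrative sketch, not part of the generated output: the slow path
 * above resolves the string through the current method's class (argument
 * order inferred from the register setup; the exact prototype lives in
 * the VM headers):
 *
 *     StringObject* str = dvmResolveString(method->clazz, BBBB);
 *     if (str == NULL) goto exceptionThrown;
 *     fp[AA] = (u4) str;
 */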
7636
7637/* continuation for OP_CONST_STRING_JUMBO */
7638
7639 /*
7640 * Continuation if the String has not yet been resolved.
7641 * r1: BBBBBBBB (String ref)
7642 * r9: target register
7643 */
7644.LOP_CONST_STRING_JUMBO_resolve:
7645 EXPORT_PC()
7646 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
7647 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
7648 bl dvmResolveString @ r0<- String reference
7649 cmp r0, #0 @ failed?
7650 beq common_exceptionThrown @ yup, handle the exception
7651 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
7652 GET_INST_OPCODE(ip) @ extract opcode from rINST
7653 SET_VREG(r0, r9) @ vAA<- r0
7654 GOTO_OPCODE(ip) @ jump to next instruction
7655
7656
7657/* continuation for OP_CONST_CLASS */
7658
7659 /*
7660 * Continuation if the Class has not yet been resolved.
7661 * r1: BBBB (Class ref)
7662 * r9: target register
7663 */
7664.LOP_CONST_CLASS_resolve:
7665 EXPORT_PC()
7666 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
7667 mov r2, #1 @ r2<- true
7668 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
7669 bl dvmResolveClass @ r0<- Class reference
7670 cmp r0, #0 @ failed?
7671 beq common_exceptionThrown @ yup, handle the exception
7672 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7673 GET_INST_OPCODE(ip) @ extract opcode from rINST
7674 SET_VREG(r0, r9) @ vAA<- r0
7675 GOTO_OPCODE(ip) @ jump to next instruction
7676
7677
7678/* continuation for OP_CHECK_CAST */
7679
7680 /*
7681 * Trivial test failed, need to perform full check. This is common.
7682 * r0 holds obj->clazz
7683 * r1 holds class resolved from BBBB
7684 * r9 holds object
7685 */
7686.LOP_CHECK_CAST_fullcheck:
7687 bl dvmInstanceofNonTrivial @ r0<- boolean result
7688 cmp r0, #0 @ failed?
7689 bne .LOP_CHECK_CAST_okay @ no, success
7690
7691 @ A cast has failed. We need to throw a ClassCastException with the
7692 @ class of the object that failed to be cast.
7693 EXPORT_PC() @ about to throw
7694 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz
7695 ldr r0, .LstrClassCastExceptionPtr
7696 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
7697 bl dvmThrowExceptionWithClassMessage
7698 b common_exceptionThrown
7699
7700 /*
7701 * Resolution required. This is the least-likely path.
7702 *
7703 * r2 holds BBBB
7704 * r9 holds object
7705 */
7706.LOP_CHECK_CAST_resolve:
7707 EXPORT_PC() @ resolve() could throw
7708 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
7709 mov r1, r2 @ r1<- BBBB
7710 mov r2, #0 @ r2<- false
7711 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
7712 bl dvmResolveClass @ r0<- resolved ClassObject ptr
7713 cmp r0, #0 @ got null?
7714 beq common_exceptionThrown @ yes, handle exception
7715 mov r1, r0 @ r1<- class resolved from BBBB
7716 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
7717 b .LOP_CHECK_CAST_resolved @ pick up where we left off
7718
7719.LstrClassCastExceptionPtr:
7720 .word .LstrClassCastException
7721
7722
7723/* continuation for OP_INSTANCE_OF */
7724
7725 /*
7726 * Trivial test failed, need to perform full check. This is common.
7727 * r0 holds obj->clazz
7728 * r1 holds class resolved from BBBB
7729 * r9 holds A
7730 */
7731.LOP_INSTANCE_OF_fullcheck:
7732 bl dvmInstanceofNonTrivial @ r0<- boolean result
7733 @ fall through to OP_INSTANCE_OF_store
7734
7735 /*
7736 * r0 holds boolean result
7737 * r9 holds A
7738 */
7739.LOP_INSTANCE_OF_store:
7740 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7741 SET_VREG(r0, r9) @ vA<- r0
7742 GET_INST_OPCODE(ip) @ extract opcode from rINST
7743 GOTO_OPCODE(ip) @ jump to next instruction
7744
7745 /*
7746 * Trivial test succeeded, save and bail.
7747 * r9 holds A
7748 */
7749.LOP_INSTANCE_OF_trivial:
7750 mov r0, #1 @ indicate success
7751 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
7752 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7753 SET_VREG(r0, r9) @ vA<- r0
7754 GET_INST_OPCODE(ip) @ extract opcode from rINST
7755 GOTO_OPCODE(ip) @ jump to next instruction
7756
7757 /*
7758 * Resolution required. This is the least-likely path.
7759 *
7760 * r3 holds BBBB
7761 * r9 holds A
7762 */
7763.LOP_INSTANCE_OF_resolve:
7764 EXPORT_PC() @ resolve() could throw
7765 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method
7766 mov r1, r3 @ r1<- BBBB
7767 mov r2, #1 @ r2<- true
7768 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
7769 bl dvmResolveClass @ r0<- resolved ClassObject ptr
7770 cmp r0, #0 @ got null?
7771 beq common_exceptionThrown @ yes, handle exception
7772 mov r1, r0 @ r1<- class resolved from BBBB
7773 mov r3, rINST, lsr #12 @ r3<- B
7774 GET_VREG(r0, r3) @ r0<- vB (object)
7775 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
7776 b .LOP_INSTANCE_OF_resolved @ pick up where we left off
7777
7778
7779/* continuation for OP_NEW_INSTANCE */
7780
7781 .balign 32 @ minimize cache lines
7782.LOP_NEW_INSTANCE_finish: @ r0=new object
7783 mov r3, rINST, lsr #8 @ r3<- AA
7784 cmp r0, #0 @ failed?
7785 beq common_exceptionThrown @ yes, handle the exception
7786 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7787 GET_INST_OPCODE(ip) @ extract opcode from rINST
7788 SET_VREG(r0, r3) @ vAA<- r0
7789 GOTO_OPCODE(ip) @ jump to next instruction
7790
7791 /*
7792 * Class initialization required.
7793 *
7794 * r0 holds class object
7795 */
7796.LOP_NEW_INSTANCE_needinit:
7797 mov r9, r0 @ save r0
7798 bl dvmInitClass @ initialize class
7799 cmp r0, #0 @ check boolean result
7800 mov r0, r9 @ restore r0
7801 bne .LOP_NEW_INSTANCE_initialized @ success, continue
7802 b common_exceptionThrown @ failed, deal with init exception
7803
7804 /*
7805 * Resolution required. This is the least-likely path.
7806 *
7807 * r1 holds BBBB
7808 */
7809.LOP_NEW_INSTANCE_resolve:
7810 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
7811 mov r2, #0 @ r2<- false
7812 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
7813 bl dvmResolveClass @ r0<- resolved ClassObject ptr
7814 cmp r0, #0 @ got null?
7815 bne .LOP_NEW_INSTANCE_resolved @ no, continue
7816 b common_exceptionThrown @ yes, handle exception
7817
7818.LstrInstantiationErrorPtr:
7819 .word .LstrInstantiationError
7820
7821
7822/* continuation for OP_NEW_ARRAY */
7823
7824
7825 /*
7826 * Resolve class. (This is an uncommon case.)
7827 *
7828 * r1 holds array length
7829 * r2 holds class ref CCCC
7830 */
7831.LOP_NEW_ARRAY_resolve:
7832 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
7833 mov r9, r1 @ r9<- length (save)
7834 mov r1, r2 @ r1<- CCCC
7835 mov r2, #0 @ r2<- false
7836 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
7837 bl dvmResolveClass @ r0<- call(clazz, ref)
7838 cmp r0, #0 @ got null?
7839 mov r1, r9 @ r1<- length (restore)
7840 beq common_exceptionThrown @ yes, handle exception
7841 @ fall through to OP_NEW_ARRAY_finish
7842
7843 /*
7844 * Finish allocation.
7845 *
7846 * r0 holds class
7847 * r1 holds array length
7848 */
7849.LOP_NEW_ARRAY_finish:
7850 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table
7851 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags)
7852 cmp r0, #0 @ failed?
7853 mov r2, rINST, lsr #8 @ r2<- A+
7854 beq common_exceptionThrown @ yes, handle the exception
7855 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
7856 and r2, r2, #15 @ r2<- A
7857 GET_INST_OPCODE(ip) @ extract opcode from rINST
7858 SET_VREG(r0, r2) @ vA<- r0
7859 GOTO_OPCODE(ip) @ jump to next instruction
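
/*
 * Roughly, in C -- a sketch only of the resolve-and-allocate path above;
 * names follow the comments, not necessarily the exact sources:
 *
 *     if (arrayClass == NULL) {                  // uncommon: not yet resolved
 *         arrayClass = dvmResolveClass(method->clazz, CCCC, false);
 *         if (arrayClass == NULL)
 *             goto exceptionThrown;
 *     }
 *     ArrayObject* newArray =
 *         dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
 *     if (newArray == NULL)
 *         goto exceptionThrown;
 *     fp[A] = (u4) newArray;                     // vA<- new array
 */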
7860
7861
7862/* continuation for OP_FILLED_NEW_ARRAY */
7863
7864 /*
7865 * On entry:
7866 * r0 holds array class
7867 * r10 holds AA or BA
7868 */
7869.LOP_FILLED_NEW_ARRAY_continue:
7870 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
7871 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
7872 ldrb r3, [r3, #1] @ r3<- descriptor[1]
7873 .if 0
7874 mov r1, r10 @ r1<- AA (length)
7875 .else
7876 mov r1, r10, lsr #4 @ r1<- B (length)
7877 .endif
7878 cmp r3, #'I' @ array of ints?
7879 cmpne r3, #'L' @ array of objects?
7880 cmpne r3, #'[' @ array of arrays?
7881 mov r9, r1 @ save length in r9
7882 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
7883 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
7884 cmp r0, #0 @ null return?
7885 beq common_exceptionThrown @ alloc failed, handle exception
7886
7887 FETCH(r1, 2) @ r1<- FEDC or CCCC
7888 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
7889 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
7890 subs r9, r9, #1 @ length--, check for neg
7891 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
7892 bmi 2f @ was zero, bail
7893
7894 @ copy values from registers into the array
7895 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
7896 .if 0
7897 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
78981: ldr r3, [r2], #4 @ r3<- *r2++
7899 subs r9, r9, #1 @ count--
7900 str r3, [r0], #4 @ *contents++ = vX
7901 bpl 1b
7902 @ continue at 2
7903 .else
7904 cmp r9, #4 @ length was initially 5?
7905 and r2, r10, #15 @ r2<- A
7906 bne 1f @ <= 4 args, branch
7907 GET_VREG(r3, r2) @ r3<- vA
7908 sub r9, r9, #1 @ count--
7909 str r3, [r0, #16] @ contents[4] = vA
79101: and r2, r1, #15 @ r2<- F/E/D/C
7911 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
7912 mov r1, r1, lsr #4 @ r1<- next reg in low 4
7913 subs r9, r9, #1 @ count--
7914 str r3, [r0], #4 @ *contents++ = vX
7915 bpl 1b
7916 @ continue at 2
7917 .endif
7918
79192:
7920 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
7921 GOTO_OPCODE(ip) @ execute it
7922
7923 /*
7924 * Throw an exception indicating that we have not implemented this
7925 * mode of filled-new-array.
7926 */
7927.LOP_FILLED_NEW_ARRAY_notimpl:
7928 ldr r0, .L_strInternalError
7929 ldr r1, .L_strFilledNewArrayNotImpl
7930 bl dvmThrowException
7931 b common_exceptionThrown
7932
7933 .if (!0) @ define in one or the other, not both
7934.L_strFilledNewArrayNotImpl:
7935 .word .LstrFilledNewArrayNotImpl
7936.L_strInternalError:
7937 .word .LstrInternalError
7938 .endif
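
/*
 * Roughly, in C -- a sketch only of the non-range copy loop above (the
 * .else branch); names follow the comments, not necessarily the sources:
 *
 *     // only 'I', 'L' and '[' element types are handled here
 *     u4* contents = (u4*) newArray->contents;
 *     glue->retval.l = newArray;
 *     if (count == 5) {                          // fifth arg lives in nibble A
 *         contents[4] = fp[BA & 0x0f];
 *         count--;
 *     }
 *     for (int i = 0; i < count; i++)            // args vC..vF, one nibble each
 *         contents[i] = fp[(FEDC >> (i * 4)) & 0x0f];
 */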
7939
7940
7941/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
7942
7943 /*
7944 * On entry:
7945 * r0 holds array class
7946 * r10 holds AA or BA
7947 */
7948.LOP_FILLED_NEW_ARRAY_RANGE_continue:
7949 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
7950 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
7951 ldrb r3, [r3, #1] @ r3<- descriptor[1]
7952 .if 1
7953 mov r1, r10 @ r1<- AA (length)
7954 .else
7955 mov r1, r10, lsr #4 @ r1<- B (length)
7956 .endif
7957 cmp r3, #'I' @ array of ints?
7958 cmpne r3, #'L' @ array of objects?
7959 cmpne r3, #'[' @ array of arrays?
7960 mov r9, r1 @ save length in r9
7961 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
7962 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
7963 cmp r0, #0 @ null return?
7964 beq common_exceptionThrown @ alloc failed, handle exception
7965
7966 FETCH(r1, 2) @ r1<- FEDC or CCCC
7967 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
7968 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
7969 subs r9, r9, #1 @ length--, check for neg
7970 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
7971 bmi 2f @ was zero, bail
7972
7973 @ copy values from registers into the array
7974 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
7975 .if 1
7976 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
79771: ldr r3, [r2], #4 @ r3<- *r2++
7978 subs r9, r9, #1 @ count--
7979 str r3, [r0], #4 @ *contents++ = vX
7980 bpl 1b
7981 @ continue at 2
7982 .else
7983 cmp r9, #4 @ length was initially 5?
7984 and r2, r10, #15 @ r2<- A
7985 bne 1f @ <= 4 args, branch
7986 GET_VREG(r3, r2) @ r3<- vA
7987 sub r9, r9, #1 @ count--
7988 str r3, [r0, #16] @ contents[4] = vA
79891: and r2, r1, #15 @ r2<- F/E/D/C
7990 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
7991 mov r1, r1, lsr #4 @ r1<- next reg in low 4
7992 subs r9, r9, #1 @ count--
7993 str r3, [r0], #4 @ *contents++ = vX
7994 bpl 1b
7995 @ continue at 2
7996 .endif
7997
79982:
7999 GET_INST_OPCODE(ip) @ ip<- opcode from rINST
8000 GOTO_OPCODE(ip) @ execute it
8001
8002 /*
8003 * Throw an exception indicating that we have not implemented this
8004 * mode of filled-new-array.
8005 */
8006.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
8007 ldr r0, .L_strInternalError
8008 ldr r1, .L_strFilledNewArrayNotImpl
8009 bl dvmThrowException
8010 b common_exceptionThrown
8011
8012 .if (!1) @ define in one or the other, not both
8013.L_strFilledNewArrayNotImpl:
8014 .word .LstrFilledNewArrayNotImpl
8015.L_strInternalError:
8016 .word .LstrInternalError
8017 .endif
8018
8019
8020/* continuation for OP_CMPL_FLOAT */
8021.LOP_CMPL_FLOAT_finish:
8022 SET_VREG(r0, r9) @ vAA<- r0
8023 GOTO_OPCODE(ip) @ jump to next instruction
8024
8025
8026/* continuation for OP_CMPG_FLOAT */
8027.LOP_CMPG_FLOAT_finish:
8028 SET_VREG(r0, r9) @ vAA<- r0
8029 GOTO_OPCODE(ip) @ jump to next instruction
8030
8031
8032/* continuation for OP_CMPL_DOUBLE */
8033.LOP_CMPL_DOUBLE_finish:
8034 SET_VREG(r0, r9) @ vAA<- r0
8035 GOTO_OPCODE(ip) @ jump to next instruction
8036
8037
8038/* continuation for OP_CMPG_DOUBLE */
8039.LOP_CMPG_DOUBLE_finish:
8040 SET_VREG(r0, r9) @ vAA<- r0
8041 GOTO_OPCODE(ip) @ jump to next instruction
8042
8043
8044/* continuation for OP_CMP_LONG */
8045
8046.LOP_CMP_LONG_less:
8047 mvn r1, #0 @ r1<- -1
8048    @ Want to conditionally execute the next mov so we can avoid a branch,
8049    @ but don't see how; instead, we just replicate the tail end.
8050 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8051 SET_VREG(r1, r9) @ vAA<- r1
8052 GET_INST_OPCODE(ip) @ extract opcode from rINST
8053 GOTO_OPCODE(ip) @ jump to next instruction
8054
8055.LOP_CMP_LONG_greater:
8056 mov r1, #1 @ r1<- 1
8057 @ fall through to _finish
8058
8059.LOP_CMP_LONG_finish:
8060 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8061 SET_VREG(r1, r9) @ vAA<- r1
8062 GET_INST_OPCODE(ip) @ extract opcode from rINST
8063 GOTO_OPCODE(ip) @ jump to next instruction
8064
8065
8066/* continuation for OP_AGET_WIDE */
8067
8068.LOP_AGET_WIDE_finish:
8069 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8070 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC]
8071 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
8072 GET_INST_OPCODE(ip) @ extract opcode from rINST
8073 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
8074 GOTO_OPCODE(ip) @ jump to next instruction
8075
8076
8077/* continuation for OP_APUT_WIDE */
8078
8079.LOP_APUT_WIDE_finish:
8080 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8081 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
8082 GET_INST_OPCODE(ip) @ extract opcode from rINST
8083    strd    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2/r3
8084 GOTO_OPCODE(ip) @ jump to next instruction
8085
8086
8087/* continuation for OP_APUT_OBJECT */
8088 /*
8089 * On entry:
8090 * r1 = vBB (arrayObj)
8091 * r9 = vAA (obj)
8092 * r10 = offset into array (vBB + vCC * width)
8093 */
8094.LOP_APUT_OBJECT_finish:
8095 cmp r9, #0 @ storing null reference?
8096 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
8097 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
8098 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz
8099 bl dvmCanPutArrayElement @ test object type vs. array type
8100 cmp r0, #0 @ okay?
8101 beq common_errArrayStore @ no
8102.LOP_APUT_OBJECT_skip_check:
8103 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8104 GET_INST_OPCODE(ip) @ extract opcode from rINST
8105 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
8106 GOTO_OPCODE(ip) @ jump to next instruction
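
/*
 * Roughly, in C -- a sketch only of the store check above; names follow the
 * comments, not necessarily the exact sources:
 *
 *     if (obj != NULL &&
 *             !dvmCanPutArrayElement(obj->clazz, arrayObj->clazz))
 *         goto arrayStoreError;                  // throws ArrayStoreException
 *     ((Object**) arrayObj->contents)[index] = obj;
 */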
8107
8108
8109/* continuation for OP_IGET */
8110
8111 /*
8112 * Currently:
8113 * r0 holds resolved field
8114 * r9 holds object
8115 */
8116.LOP_IGET_finish:
8117 @bl common_squeak0
8118 cmp r9, #0 @ check object for null
8119 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8120 beq common_errNullObject @ object was null
8121 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
8122 ubfx r2, rINST, #8, #4 @ r2<- A
8123 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8124 GET_INST_OPCODE(ip) @ extract opcode from rINST
8125 SET_VREG(r0, r2) @ fp[A]<- r0
8126 GOTO_OPCODE(ip) @ jump to next instruction
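
/*
 * Roughly, in C -- a sketch only of the field get above (the _WIDE, _OBJECT,
 * _BOOLEAN, _BYTE, _CHAR and _SHORT continuations below have the same shape);
 * names follow the comments, not necessarily the exact sources:
 *
 *     if (obj == NULL)
 *         goto nullObject;                       // throws NullPointerException
 *     u4 val = *(u4*)((u1*) obj + field->byteOffset);
 *     fp[A] = val;                               // vA<- obj.field
 */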
8127
8128
8129/* continuation for OP_IGET_WIDE */
8130
8131 /*
8132 * Currently:
8133 * r0 holds resolved field
8134 * r9 holds object
8135 */
8136.LOP_IGET_WIDE_finish:
8137 cmp r9, #0 @ check object for null
8138 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8139 beq common_errNullObject @ object was null
8140 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok)
8141 ubfx r2, rINST, #8, #4 @ r2<- A
8142 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8143 add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
8144 GET_INST_OPCODE(ip) @ extract opcode from rINST
8145 stmia r3, {r0-r1} @ fp[A]<- r0/r1
8146 GOTO_OPCODE(ip) @ jump to next instruction
8147
8148
8149/* continuation for OP_IGET_OBJECT */
8150
8151 /*
8152 * Currently:
8153 * r0 holds resolved field
8154 * r9 holds object
8155 */
8156.LOP_IGET_OBJECT_finish:
8157 @bl common_squeak0
8158 cmp r9, #0 @ check object for null
8159 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8160 beq common_errNullObject @ object was null
8161 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
8162 mov r2, rINST, lsr #8 @ r2<- A+
8163 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8164 and r2, r2, #15 @ r2<- A
8165 GET_INST_OPCODE(ip) @ extract opcode from rINST
8166 SET_VREG(r0, r2) @ fp[A]<- r0
8167 GOTO_OPCODE(ip) @ jump to next instruction
8168
8169
8170/* continuation for OP_IGET_BOOLEAN */
8171
8172 /*
8173 * Currently:
8174 * r0 holds resolved field
8175 * r9 holds object
8176 */
8177.LOP_IGET_BOOLEAN_finish:
8178 @bl common_squeak1
8179 cmp r9, #0 @ check object for null
8180 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8181 beq common_errNullObject @ object was null
8182 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
8183 mov r2, rINST, lsr #8 @ r2<- A+
8184 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8185 and r2, r2, #15 @ r2<- A
8186 GET_INST_OPCODE(ip) @ extract opcode from rINST
8187 SET_VREG(r0, r2) @ fp[A]<- r0
8188 GOTO_OPCODE(ip) @ jump to next instruction
8189
8190
8191/* continuation for OP_IGET_BYTE */
8192
8193 /*
8194 * Currently:
8195 * r0 holds resolved field
8196 * r9 holds object
8197 */
8198.LOP_IGET_BYTE_finish:
8199 @bl common_squeak2
8200 cmp r9, #0 @ check object for null
8201 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8202 beq common_errNullObject @ object was null
8203 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
8204 mov r2, rINST, lsr #8 @ r2<- A+
8205 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8206 and r2, r2, #15 @ r2<- A
8207 GET_INST_OPCODE(ip) @ extract opcode from rINST
8208 SET_VREG(r0, r2) @ fp[A]<- r0
8209 GOTO_OPCODE(ip) @ jump to next instruction
8210
8211
8212/* continuation for OP_IGET_CHAR */
8213
8214 /*
8215 * Currently:
8216 * r0 holds resolved field
8217 * r9 holds object
8218 */
8219.LOP_IGET_CHAR_finish:
8220 @bl common_squeak3
8221 cmp r9, #0 @ check object for null
8222 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8223 beq common_errNullObject @ object was null
8224 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
8225 mov r2, rINST, lsr #8 @ r2<- A+
8226 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8227 and r2, r2, #15 @ r2<- A
8228 GET_INST_OPCODE(ip) @ extract opcode from rINST
8229 SET_VREG(r0, r2) @ fp[A]<- r0
8230 GOTO_OPCODE(ip) @ jump to next instruction
8231
8232
8233/* continuation for OP_IGET_SHORT */
8234
8235 /*
8236 * Currently:
8237 * r0 holds resolved field
8238 * r9 holds object
8239 */
8240.LOP_IGET_SHORT_finish:
8241 @bl common_squeak4
8242 cmp r9, #0 @ check object for null
8243 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8244 beq common_errNullObject @ object was null
8245 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
8246 mov r2, rINST, lsr #8 @ r2<- A+
8247 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8248 and r2, r2, #15 @ r2<- A
8249 GET_INST_OPCODE(ip) @ extract opcode from rINST
8250 SET_VREG(r0, r2) @ fp[A]<- r0
8251 GOTO_OPCODE(ip) @ jump to next instruction
8252
8253
8254/* continuation for OP_IPUT */
8255
8256 /*
8257 * Currently:
8258 * r0 holds resolved field
8259 * r9 holds object
8260 */
8261.LOP_IPUT_finish:
8262 @bl common_squeak0
8263 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8264 ubfx r1, rINST, #8, #4 @ r1<- A
8265 cmp r9, #0 @ check object for null
8266 GET_VREG(r0, r1) @ r0<- fp[A]
8267 beq common_errNullObject @ object was null
8268 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8269 GET_INST_OPCODE(ip) @ extract opcode from rINST
8270 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
8271 GOTO_OPCODE(ip) @ jump to next instruction
8272
8273
8274/* continuation for OP_IPUT_WIDE */
8275
8276 /*
8277 * Currently:
8278 * r0 holds resolved field
8279 * r9 holds object
8280 */
8281.LOP_IPUT_WIDE_finish:
8282 ubfx r2, rINST, #8, #4 @ r2<- A
8283 cmp r9, #0 @ check object for null
8284 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8285    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
8286 beq common_errNullObject @ object was null
8287 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8288 ldmia r2, {r0-r1} @ r0/r1<- fp[A]
8289 GET_INST_OPCODE(ip) @ extract opcode from rINST
8290 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0
8291 GOTO_OPCODE(ip) @ jump to next instruction
8292
8293
8294/* continuation for OP_IPUT_OBJECT */
8295
8296 /*
8297 * Currently:
8298 * r0 holds resolved field
8299 * r9 holds object
8300 */
8301.LOP_IPUT_OBJECT_finish:
8302 @bl common_squeak0
8303 mov r1, rINST, lsr #8 @ r1<- A+
8304 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8305 and r1, r1, #15 @ r1<- A
8306 cmp r9, #0 @ check object for null
8307 GET_VREG(r0, r1) @ r0<- fp[A]
8308 beq common_errNullObject @ object was null
8309 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8310 GET_INST_OPCODE(ip) @ extract opcode from rINST
8311 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
8312 GOTO_OPCODE(ip) @ jump to next instruction
8313
8314
8315/* continuation for OP_IPUT_BOOLEAN */
8316
8317 /*
8318 * Currently:
8319 * r0 holds resolved field
8320 * r9 holds object
8321 */
8322.LOP_IPUT_BOOLEAN_finish:
8323 @bl common_squeak1
8324 mov r1, rINST, lsr #8 @ r1<- A+
8325 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8326 and r1, r1, #15 @ r1<- A
8327 cmp r9, #0 @ check object for null
8328 GET_VREG(r0, r1) @ r0<- fp[A]
8329 beq common_errNullObject @ object was null
8330 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8331 GET_INST_OPCODE(ip) @ extract opcode from rINST
8332 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
8333 GOTO_OPCODE(ip) @ jump to next instruction
8334
8335
8336/* continuation for OP_IPUT_BYTE */
8337
8338 /*
8339 * Currently:
8340 * r0 holds resolved field
8341 * r9 holds object
8342 */
8343.LOP_IPUT_BYTE_finish:
8344 @bl common_squeak2
8345 mov r1, rINST, lsr #8 @ r1<- A+
8346 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8347 and r1, r1, #15 @ r1<- A
8348 cmp r9, #0 @ check object for null
8349 GET_VREG(r0, r1) @ r0<- fp[A]
8350 beq common_errNullObject @ object was null
8351 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8352 GET_INST_OPCODE(ip) @ extract opcode from rINST
8353 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
8354 GOTO_OPCODE(ip) @ jump to next instruction
8355
8356
8357/* continuation for OP_IPUT_CHAR */
8358
8359 /*
8360 * Currently:
8361 * r0 holds resolved field
8362 * r9 holds object
8363 */
8364.LOP_IPUT_CHAR_finish:
8365 @bl common_squeak3
8366 mov r1, rINST, lsr #8 @ r1<- A+
8367 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8368 and r1, r1, #15 @ r1<- A
8369 cmp r9, #0 @ check object for null
8370 GET_VREG(r0, r1) @ r0<- fp[A]
8371 beq common_errNullObject @ object was null
8372 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8373 GET_INST_OPCODE(ip) @ extract opcode from rINST
8374 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
8375 GOTO_OPCODE(ip) @ jump to next instruction
8376
8377
8378/* continuation for OP_IPUT_SHORT */
8379
8380 /*
8381 * Currently:
8382 * r0 holds resolved field
8383 * r9 holds object
8384 */
8385.LOP_IPUT_SHORT_finish:
8386 @bl common_squeak4
8387 mov r1, rINST, lsr #8 @ r1<- A+
8388 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
8389 and r1, r1, #15 @ r1<- A
8390 cmp r9, #0 @ check object for null
8391 GET_VREG(r0, r1) @ r0<- fp[A]
8392 beq common_errNullObject @ object was null
8393 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
8394 GET_INST_OPCODE(ip) @ extract opcode from rINST
8395 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
8396 GOTO_OPCODE(ip) @ jump to next instruction
8397
8398
8399/* continuation for OP_SGET */
8400
8401 /*
8402 * Continuation if the field has not yet been resolved.
8403 * r1: BBBB field ref
8404 */
8405.LOP_SGET_resolve:
8406 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8407 EXPORT_PC() @ resolve() could throw, so export now
8408 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8409 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8410 cmp r0, #0 @ success?
8411 bne .LOP_SGET_finish @ yes, finish
8412 b common_exceptionThrown @ no, handle exception
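
/*
 * Roughly, in C -- a sketch only of the resolve stub above (the same stub is
 * generated for every static field get/put variant below); names follow the
 * comments, not necessarily the exact sources:
 *
 *     StaticField* sfield = dvmResolveStaticField(method->clazz, BBBB);
 *     if (sfield == NULL)
 *         goto exceptionThrown;                  // resolve failed and threw
 *     // otherwise fall back into the fast path with the resolved field
 */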
8413
8414
8415/* continuation for OP_SGET_WIDE */
8416
8417 /*
8418 * Continuation if the field has not yet been resolved.
8419 * r1: BBBB field ref
8420 */
8421.LOP_SGET_WIDE_resolve:
8422 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8423 EXPORT_PC() @ resolve() could throw, so export now
8424 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8425 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8426 cmp r0, #0 @ success?
8427 bne .LOP_SGET_WIDE_finish @ yes, finish
8428 b common_exceptionThrown @ no, handle exception
8429
8430
8431/* continuation for OP_SGET_OBJECT */
8432
8433 /*
8434 * Continuation if the field has not yet been resolved.
8435 * r1: BBBB field ref
8436 */
8437.LOP_SGET_OBJECT_resolve:
8438 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8439 EXPORT_PC() @ resolve() could throw, so export now
8440 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8441 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8442 cmp r0, #0 @ success?
8443 bne .LOP_SGET_OBJECT_finish @ yes, finish
8444 b common_exceptionThrown @ no, handle exception
8445
8446
8447/* continuation for OP_SGET_BOOLEAN */
8448
8449 /*
8450 * Continuation if the field has not yet been resolved.
8451 * r1: BBBB field ref
8452 */
8453.LOP_SGET_BOOLEAN_resolve:
8454 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8455 EXPORT_PC() @ resolve() could throw, so export now
8456 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8457 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8458 cmp r0, #0 @ success?
8459 bne .LOP_SGET_BOOLEAN_finish @ yes, finish
8460 b common_exceptionThrown @ no, handle exception
8461
8462
8463/* continuation for OP_SGET_BYTE */
8464
8465 /*
8466 * Continuation if the field has not yet been resolved.
8467 * r1: BBBB field ref
8468 */
8469.LOP_SGET_BYTE_resolve:
8470 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8471 EXPORT_PC() @ resolve() could throw, so export now
8472 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8473 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8474 cmp r0, #0 @ success?
8475 bne .LOP_SGET_BYTE_finish @ yes, finish
8476 b common_exceptionThrown @ no, handle exception
8477
8478
8479/* continuation for OP_SGET_CHAR */
8480
8481 /*
8482 * Continuation if the field has not yet been resolved.
8483 * r1: BBBB field ref
8484 */
8485.LOP_SGET_CHAR_resolve:
8486 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8487 EXPORT_PC() @ resolve() could throw, so export now
8488 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8489 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8490 cmp r0, #0 @ success?
8491 bne .LOP_SGET_CHAR_finish @ yes, finish
8492 b common_exceptionThrown @ no, handle exception
8493
8494
8495/* continuation for OP_SGET_SHORT */
8496
8497 /*
8498 * Continuation if the field has not yet been resolved.
8499 * r1: BBBB field ref
8500 */
8501.LOP_SGET_SHORT_resolve:
8502 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8503 EXPORT_PC() @ resolve() could throw, so export now
8504 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8505 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8506 cmp r0, #0 @ success?
8507 bne .LOP_SGET_SHORT_finish @ yes, finish
8508 b common_exceptionThrown @ no, handle exception
8509
8510
8511/* continuation for OP_SPUT */
8512
8513 /*
8514 * Continuation if the field has not yet been resolved.
8515 * r1: BBBB field ref
8516 */
8517.LOP_SPUT_resolve:
8518 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8519 EXPORT_PC() @ resolve() could throw, so export now
8520 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8521 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8522 cmp r0, #0 @ success?
8523 bne .LOP_SPUT_finish @ yes, finish
8524 b common_exceptionThrown @ no, handle exception
8525
8526
8527/* continuation for OP_SPUT_WIDE */
8528
8529 /*
8530 * Continuation if the field has not yet been resolved.
8531 * r1: BBBB field ref
8532 * r9: &fp[AA]
8533 */
8534.LOP_SPUT_WIDE_resolve:
8535 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8536 EXPORT_PC() @ resolve() could throw, so export now
8537 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8538 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8539 cmp r0, #0 @ success?
8540 bne .LOP_SPUT_WIDE_finish @ yes, finish
8541 b common_exceptionThrown @ no, handle exception
8542
8543
8544/* continuation for OP_SPUT_OBJECT */
8545
8546 /*
8547 * Continuation if the field has not yet been resolved.
8548 * r1: BBBB field ref
8549 */
8550.LOP_SPUT_OBJECT_resolve:
8551 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8552 EXPORT_PC() @ resolve() could throw, so export now
8553 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8554 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8555 cmp r0, #0 @ success?
8556 bne .LOP_SPUT_OBJECT_finish @ yes, finish
8557 b common_exceptionThrown @ no, handle exception
8558
8559
8560/* continuation for OP_SPUT_BOOLEAN */
8561
8562 /*
8563 * Continuation if the field has not yet been resolved.
8564 * r1: BBBB field ref
8565 */
8566.LOP_SPUT_BOOLEAN_resolve:
8567 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8568 EXPORT_PC() @ resolve() could throw, so export now
8569 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8570 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8571 cmp r0, #0 @ success?
8572 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish
8573 b common_exceptionThrown @ no, handle exception
8574
8575
8576/* continuation for OP_SPUT_BYTE */
8577
8578 /*
8579 * Continuation if the field has not yet been resolved.
8580 * r1: BBBB field ref
8581 */
8582.LOP_SPUT_BYTE_resolve:
8583 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8584 EXPORT_PC() @ resolve() could throw, so export now
8585 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8586 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8587 cmp r0, #0 @ success?
8588 bne .LOP_SPUT_BYTE_finish @ yes, finish
8589 b common_exceptionThrown @ no, handle exception
8590
8591
8592/* continuation for OP_SPUT_CHAR */
8593
8594 /*
8595 * Continuation if the field has not yet been resolved.
8596 * r1: BBBB field ref
8597 */
8598.LOP_SPUT_CHAR_resolve:
8599 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8600 EXPORT_PC() @ resolve() could throw, so export now
8601 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8602 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8603 cmp r0, #0 @ success?
8604 bne .LOP_SPUT_CHAR_finish @ yes, finish
8605 b common_exceptionThrown @ no, handle exception
8606
8607
8608/* continuation for OP_SPUT_SHORT */
8609
8610 /*
8611 * Continuation if the field has not yet been resolved.
8612 * r1: BBBB field ref
8613 */
8614.LOP_SPUT_SHORT_resolve:
8615 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method
8616 EXPORT_PC() @ resolve() could throw, so export now
8617 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
8618 bl dvmResolveStaticField @ r0<- resolved StaticField ptr
8619 cmp r0, #0 @ success?
8620 bne .LOP_SPUT_SHORT_finish @ yes, finish
8621 b common_exceptionThrown @ no, handle exception
8622
8623
8624/* continuation for OP_INVOKE_VIRTUAL */
8625
8626 /*
8627 * At this point:
8628 * r0 = resolved base method
8629 * r10 = C or CCCC (index of first arg, which is the "this" ptr)
8630 */
8631.LOP_INVOKE_VIRTUAL_continue:
8632 GET_VREG(r1, r10) @ r1<- "this" ptr
8633 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
8634 cmp r1, #0 @ is "this" null?
8635 beq common_errNullObject @ null "this", throw exception
8636    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
8637    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
8638    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
8639 bl common_invokeMethodNoRange @ continue on
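
/*
 * Roughly, in C -- a sketch only of the virtual dispatch above; names follow
 * the comments, not necessarily the exact sources:
 *
 *     Object* thisPtr = (Object*) fp[C];         // first argument register
 *     if (thisPtr == NULL)
 *         goto nullObject;
 *     const Method* methodToCall =
 *         thisPtr->clazz->vtable[baseMethod->methodIndex];
 *     // common_invokeMethodNoRange builds the new frame and calls it
 */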
8640
8641
8642/* continuation for OP_INVOKE_SUPER */
8643
8644 /*
8645 * At this point:
8646 * r0 = resolved base method
8647 * r9 = method->clazz
8648 */
8649.LOP_INVOKE_SUPER_continue:
8650 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
8651 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
8652 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
8653 EXPORT_PC() @ must export for invoke
8654 cmp r2, r3 @ compare (methodIndex, vtableCount)
8655 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass
8656 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
8657    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
8658 bl common_invokeMethodNoRange @ continue on
8659
8660.LOP_INVOKE_SUPER_resolve:
8661 mov r0, r9 @ r0<- method->clazz
8662 mov r2, #METHOD_VIRTUAL @ resolver method type
8663 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
8664 cmp r0, #0 @ got null?
8665 bne .LOP_INVOKE_SUPER_continue @ no, continue
8666 b common_exceptionThrown @ yes, handle exception
8667
8668 /*
8669 * Throw a NoSuchMethodError with the method name as the message.
8670 * r0 = resolved base method
8671 */
8672.LOP_INVOKE_SUPER_nsm:
8673 ldr r1, [r0, #offMethod_name] @ r1<- method name
8674 b common_errNoSuchMethod
8675
8676
8677/* continuation for OP_INVOKE_DIRECT */
8678
8679 /*
8680 * On entry:
8681 * r1 = reference (BBBB or CCCC)
8682 * r10 = "this" register
8683 */
8684.LOP_INVOKE_DIRECT_resolve:
8685 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
8686 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
8687 mov r2, #METHOD_DIRECT @ resolver method type
8688 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
8689 cmp r0, #0 @ got null?
8690 GET_VREG(r2, r10) @ r2<- "this" ptr (reload)
8691 bne .LOP_INVOKE_DIRECT_finish @ no, continue
8692 b common_exceptionThrown @ yes, handle exception
8693
8694
8695/* continuation for OP_INVOKE_VIRTUAL_RANGE */
8696
8697 /*
8698 * At this point:
8699 * r0 = resolved base method
8700 * r10 = C or CCCC (index of first arg, which is the "this" ptr)
8701 */
8702.LOP_INVOKE_VIRTUAL_RANGE_continue:
8703 GET_VREG(r1, r10) @ r1<- "this" ptr
8704 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
8705 cmp r1, #0 @ is "this" null?
8706 beq common_errNullObject @ null "this", throw exception
8707    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
8708    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
8709    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
8710 bl common_invokeMethodRange @ continue on
8711
8712
8713/* continuation for OP_INVOKE_SUPER_RANGE */
8714
8715 /*
8716 * At this point:
8717 * r0 = resolved base method
8718 * r9 = method->clazz
8719 */
8720.LOP_INVOKE_SUPER_RANGE_continue:
8721 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
8722 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
8723 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
8724 EXPORT_PC() @ must export for invoke
8725 cmp r2, r3 @ compare (methodIndex, vtableCount)
8726 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
8727 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
8728    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
8729 bl common_invokeMethodRange @ continue on
8730
8731.LOP_INVOKE_SUPER_RANGE_resolve:
8732 mov r0, r9 @ r0<- method->clazz
8733 mov r2, #METHOD_VIRTUAL @ resolver method type
8734 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
8735 cmp r0, #0 @ got null?
8736 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
8737 b common_exceptionThrown @ yes, handle exception
8738
8739 /*
8740 * Throw a NoSuchMethodError with the method name as the message.
8741 * r0 = resolved base method
8742 */
8743.LOP_INVOKE_SUPER_RANGE_nsm:
8744 ldr r1, [r0, #offMethod_name] @ r1<- method name
8745 b common_errNoSuchMethod
8746
8747
8748/* continuation for OP_INVOKE_DIRECT_RANGE */
8749
8750 /*
8751 * On entry:
8752 * r1 = reference (BBBB or CCCC)
8753 * r10 = "this" register
8754 */
8755.LOP_INVOKE_DIRECT_RANGE_resolve:
8756 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
8757 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
8758 mov r2, #METHOD_DIRECT @ resolver method type
8759 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
8760 cmp r0, #0 @ got null?
8761 GET_VREG(r2, r10) @ r2<- "this" ptr (reload)
8762 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
8763 b common_exceptionThrown @ yes, handle exception
8764
8765
8766/* continuation for OP_FLOAT_TO_LONG */
8767/*
8768 * Convert the float in r0 to a long in r0/r1.
8769 *
8770 * We have to clip values to long min/max per the specification. The
8771 * expected common case is a "reasonable" value that converts directly
8772 * to a modest integer. The EABI convert function isn't doing this for us.
8773 */
8774f2l_doconv:
8775 stmfd sp!, {r4, lr}
8776 mov r1, #0x5f000000 @ (float)maxlong
8777 mov r4, r0
8778 bl __aeabi_fcmpge @ is arg >= maxlong?
8779 cmp r0, #0 @ nonzero == yes
8780    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
8781 mvnne r1, #0x80000000
8782 ldmnefd sp!, {r4, pc}
8783
8784 mov r0, r4 @ recover arg
8785 mov r1, #0xdf000000 @ (float)minlong
8786 bl __aeabi_fcmple @ is arg <= minlong?
8787 cmp r0, #0 @ nonzero == yes
8788    movne   r0, #0                      @ return minlong (8000000000000000)
8789 movne r1, #0x80000000
8790 ldmnefd sp!, {r4, pc}
8791
8792 mov r0, r4 @ recover arg
8793 mov r1, r4
8794 bl __aeabi_fcmpeq @ is arg == self?
8795 cmp r0, #0 @ zero == no
8796 moveq r1, #0 @ return zero for NaN
8797 ldmeqfd sp!, {r4, pc}
8798
8799 mov r0, r4 @ recover arg
8800 bl __aeabi_f2lz @ convert float to long
8801 ldmfd sp!, {r4, pc}
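
/*
 * Roughly, in C -- a sketch only of the clamping above; 0x5f000000 and
 * 0xdf000000 are the single-precision bit patterns for (float)2^63 and
 * (float)-2^63:
 *
 *     long long f2l(float x) {
 *         if (x >= 9223372036854775808.0f)  return LLONG_MAX;  // clamp high
 *         if (x <= -9223372036854775808.0f) return LLONG_MIN;  // clamp low
 *         if (x != x)                       return 0;          // NaN
 *         return (long long) x;             // plain __aeabi_f2lz conversion
 *     }
 */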
8802
8803
8804/* continuation for OP_DOUBLE_TO_LONG */
8805/*
8806 * Convert the double in r0/r1 to a long in r0/r1.
8807 *
8808 * We have to clip values to long min/max per the specification. The
8809 * expected common case is a "reasonable" value that converts directly
8810 * to a modest integer. The EABI convert function isn't doing this for us.
8811 */
8812d2l_doconv:
8813 stmfd sp!, {r4, r5, lr} @ save regs
8814    mov     r3, #0x43000000             @ maxlong, as a double (high word)
8815    add     r3, #0x00e00000             @  0x43e00000
8816    mov     r2, #0                      @ maxlong, as a double (low word)
8817    sub     sp, sp, #4                  @ align for EABI
8818    mov     r4, r0                      @ save a copy of r0
8819    mov     r5, r1                      @  and r1
8820 bl __aeabi_dcmpge @ is arg >= maxlong?
8821 cmp r0, #0 @ nonzero == yes
8822 mvnne r0, #0 @ return maxlong (7fffffffffffffff)
8823 mvnne r1, #0x80000000
8824 bne 1f
8825
8826 mov r0, r4 @ recover arg
8827 mov r1, r5
8828    mov     r3, #0xc3000000             @ minlong, as a double (high word)
8829    add     r3, #0x00e00000             @  0xc3e00000
8830    mov     r2, #0                      @ minlong, as a double (low word)
8831    bl      __aeabi_dcmple              @ is arg <= minlong?
8832 cmp r0, #0 @ nonzero == yes
8833 movne r0, #0 @ return minlong (8000000000000000)
8834 movne r1, #0x80000000
8835 bne 1f
8836
8837 mov r0, r4 @ recover arg
8838 mov r1, r5
8839 mov r2, r4 @ compare against self
8840 mov r3, r5
8841 bl __aeabi_dcmpeq @ is arg == self?
8842 cmp r0, #0 @ zero == no
8843 moveq r1, #0 @ return zero for NaN
8844 beq 1f
8845
8846 mov r0, r4 @ recover arg
8847 mov r1, r5
8848 bl __aeabi_d2lz @ convert double to long
8849
88501:
8851 add sp, sp, #4
8852 ldmfd sp!, {r4, r5, pc}
8853
8854
8855/* continuation for OP_MUL_LONG */
8856
8857.LOP_MUL_LONG_finish:
8858 GET_INST_OPCODE(ip) @ extract opcode from rINST
8859 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
8860 GOTO_OPCODE(ip) @ jump to next instruction
8861
8862
8863/* continuation for OP_SHL_LONG */
8864
8865.LOP_SHL_LONG_finish:
8866 mov r0, r0, asl r2 @ r0<- r0 << r2
8867 GET_INST_OPCODE(ip) @ extract opcode from rINST
8868 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
8869 GOTO_OPCODE(ip) @ jump to next instruction
8870
8871
8872/* continuation for OP_SHR_LONG */
8873
8874.LOP_SHR_LONG_finish:
8875 mov r1, r1, asr r2 @ r1<- r1 >> r2
8876 GET_INST_OPCODE(ip) @ extract opcode from rINST
8877 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
8878 GOTO_OPCODE(ip) @ jump to next instruction
8879
8880
8881/* continuation for OP_USHR_LONG */
8882
8883.LOP_USHR_LONG_finish:
8884 mov r1, r1, lsr r2 @ r1<- r1 >>> r2
8885 GET_INST_OPCODE(ip) @ extract opcode from rINST
8886 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
8887 GOTO_OPCODE(ip) @ jump to next instruction
8888
8889
8890/* continuation for OP_SHL_LONG_2ADDR */
8891
8892.LOP_SHL_LONG_2ADDR_finish:
8893 GET_INST_OPCODE(ip) @ extract opcode from rINST
8894 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
8895 GOTO_OPCODE(ip) @ jump to next instruction
8896
8897
8898/* continuation for OP_SHR_LONG_2ADDR */
8899
8900.LOP_SHR_LONG_2ADDR_finish:
8901 GET_INST_OPCODE(ip) @ extract opcode from rINST
8902 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
8903 GOTO_OPCODE(ip) @ jump to next instruction
8904
8905
8906/* continuation for OP_USHR_LONG_2ADDR */
8907
8908.LOP_USHR_LONG_2ADDR_finish:
8909 GET_INST_OPCODE(ip) @ extract opcode from rINST
8910 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
8911 GOTO_OPCODE(ip) @ jump to next instruction
8912
8913
8914/* continuation for OP_EXECUTE_INLINE */
8915
8916 /*
8917 * Extract args, call function.
8918 * r0 = #of args (0-4)
8919 * r10 = call index
8920 * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
8921 *
8922 * Other ideas:
8923 * - Use a jump table from the main piece to jump directly into the
8924 * AND/LDR pairs. Costs a data load, saves a branch.
8925 * - Have five separate pieces that do the loading, so we can interleave
8926 *   the work a little better.  Increases code size.
8927 */
8928.LOP_EXECUTE_INLINE_continue:
8929 rsb r0, r0, #4 @ r0<- 4-r0
8930 FETCH(r9, 2) @ r9<- FEDC
8931 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
8932 bl common_abort @ (skipped due to ARM prefetch)
89334: and ip, r9, #0xf000 @ isolate F
8934 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
89353: and ip, r9, #0x0f00 @ isolate E
8936 ldr r2, [rFP, ip, lsr #6] @ r2<- vE
89372: and ip, r9, #0x00f0 @ isolate D
8938 ldr r1, [rFP, ip, lsr #2] @ r1<- vD
89391: and ip, r9, #0x000f @ isolate C
8940 ldr r0, [rFP, ip, lsl #2] @ r0<- vC
89410:
8942 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
8943 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry
8944 @ (not reached)
8945
8946.LOP_EXECUTE_INLINE_table:
8947 .word gDvmInlineOpsTable
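
/*
 * Roughly, in C -- a sketch only of the argument unpacking above; names
 * follow the comments, not necessarily the exact sources:
 *
 *     u4 arg0 = 0, arg1 = 0, arg2 = 0, arg3 = 0;
 *     switch (argCount) {                        // deliberate fall-through
 *     case 4: arg3 = fp[(FEDC >> 12) & 0x0f];
 *     case 3: arg2 = fp[(FEDC >>  8) & 0x0f];
 *     case 2: arg1 = fp[(FEDC >>  4) & 0x0f];
 *     case 1: arg0 = fp[ FEDC        & 0x0f];
 *     case 0: break;
 *     }
 *     // then jump through gDvmInlineOpsTable[opIndex].func with arg0..arg3;
 *     // each table entry is 16 bytes and "func" is its first field, which is
 *     // what LDR_PC "[r9, r10, lsl #4]" indexes.
 */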
8948
8949
8950/* continuation for OP_EXECUTE_INLINE_RANGE */
8951
8952 /*
8953 * Extract args, call function.
8954 * r0 = #of args (0-4)
8955 * r10 = call index
8956 * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
8957 */
8958.LOP_EXECUTE_INLINE_RANGE_continue:
8959 rsb r0, r0, #4 @ r0<- 4-r0
8960 FETCH(r9, 2) @ r9<- CCCC
8961 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
8962 bl common_abort @ (skipped due to ARM prefetch)
89634: add ip, r9, #3 @ base+3
8964 GET_VREG(r3, ip) @ r3<- vBase[3]
89653: add ip, r9, #2 @ base+2
8966 GET_VREG(r2, ip) @ r2<- vBase[2]
89672: add ip, r9, #1 @ base+1
8968 GET_VREG(r1, ip) @ r1<- vBase[1]
89691: add ip, r9, #0 @ (nop)
8970 GET_VREG(r0, ip) @ r0<- vBase[0]
89710:
8972 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
8973 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry
8974 @ (not reached)
8975
8976.LOP_EXECUTE_INLINE_RANGE_table:
8977 .word gDvmInlineOpsTable
8978
8979
8980    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
8981 .global dvmAsmSisterEnd
8982dvmAsmSisterEnd:
8983
8984/* File: armv5te/footer.S */
8985
8986/*
8987 * ===========================================================================
8988 * Common subroutines and data
8989 * ===========================================================================
8990 */
8991
8992
8993
8994 .text
8995 .align 2
8996
8997#if defined(WITH_JIT)
8998#if defined(WITH_SELF_VERIFICATION)
8999 .global dvmJitToInterpPunt
9000dvmJitToInterpPunt:
9001 mov r2,#kSVSPunt @ r2<- interpreter entry point
9002 b dvmJitSelfVerificationEnd @ doesn't return
9003
9004 .global dvmJitToInterpSingleStep
9005dvmJitToInterpSingleStep:
9006 mov r2,#kSVSSingleStep @ r2<- interpreter entry point
9007 b dvmJitSelfVerificationEnd @ doesn't return
9008
9009 .global dvmJitToTraceSelect
9010dvmJitToTraceSelect:
9011    ldr    r0,[lr, #-1] @ pass our target PC
9012    mov    r2,#kSVSTraceSelect @ r2<- interpreter entry point
9013 b dvmJitSelfVerificationEnd @ doesn't return
9014
9015 .global dvmJitToBackwardBranch
9016dvmJitToBackwardBranch:
9017    ldr    r0,[lr, #-1] @ pass our target PC
9018    mov    r2,#kSVSBackwardBranch @ r2<- interpreter entry point
9019 b dvmJitSelfVerificationEnd @ doesn't return
9020
9021 .global dvmJitToInterpNormal
9022dvmJitToInterpNormal:
9023    ldr    r0,[lr, #-1] @ pass our target PC
9024    mov    r2,#kSVSNormal @ r2<- interpreter entry point
9025 b dvmJitSelfVerificationEnd @ doesn't return
9026
9027 .global dvmJitToInterpNoChain
9028dvmJitToInterpNoChain:
9029 mov r0,rPC @ pass our target PC
9030 mov r2,#kSVSNoChain @ r2<- interpreter entry point
9031 b dvmJitSelfVerificationEnd @ doesn't return
9032#else
9033/*
9034 * Return from the translation cache to the interpreter when the compiler is
9035 * having issues translating/executing a Dalvik instruction. We have to skip
9036 * the code cache lookup, otherwise it is possible to bounce indefinitely
9037 * between the interpreter and the code cache if the instruction that fails
9038 * to be compiled happens to be at a trace start.
9039 */
9040 .global dvmJitToInterpPunt
9041dvmJitToInterpPunt:
9042    ldr    r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
9043    mov    rPC, r0
9044#ifdef EXIT_STATS
9045 mov r0,lr
9046 bl dvmBumpPunt;
9047#endif
9048 EXPORT_PC()
9049    mov    r0, #0
9050    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
9051    adrl   rIBASE, dvmAsmInstructionStart
9052 FETCH_INST()
9053 GET_INST_OPCODE(ip)
9054 GOTO_OPCODE(ip)
9055
9056/*
9057 * Return to the interpreter to handle a single instruction.
9058 * On entry:
9059 * r0 <= PC
9060 * r1 <= PC of resume instruction
9061 * lr <= resume point in translation
9062 */
9063 .global dvmJitToInterpSingleStep
9064dvmJitToInterpSingleStep:
9065 str lr,[rGLUE,#offGlue_jitResume]
9066 str r1,[rGLUE,#offGlue_jitResumePC]
9067 mov r1,#kInterpEntryInstr
9068 @ enum is 4 byte in aapcs-EABI
9069 str r1, [rGLUE, #offGlue_entryPoint]
9070 mov rPC,r0
9071 EXPORT_PC()
9072
9073    adrl   rIBASE, dvmAsmInstructionStart
9074 mov r2,#kJitSingleStep @ Ask for single step and then revert
9075 str r2,[rGLUE,#offGlue_jitState]
9076 mov r1,#1 @ set changeInterp to bail to debug interp
9077 b common_gotoBail
9078
9079
9080/*
9081 * Return from the translation cache and immediately request
9082 * a translation for the exit target. Commonly used following
9083 * invokes.
9084 */
9085 .global dvmJitToTraceSelect
9086dvmJitToTraceSelect:
9087    ldr    rPC,[lr, #-1] @ get our target PC
9088    ldr    r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
9089    add    rINST,lr,#-5 @ save start of chain branch
9090    mov    r0,rPC
9091    bl     dvmJitGetCodeAddr @ Is there a translation?
9092    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
9093    cmp    r0,#0
9094 beq 2f
9095 mov r1,rINST
9096 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
9097    mov    r1, rPC @ arg1 of translation may need this
9098    mov    lr, #0 @ in case target is HANDLER_INTERPRET
9099    cmp    r0,#0 @ successful chain?
9100 bxne r0 @ continue native execution
9101 b toInterpreter @ didn't chain - resume with interpreter
9102
9103/* No translation, so request one if profiling isn't disabled */
91042:
9105 adrl rIBASE, dvmAsmInstructionStart
9106 GET_JIT_PROF_TABLE(r0)
9107 FETCH_INST()
9108 cmp r0, #0
9109 bne common_selectTrace
9110 GET_INST_OPCODE(ip)
9111 GOTO_OPCODE(ip)
9112
9113/*
9114 * Return from the translation cache to the interpreter.
9115 * The return was done with a BLX from thumb mode, and
9116 * the following 32-bit word contains the target rPC value.
9117 * Note that lr (r14) will have its low-order bit set to denote
9118 * its thumb-mode origin.
9119 *
9120 * We'll need to stash our lr origin away, recover the new
9121 * target and then check to see if there is a translation available
9122 * for our new target. If so, we do a translation chain and
9123 * go back to native execution. Otherwise, it's back to the
9124 * interpreter (after treating this entry as a potential
9125 * trace start).
9126 */
9127 .global dvmJitToInterpNormal
9128dvmJitToInterpNormal:
9129    ldr    rPC,[lr, #-1] @ get our target PC
9130    ldr    r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
9131    add    rINST,lr,#-5 @ save start of chain branch
9132#ifdef EXIT_STATS
9133 bl dvmBumpNormal
9134#endif
9135 mov r0,rPC
9136 bl dvmJitGetCodeAddr @ Is there a translation?
9137    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
9138    cmp    r0,#0
9139 beq toInterpreter @ go if not, otherwise do chain
9140 mov r1,rINST
9141 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
9142    mov    r1, rPC @ arg1 of translation may need this
9143    mov    lr, #0 @ in case target is HANDLER_INTERPRET
9144    cmp    r0,#0 @ successful chain?
9145 bxne r0 @ continue native execution
9146 b toInterpreter @ didn't chain - resume with interpreter
9147
9148/*
9149 * Return from the translation cache to the interpreter to do method invocation.
9150 * Check if translation exists for the callee, but don't chain to it.
9151 */
9152 .global dvmJitToInterpNoChain
9153dvmJitToInterpNoChain:
9154#ifdef EXIT_STATS
9155 bl dvmBumpNoChain
9156#endif
9157    ldr    r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
9158    mov    r0,rPC
9159    bl     dvmJitGetCodeAddr @ Is there a translation?
9160    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
9161    mov    r1, rPC @ arg1 of translation may need this
9162    mov    lr, #0 @ in case target is HANDLER_INTERPRET
9163    cmp    r0,#0
9164    bxne   r0 @ continue native execution if so
9165#endif
9166
9167/*
9168 * No translation, restore interpreter regs and start interpreting.
9169 * rGLUE & rFP were preserved in the translated code, and rPC has
9170 * already been restored by the time we get here. We'll need to set
9171 * up rIBASE & rINST, and load the address of the JitTable into r0.
9172 */
9173toInterpreter:
9174 EXPORT_PC()
9175 adrl rIBASE, dvmAsmInstructionStart
9176 FETCH_INST()
9177 GET_JIT_PROF_TABLE(r0)
9178 @ NOTE: intended fallthrough
9179/*
9180 * Common code to update potential trace start counter, and initiate
9181 * a trace-build if appropriate. On entry, rPC should point to the
9182 * next instruction to execute, and rINST should be already loaded with
9183 * the next opcode word, and r0 holds a pointer to the jit profile
9184 * table (pJitProfTable).
9185 */
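
/*
 * Roughly, in C -- a sketch only of the counter update below; names follow
 * the comments, not necessarily the exact sources:
 *
 *     u4 hash = (rPC ^ (rPC >> 12)) & ((1u << JIT_PROF_SIZE_LOG_2) - 1);
 *     if (--pJitProfTable[hash] != 0)            // counters are bytes
 *         goto nextInstruction;                  // not hot yet
 *     pJitProfTable[hash] = jitThreshold;        // reset the counter
 *     if (dvmJitGetCodeAddr(rPC) != NULL)
 *         goto translation;                      // already compiled
 *     // otherwise request trace selection and bail to the debug interpreter
 */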
9186common_testUpdateProfile:
9187 cmp r0,#0
9188 GET_INST_OPCODE(ip)
9189 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */
9190
9191common_updateProfile:
9192 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function
9193    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits
9194    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
9195    GET_INST_OPCODE(ip)
9196    subs    r1,r1,#1 @ decrement counter
9197    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
9198    GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */
9199
9200/*
9201 * Here, we switch to the debug interpreter to request
9202 * trace selection. First, though, check to see if there
9203 * is already a native translation in place (and, if so,
9204 * jump to it now).
9205 */
9206    GET_JIT_THRESHOLD(r1)
9207    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
9208    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
9209    EXPORT_PC()
9210 mov r0,rPC
9211 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC)
9212    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
9213    mov    r1, rPC @ arg1 of translation may need this
9214    mov    lr, #0 @ in case target is HANDLER_INTERPRET
9215    cmp    r0,#0
9216#if !defined(WITH_SELF_VERIFICATION)
9217    bxne   r0 @ jump to the translation
9218#else
9219    beq    common_selectTrace
9220 /*
9221 * At this point, we have a target translation. However, if
9222 * that translation is actually the interpret-only pseudo-translation
9223 * we want to treat it the same as no translation.
9224 */
9225 mov r10, r0 @ save target
9226 bl dvmCompilerGetInterpretTemplate
9227 cmp r0, r10 @ special case?
9228 bne dvmJitSelfVerificationStart @ set up self verification
9229 GET_INST_OPCODE(ip)
9230 GOTO_OPCODE(ip)
9231 /* no return */
9232#endif
9233
9234common_selectTrace:
9235 mov r2,#kJitTSelectRequest @ ask for trace selection
9236 str r2,[rGLUE,#offGlue_jitState]
9237    mov     r2,#kInterpEntryInstr @ normal entry reason
9238    str     r2,[rGLUE,#offGlue_entryPoint]
9239    mov     r1,#1 @ set changeInterp
9240 b common_gotoBail
9241
9242#if defined(WITH_SELF_VERIFICATION)
9243/*
9244 * Save PC and registers to shadow memory for self verification mode
9245 * before jumping to native translation.
9246 * On entry, r10 contains the address of the target translation.
9247 */
9248dvmJitSelfVerificationStart:
9249    mov    r0,rPC @ r0<- program counter
9250    mov    r1,rFP @ r1<- frame pointer
9251    mov    r2,rGLUE @ r2<- InterpState pointer
9252    mov    r3,r10 @ r3<- target translation
9253    bl     dvmSelfVerificationSaveState @ save registers to shadow space
9254    ldr    rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
9255    add    rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
9256    bx     r10 @ jump to the translation
9257
9258/*
9259 * Restore PC, registers, and interpState to original values
9260 * before jumping back to the interpreter.
9261 */
9262dvmJitSelfVerificationEnd:
9263    ldr    r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
9264 mov r1, #0
9265 str r1, [r10, #offThread_inJitCodeCache] @ Back to the interp land
9266    mov    r1,rFP @ pass ending fp
9267    bl     dvmSelfVerificationRestoreState @ restore pc and fp values
9268    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
9269 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP
9270 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
9271 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state
9272    cmp    r1,#0 @ check for punt condition
9273 beq 1f
9274 mov r2,#kJitSelfVerification @ ask for self verification
9275 str r2,[rGLUE,#offGlue_jitState]
9276    mov    r2,#kInterpEntryInstr @ normal entry reason
9277    str    r2,[rGLUE,#offGlue_entryPoint]
9278    mov    r1,#1 @ set changeInterp
9279 b common_gotoBail
9280
92811: @ exit to interpreter without check
9282 EXPORT_PC()
9283 adrl rIBASE, dvmAsmInstructionStart
9284 FETCH_INST()
9285 GET_INST_OPCODE(ip)
9286 GOTO_OPCODE(ip)
9287#endif
9288
9289#endif
9290
9291/*
9292 * Common code when a backward branch is taken.
9293 *
9294 * On entry:
9295 * r9 is PC adjustment *in bytes*
9296 */
9297common_backwardBranch:
9298 mov r0, #kInterpEntryInstr
9299 bl common_periodicChecks
9300#if defined(WITH_JIT)
9301 GET_JIT_PROF_TABLE(r0)
9302 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
9303 cmp r0,#0
9304 bne common_updateProfile
9305 GET_INST_OPCODE(ip)
9306 GOTO_OPCODE(ip)
9307#else
9308 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
9309 GET_INST_OPCODE(ip) @ extract opcode from rINST
9310 GOTO_OPCODE(ip) @ jump to next instruction
9311#endif
9312
9313
9314/*
9315 * Need to see if the thread needs to be suspended or debugger/profiler
9316 * activity has begun.
9317 *
9318 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
9319 * have to do the second ldr.
9320 *
9321 * TODO: reduce this so we're just checking a single location.
9322 *
9323 * On entry:
9324 * r0 is reentry type, e.g. kInterpEntryInstr
9325 * r9 is trampoline PC adjustment *in bytes*
9326 */
9327common_periodicChecks:
9328 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount
9329
9330    @ speculatively store r0 before it is clobbered by dvmCheckSuspendPending
9331 str r0, [rGLUE, #offGlue_entryPoint]
9332
9333#if defined(WITH_DEBUGGER)
9334 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive
9335#endif
9336#if defined(WITH_PROFILER)
9337 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers
9338#endif
9339
9340 ldr r3, [r3] @ r3<- suspendCount (int)
9341
9342#if defined(WITH_DEBUGGER)
9343 ldrb r1, [r1] @ r1<- debuggerActive (boolean)
9344#endif
9345#if defined (WITH_PROFILER)
9346 ldr r2, [r2] @ r2<- activeProfilers (int)
9347#endif
9348
9349 cmp r3, #0 @ suspend pending?
9350 bne 2f @ yes, do full suspension check
9351
9352#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
9353# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
9354 orrs r1, r1, r2 @ r1<- r1 | r2
9355 cmp r1, #0 @ debugger attached or profiler started?
9356# elif defined(WITH_DEBUGGER)
9357 cmp r1, #0 @ debugger attached?
9358# elif defined(WITH_PROFILER)
9359 cmp r2, #0 @ profiler started?
9360# endif
9361 bne 3f @ debugger/profiler, switch interp
9362#endif
9363
9364 bx lr @ nothing to do, return
9365
93662: @ check suspend
9367#if defined(WITH_JIT)
9368 /*
9369 * Refresh the Jit's cached copy of profile table pointer. This pointer
9370 * doubles as the Jit's on/off switch.
9371 */
9372    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<- &gDvmJit.pJitProfTable
9373    ldr     r0, [rGLUE, #offGlue_self] @ r0<- glue->self
9374    ldr     r3, [r3] @ r3<- pJitProfTable
9375 EXPORT_PC() @ need for precise GC
9376 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
9377#else
9378    ldr     r0, [rGLUE, #offGlue_self] @ r0<- glue->self
9379    EXPORT_PC() @ need for precise GC
9380#endif
9381    b       dvmCheckSuspendPending @ suspend if necessary, then return
9382
93833: @ debugger/profiler enabled, bail out
9384 add rPC, rPC, r9 @ update rPC
Andy McFaddena5069fb2009-06-19 15:20:12 -07009385 mov r1, #1 @ "want switch" = true
9386 b common_gotoBail
9387
9388
9389/*
9390 * The equivalent of "goto bail", this calls through the "bail handler".
9391 *
9392 * State registers will be saved to the "glue" area before bailing.
9393 *
9394 * On entry:
9395 * r1 is "bool changeInterp", indicating if we want to switch to the
9396 * other interpreter or just bail all the way out
9397 */
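/*
 * Roughly: save rPC/rFP into the glue struct, then call
 * dvmMterpStdBail(glue, changeInterp), which unwinds out of the
 * interpreter loop and does not return here. The commented-out lines
 * below are the older _longjmp-based variant of the same thing.
 */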
9398common_gotoBail:
9399 SAVE_PC_FP_TO_GLUE() @ export state to "glue"
9400 mov r0, rGLUE @ r0<- glue ptr
9401 b dvmMterpStdBail @ call(glue, changeInterp)
9402
9403 @add r1, r1, #1 @ using (boolean+1)
9404 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf
9405 @bl _longjmp @ does not return
9406 @bl common_abort
9407
9408
9409/*
9410 * Common code for method invocation with range.
9411 *
9412 * On entry:
9413 * r0 is "Method* methodToCall", the method we're trying to call
9414 */
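/*
 * The argument copy below is, in effect (sketch; AA and CCCC come from
 * the instruction stream as noted in the comments):
 *
 *   count = AA;                       // rINST >> 8
 *   src   = &fp[CCCC];                // first argument register
 *   outs  = (u4*)saveArea - count;    // outs area grows down from savearea
 *   while (count--)
 *       *outs++ = *src++;             // vCCCC .. v(CCCC+AA-1) -> outs[0..]
 *
 * Control then joins .LinvokeArgsDone, which is shared with the
 * non-range case.
 */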
9415common_invokeMethodRange:
9416.LinvokeNewRange:
9417 @ prepare to copy args to "outs" area of current frame
9418 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero
9419 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
9420 beq .LinvokeArgsDone @ if no args, skip the rest
9421 FETCH(r1, 2) @ r1<- CCCC
9422
9423 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
9424 @ (very few methods have > 10 args; could unroll for common cases)
9425 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC]
9426 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args
9427 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize
94281: ldr r1, [r3], #4 @ val = *fp++
9429 subs r2, r2, #1 @ count--
9430 str r1, [r10], #4 @ *outs++ = val
9431 bne 1b @ ...while count != 0
9432 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize
9433 b .LinvokeArgsDone
9434
9435/*
9436 * Common code for method invocation without range.
9437 *
9438 * On entry:
9439 * r0 is "Method* methodToCall", the method we're trying to call
9440 */
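/*
 * The non-range form carries its operands in the instruction itself:
 * the count in B (rINST >> 12), the fifth argument register in A, and
 * the other four in the GFED code unit. The computed goto below stores
 * them highest-first with *--outs, so the outs area ends up holding
 * vD, vE, vF, vG, vA in increasing address order.
 */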
9441common_invokeMethodNoRange:
9442.LinvokeNewNoRange:
9443 @ prepare to copy args to "outs" area of current frame
9444 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero
9445 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
9446 FETCH(r1, 2) @ r1<- GFED (load here to hide latency)
9447 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize
9448 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize
9449 beq .LinvokeArgsDone
9450
9451 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
9452.LinvokeNonRange:
9453 rsb r2, r2, #5 @ r2<- 5-r2
9454 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each
9455 bl common_abort @ (skipped due to ARM prefetch)
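    /*
     * Why the computed goto works: reading pc in ARM state yields the
     * address of the "add" itself plus 8, which is exactly label 5 (the
     * "bl common_abort" in between is never executed). Adding
     * 16*(5-count) then skips one 4-instruction, 16-byte group per
     * missing argument: count==5 lands on label 5, count==4 on label 4,
     * and so on down to label 1; label 0 simply falls through to
     * .LinvokeArgsDone. Each group stores exactly one argument with
     * *--outs.
     */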
94565: and ip, rINST, #0x0f00 @ isolate A
9457 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2)
9458 mov r0, r0 @ nop
9459 str r2, [r10, #-4]! @ *--outs = vA
94604: and ip, r1, #0xf000 @ isolate G
9461 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2)
9462 mov r0, r0 @ nop
9463 str r2, [r10, #-4]! @ *--outs = vG
94643: and ip, r1, #0x0f00 @ isolate F
9465 ldr r2, [rFP, ip, lsr #6] @ r2<- vF
9466 mov r0, r0 @ nop
9467 str r2, [r10, #-4]! @ *--outs = vF
94682: and ip, r1, #0x00f0 @ isolate E
9469 ldr r2, [rFP, ip, lsr #2] @ r2<- vE
9470 mov r0, r0 @ nop
9471 str r2, [r10, #-4]! @ *--outs = vE
94721: and ip, r1, #0x000f @ isolate D
9473 ldr r2, [rFP, ip, lsl #2] @ r2<- vD
9474 mov r0, r0 @ nop
9475 str r2, [r10, #-4]! @ *--outs = vD
94760: @ fall through to .LinvokeArgsDone
9477
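/*
 * Frame layout used below (the interpreter stack grows down): the new
 * frame is carved out directly beneath the caller's save area, so
 *
 *   newFp       = oldSaveArea - regsSize * 4
 *   newSaveArea = newFp - sizeof(StackSaveArea)
 *   bottom      = newSaveArea - outsSize * 4
 *
 * and the call fits only if bottom >= glue->interpStackEnd; otherwise
 * control goes to .LstackOverflow.
 */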
9478.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
9479 ldr r2, [r0, #offMethod_insns] @ r2<- method->insns
9480 ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz
9481 @ find space for the new stack frame, check for overflow
9482 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
9483 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize)
9484 SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea
9485@ bl common_dumpRegs
9486 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd
9487 sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize)
9488 cmp r3, r9 @ bottom < interpStackEnd?
9489 ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
9490 blt .LstackOverflow @ yes, this frame will overflow stack
9491
9492 @ set up newSaveArea
9493#ifdef EASY_GDB
9494 SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area
9495 str ip, [r10, #offStackSaveArea_prevSave]
9496#endif
9497 str rFP, [r10, #offStackSaveArea_prevFrame]
9498 str rPC, [r10, #offStackSaveArea_savedPc]
9499#if defined(WITH_JIT)
9500 mov r9, #0
9501 str r9, [r10, #offStackSaveArea_returnAddr]
9502#endif
9503 str r0, [r10, #offStackSaveArea_method]
9504 tst r3, #ACC_NATIVE
9505 bne .LinvokeNative
9506
9507 /*
9508 stmfd sp!, {r0-r3}
9509 bl common_printNewline
9510 mov r0, rFP
9511 mov r1, #0
9512 bl dvmDumpFp
9513 ldmfd sp!, {r0-r3}
9514 stmfd sp!, {r0-r3}
9515 mov r0, r1
9516 mov r1, r10
9517 bl dvmDumpFp
9518 bl common_printNewline
9519 ldmfd sp!, {r0-r3}
9520 */
9521
9522 ldrh r9, [r2] @ r9 <- load INST from new PC
9523 ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
9524 mov rPC, r2 @ publish new rPC
9525 ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self
9526
9527 @ Update "glue" values for the new method
9528 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
9529 str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall
9530 str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
9531#if defined(WITH_JIT)
9532 GET_JIT_PROF_TABLE(r0)
9533 mov rFP, r1 @ fp = newFp
9534 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
9535 mov rINST, r9 @ publish new rINST
9536 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp
9537 cmp r0,#0
9538 bne common_updateProfile
9539 GOTO_OPCODE(ip) @ jump to next instruction
9540#else
9541 mov rFP, r1 @ fp = newFp
9542 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
9543 mov rINST, r9 @ publish new rINST
9544 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp
9545 GOTO_OPCODE(ip) @ jump to next instruction
9546#endif
9547
9548.LinvokeNative:
9549 @ Prep for the native call
9550 @ r0=methodToCall, r1=newFp, r10=newSaveArea
9551 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
Andy McFaddend5ab7262009-08-25 07:19:34 -07009552 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
Andy McFaddena5069fb2009-06-19 15:20:12 -07009553 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
Andy McFaddend5ab7262009-08-25 07:19:34 -07009554 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
Andy McFaddena5069fb2009-06-19 15:20:12 -07009555 mov r9, r3 @ r9<- glue->self (preserve)
9556
9557 mov r2, r0 @ r2<- methodToCall
9558 mov r0, r1 @ r0<- newFp (points to args)
9559 add r1, rGLUE, #offGlue_retval @ r1<- &retval
9560
9561#ifdef ASSIST_DEBUGGER
9562 /* insert fake function header to help gdb find the stack frame */
9563 b .Lskip
9564 .type dalvik_mterp, %function
9565dalvik_mterp:
9566 .fnstart
9567 MTERP_ENTRY1
9568 MTERP_ENTRY2
9569.Lskip:
9570#endif
9571
9572 @mov lr, pc @ set return addr
9573 @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
9574 LDR_PC_LR "[r2, #offMethod_nativeFunc]"
9575
Bill Buzbee964a7b02010-01-28 12:54:19 -08009576#if defined(WITH_JIT)
9577 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
9578#endif
9579
Andy McFaddena5069fb2009-06-19 15:20:12 -07009580 @ native return; r9=self, r10=newSaveArea
9581 @ equivalent to dvmPopJniLocals
Andy McFaddend5ab7262009-08-25 07:19:34 -07009582 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
Andy McFaddena5069fb2009-06-19 15:20:12 -07009583 ldr r1, [r9, #offThread_exception] @ check for exception
Bill Buzbee964a7b02010-01-28 12:54:19 -08009584#if defined(WITH_JIT)
9585 ldr r3, [r3] @ r3 <- gDvmJit.pProfTable
9586#endif
Andy McFaddena5069fb2009-06-19 15:20:12 -07009587 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
9588 cmp r1, #0 @ null?
Andy McFaddend5ab7262009-08-25 07:19:34 -07009589 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
Bill Buzbee964a7b02010-01-28 12:54:19 -08009590#if defined(WITH_JIT)
9591 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
9592#endif
Andy McFaddena5069fb2009-06-19 15:20:12 -07009593 bne common_exceptionThrown @ no, handle exception
9594
9595 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
9596 GET_INST_OPCODE(ip) @ extract opcode from rINST
9597 GOTO_OPCODE(ip) @ jump to next instruction
9598
Andy McFadden6ed1a0f2009-09-10 15:34:19 -07009599.LstackOverflow: @ r0=methodToCall
9600 mov r1, r0 @ r1<- methodToCall
Andy McFaddena5069fb2009-06-19 15:20:12 -07009601 ldr r0, [rGLUE, #offGlue_self] @ r0<- self
9602 bl dvmHandleStackOverflow
9603 b common_exceptionThrown
9604#ifdef ASSIST_DEBUGGER
9605 .fnend
9606#endif
9607
9608
9609 /*
9610 * Common code for method invocation, calling through "glue code".
9611 *
9612 * TODO: now that we have range and non-range invoke handlers, this
9613 * needs to be split into two. Maybe just create entry points
9614 * that set r9 and jump here?
9615 *
9616 * On entry:
9617 * r0 is "Method* methodToCall", the method we're trying to call
9618 * r9 is "bool methodCallRange", indicating if this is a /range variant
9619 */
9620 .if 0
9621.LinvokeOld:
9622 sub sp, sp, #8 @ space for args + pad
9623 FETCH(ip, 2) @ ip<- FEDC or CCCC
9624 mov r2, r0 @ A2<- methodToCall
9625 mov r0, rGLUE @ A0<- glue
9626 SAVE_PC_FP_TO_GLUE() @ export state to "glue"
9627 mov r1, r9 @ A1<- methodCallRange
9628 mov r3, rINST, lsr #8 @ A3<- AA
9629 str ip, [sp, #0] @ A4<- ip
9630 bl dvmMterp_invokeMethod @ call the C invokeMethod
9631 add sp, sp, #8 @ remove arg area
9632 b common_resumeAfterGlueCall @ continue to next instruction
9633 .endif
9634
9635
9636
9637/*
9638 * Common code for handling a return instruction.
9639 *
9640 * This does not return.
9641 */
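/*
 * In outline (sketch): run the periodic checks, pop back to the caller
 * via saveArea->prevFrame, and resume at saveArea->savedPc plus three
 * code units (the width of the invoke that got us here). A NULL method
 * in the previous frame marks a break frame, in which case we bail out
 * of the interpreter entirely. With WITH_JIT, a non-zero
 * saveArea->returnAddr sends control back into compiled code instead.
 */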
9642common_returnFromMethod:
9643.LreturnNew:
9644 mov r0, #kInterpEntryReturn
9645 mov r9, #0
9646 bl common_periodicChecks
9647
9648 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
9649 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
9650 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
9651 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
9652 @ r2<- method we're returning to
9653 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self
9654 cmp r2, #0 @ is this a break frame?
9655 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
9656 mov r1, #0 @ "want switch" = false
9657 beq common_gotoBail @ break frame, bail out completely
9658
9659 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
9660 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
9661 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
9662 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
9663#if defined(WITH_JIT)
Ben Cheng7a0bcd02010-01-22 16:45:45 -08009664 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
Andy McFaddena5069fb2009-06-19 15:20:12 -07009665 GET_JIT_PROF_TABLE(r0)
9666 mov rPC, r9 @ publish new rPC
9667 str r1, [rGLUE, #offGlue_methodClassDex]
Ben Cheng7a0bcd02010-01-22 16:45:45 -08009668 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land
9669 cmp r10, #0 @ caller is compiled code
9670 blxne r10
Andy McFaddena5069fb2009-06-19 15:20:12 -07009671 GET_INST_OPCODE(ip) @ extract opcode from rINST
9672 cmp r0,#0
9673 bne common_updateProfile
9674 GOTO_OPCODE(ip) @ jump to next instruction
9675#else
9676 GET_INST_OPCODE(ip) @ extract opcode from rINST
9677 mov rPC, r9 @ publish new rPC
9678 str r1, [rGLUE, #offGlue_methodClassDex]
9679 GOTO_OPCODE(ip) @ jump to next instruction
9680#endif
9681
9682 /*
9683 * Return handling, calls through "glue code".
9684 */
9685 .if 0
9686.LreturnOld:
9687 SAVE_PC_FP_TO_GLUE() @ export state
9688 mov r0, rGLUE @ arg to function
9689 bl dvmMterp_returnFromMethod
9690 b common_resumeAfterGlueCall
9691 .endif
9692
9693
9694/*
9695 * Somebody has thrown an exception. Handle it.
9696 *
9697 * If the exception processing code returns to us (instead of falling
9698 * out of the interpreter), continue with whatever the next instruction
9699 * now happens to be.
9700 *
9701 * This does not return.
9702 */
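/*
 * The flow below, roughly (sketch; arguments follow the comments in the
 * code):
 *
 *   exception = self->exception;
 *   dvmAddTrackedAlloc(exception, self);   // keep it alive during the scan
 *   self->exception = NULL;
 *   relPc = (rPC - method->insns) / 2;     // offset in 16-bit code units
 *   catchRelPc = dvmFindCatchBlock(self, relPc, exception, false, &fp);
 *   if (catchRelPc < 0)
 *       goto .LnotCaughtLocally;           // re-set exception, bail out
 *   // otherwise resume at method->insns + catchRelPc in the (possibly
 *   // unwound) frame, restoring self->exception only if the handler
 *   // begins with move-exception
 */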
9703 .global dvmMterpCommonExceptionThrown
9704dvmMterpCommonExceptionThrown:
9705common_exceptionThrown:
9706.LexceptionNew:
9707 mov r0, #kInterpEntryThrow
9708 mov r9, #0
9709 bl common_periodicChecks
9710
9711#if defined(WITH_JIT)
9712 mov r2,#kJitTSelectAbort @ abandon trace selection in progress
9713 str r2,[rGLUE,#offGlue_jitState]
9714#endif
9715
9716 ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self
9717 ldr r9, [r10, #offThread_exception] @ r9<- self->exception
9718 mov r1, r10 @ r1<- self
9719 mov r0, r9 @ r0<- exception
9720 bl dvmAddTrackedAlloc @ don't let the exception be GCed
9721 mov r3, #0 @ r3<- NULL
9722 str r3, [r10, #offThread_exception] @ self->exception = NULL
9723
9724 /* set up args and a local for "&fp" */
9725 /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
9726 str rFP, [sp, #-4]! @ *--sp = fp
9727 mov ip, sp @ ip<- &fp
9728 mov r3, #0 @ r3<- false
9729 str ip, [sp, #-4]! @ *--sp = &fp
9730 ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method
9731 mov r0, r10 @ r0<- self
9732 ldr r1, [r1, #offMethod_insns] @ r1<- method->insns
9733 mov r2, r9 @ r2<- exception
9734 sub r1, rPC, r1 @ r1<- pc - method->insns
9735 mov r1, r1, asr #1 @ r1<- offset in code units
9736
9737 /* call, r0 gets catchRelPc (a code-unit offset) */
9738 bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp)
9739
9740 /* fix earlier stack overflow if necessary; may trash rFP */
9741 ldrb r1, [r10, #offThread_stackOverflowed]
9742 cmp r1, #0 @ did we overflow earlier?
9743 beq 1f @ no, skip ahead
9744 mov rFP, r0 @ save relPc result in rFP
9745 mov r0, r10 @ r0<- self
Andy McFadden4fbba1f2010-02-03 07:21:14 -08009746 mov r1, r9 @ r1<- exception
Andy McFaddena5069fb2009-06-19 15:20:12 -07009747 bl dvmCleanupStackOverflow @ call(self, exception)
9748 mov r0, rFP @ restore result
97491:
9750
9751 /* update frame pointer and check result from dvmFindCatchBlock */
9752 ldr rFP, [sp, #4] @ retrieve the updated rFP
9753 cmp r0, #0 @ is catchRelPc < 0?
9754 add sp, sp, #8 @ restore stack
9755 bmi .LnotCaughtLocally
9756
9757 /* adjust locals to match self->curFrame and updated PC */
9758 SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area
9759 ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method
9760 str r1, [rGLUE, #offGlue_method] @ glue->method = new method
9761 ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz
9762 ldr r3, [r1, #offMethod_insns] @ r3<- method->insns
9763 ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
9764 add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc
9765 str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...
9766
9767 /* release the tracked alloc on the exception */
9768 mov r0, r9 @ r0<- exception
9769 mov r1, r10 @ r1<- self
9770 bl dvmReleaseTrackedAlloc @ release the exception
9771
9772 /* restore the exception if the handler wants it */
9773 FETCH_INST() @ load rINST from rPC
9774 GET_INST_OPCODE(ip) @ extract opcode from rINST
9775 cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"?
9776 streq r9, [r10, #offThread_exception] @ yes, restore the exception
9777 GOTO_OPCODE(ip) @ jump to next instruction
9778
9779.LnotCaughtLocally: @ r9=exception, r10=self
9780 /* fix stack overflow if necessary */
9781 ldrb r1, [r10, #offThread_stackOverflowed]
9782 cmp r1, #0 @ did we overflow earlier?
9783 movne r0, r10 @ if yes: r0<- self
Andy McFadden4fbba1f2010-02-03 07:21:14 -08009784 movne r1, r9 @ if yes: r1<- exception
Andy McFaddena5069fb2009-06-19 15:20:12 -07009785 blne dvmCleanupStackOverflow @ if yes: call(self, exception)
9786
9787 @ may want to show "not caught locally" debug messages here
9788#if DVM_SHOW_EXCEPTION >= 2
9789 /* call __android_log_print(prio, tag, format, ...) */
9790 /* "Exception %s from %s:%d not caught locally" */
9791 @ dvmLineNumFromPC(method, pc - method->insns)
9792 ldr r0, [rGLUE, #offGlue_method]
9793 ldr r1, [r0, #offMethod_insns]
9794 sub r1, rPC, r1
9795 asr r1, r1, #1
9796 bl dvmLineNumFromPC
9797 str r0, [sp, #-4]!
9798 @ dvmGetMethodSourceFile(method)
9799 ldr r0, [rGLUE, #offGlue_method]
9800 bl dvmGetMethodSourceFile
9801 str r0, [sp, #-4]!
9802 @ exception->clazz->descriptor
9803 ldr r3, [r9, #offObject_clazz]
9804 ldr r3, [r3, #offClassObject_descriptor]
9805 @
9806 ldr r2, strExceptionNotCaughtLocally
9807 ldr r1, strLogTag
9808 mov r0, #3 @ LOG_DEBUG
9809 bl __android_log_print
9810#endif
9811 str r9, [r10, #offThread_exception] @ restore exception
9812 mov r0, r9 @ r0<- exception
9813 mov r1, r10 @ r1<- self
9814 bl dvmReleaseTrackedAlloc @ release the exception
9815 mov r1, #0 @ "want switch" = false
9816 b common_gotoBail @ bail out
9817
9818
9819 /*
9820 * Exception handling, calls through "glue code".
9821 */
9822 .if 0
9823.LexceptionOld:
9824 SAVE_PC_FP_TO_GLUE() @ export state
9825 mov r0, rGLUE @ arg to function
9826 bl dvmMterp_exceptionThrown
9827 b common_resumeAfterGlueCall
9828 .endif
9829
9830
9831/*
9832 * After returning from a "glued" function, pull out the updated
9833 * values and start executing at the next instruction.
9834 */
9835common_resumeAfterGlueCall:
9836 LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue
9837 FETCH_INST() @ load rINST from rPC
9838 GET_INST_OPCODE(ip) @ extract opcode from rINST
9839 GOTO_OPCODE(ip) @ jump to next instruction
9840
9841/*
9842 * Invalid array index.
9843 */
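/*
 * This and the other common_err* stubs below share one shape; in C,
 * roughly:
 *
 *   EXPORT_PC();
 *   dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL);
 *   goto common_exceptionThrown;
 *
 * The second argument is an optional message string; it is NULL for all
 * of these except divide-by-zero, which passes "divide by zero".
 */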
9844common_errArrayIndex:
9845 EXPORT_PC()
9846 ldr r0, strArrayIndexException
9847 mov r1, #0
9848 bl dvmThrowException
9849 b common_exceptionThrown
9850
9851/*
9852 * Invalid array value.
9853 */
9854common_errArrayStore:
9855 EXPORT_PC()
9856 ldr r0, strArrayStoreException
9857 mov r1, #0
9858 bl dvmThrowException
9859 b common_exceptionThrown
9860
9861/*
9862 * Integer divide or mod by zero.
9863 */
9864common_errDivideByZero:
9865 EXPORT_PC()
9866 ldr r0, strArithmeticException
9867 ldr r1, strDivideByZero
9868 bl dvmThrowException
9869 b common_exceptionThrown
9870
9871/*
9872 * Attempt to allocate an array with a negative size.
9873 */
9874common_errNegativeArraySize:
9875 EXPORT_PC()
9876 ldr r0, strNegativeArraySizeException
9877 mov r1, #0
9878 bl dvmThrowException
9879 b common_exceptionThrown
9880
9881/*
9882 * Invocation of a non-existent method.
9883 */
9884common_errNoSuchMethod:
9885 EXPORT_PC()
9886 ldr r0, strNoSuchMethodError
9887 mov r1, #0
9888 bl dvmThrowException
9889 b common_exceptionThrown
9890
9891/*
9892 * We encountered a null object when we weren't expecting one. We
9893 * export the PC, throw a NullPointerException, and goto the exception
9894 * processing code.
9895 */
9896common_errNullObject:
9897 EXPORT_PC()
9898 ldr r0, strNullPointerException
9899 mov r1, #0
9900 bl dvmThrowException
9901 b common_exceptionThrown
9902
9903/*
9904 * For debugging, cause an immediate fault. The source address will
9905 * be in lr (use a bl instruction to jump here).
9906 */
9907common_abort:
9908 ldr pc, .LdeadFood
9909.LdeadFood:
9910 .word 0xdeadf00d
9911
9912/*
9913 * Spit out a "we were here", preserving all registers. (The attempt
9914 * to save ip won't work, but we need to save an even number of
9915 * registers for EABI 64-bit stack alignment.)
9916 */
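    /*
     * Pushing an even number of 4-byte registers (six here, 24 bytes)
     * keeps sp 8-byte aligned across the printf call, as the EABI
     * requires; ip is included only to make the count even.
     */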
9917 .macro SQUEAK num
9918common_squeak\num:
9919 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9920 ldr r0, strSqueak
9921 mov r1, #\num
9922 bl printf
9923 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9924 bx lr
9925 .endm
9926
9927 SQUEAK 0
9928 SQUEAK 1
9929 SQUEAK 2
9930 SQUEAK 3
9931 SQUEAK 4
9932 SQUEAK 5
9933
9934/*
9935 * Spit out the number in r0, preserving registers.
9936 */
9937common_printNum:
9938 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9939 mov r1, r0
9940 ldr r0, strSqueak
9941 bl printf
9942 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9943 bx lr
9944
9945/*
9946 * Print a newline, preserving registers.
9947 */
9948common_printNewline:
9949 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9950 ldr r0, strNewline
9951 bl printf
9952 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9953 bx lr
9954
9955 /*
9956 * Print the 32-bit quantity in r0 as a hex value, preserving registers.
9957 */
9958common_printHex:
9959 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9960 mov r1, r0
9961 ldr r0, strPrintHex
9962 bl printf
9963 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9964 bx lr
9965
9966/*
9967 * Print the 64-bit quantity in r0-r1, preserving registers.
9968 */
9969common_printLong:
9970 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9971 mov r3, r1
9972 mov r2, r0
9973 ldr r0, strPrintLong
9974 bl printf
9975 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9976 bx lr
9977
9978/*
9979 * Print full method info. Pass the Method* in r0. Preserves regs.
9980 */
9981common_printMethod:
9982 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9983 bl dvmMterpPrintMethod
9984 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9985 bx lr
9986
9987/*
9988 * Call a C helper function that dumps regs and possibly some
9989 * additional info. Requires the C function to be compiled in.
9990 */
9991 .if 0
9992common_dumpRegs:
9993 stmfd sp!, {r0, r1, r2, r3, ip, lr}
9994 bl dvmMterpDumpArmRegs
9995 ldmfd sp!, {r0, r1, r2, r3, ip, lr}
9996 bx lr
9997 .endif
9998
9999#if 0
10000/*
10001 * Experiment on VFP mode.
10002 *
10003 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
10004 *
10005 * Updates the bits specified by "mask", setting them to the values in "val".
10006 */
10007setFPSCR:
10008 and r0, r0, r1 @ make sure no stray bits are set
10009 fmrx r2, fpscr @ get VFP reg
10010 mvn r1, r1 @ bit-invert mask
10011 and r2, r2, r1 @ clear masked bits
10012 orr r2, r2, r0 @ set specified bits
10013 fmxr fpscr, r2 @ set VFP reg
10014 mov r0, r2 @ return new value
10015 bx lr
10016
10017 .align 2
10018 .global dvmConfigureFP
10019 .type dvmConfigureFP, %function
10020dvmConfigureFP:
10021 stmfd sp!, {ip, lr}
10022 /* 0x03000000 sets DN/FZ */
10023 /* 0x00009f00 clears the six exception enable flags */
10024 bl common_squeak0
10025 mov r0, #0x03000000 @ r0<- 0x03000000
10026 add r1, r0, #0x9f00 @ r1<- 0x03009f00
10027 bl setFPSCR
10028 ldmfd sp!, {ip, pc}
10029#endif
10030
10031
10032/*
10033 * String references, must be close to the code that uses them.
10034 */
10035 .align 2
10036strArithmeticException:
10037 .word .LstrArithmeticException
10038strArrayIndexException:
10039 .word .LstrArrayIndexException
10040strArrayStoreException:
10041 .word .LstrArrayStoreException
10042strDivideByZero:
10043 .word .LstrDivideByZero
10044strNegativeArraySizeException:
10045 .word .LstrNegativeArraySizeException
10046strNoSuchMethodError:
10047 .word .LstrNoSuchMethodError
10048strNullPointerException:
10049 .word .LstrNullPointerException
10050
10051strLogTag:
10052 .word .LstrLogTag
10053strExceptionNotCaughtLocally:
10054 .word .LstrExceptionNotCaughtLocally
10055
10056strNewline:
10057 .word .LstrNewline
10058strSqueak:
10059 .word .LstrSqueak
10060strPrintHex:
10061 .word .LstrPrintHex
10062strPrintLong:
10063 .word .LstrPrintLong
10064
10065/*
10066 * Zero-terminated ASCII string data.
10067 *
10068 * On ARM we have two choices: do like gcc does, and LDR from a .word
10069 * with the address, or use an ADR pseudo-op to get the address
10070 * directly. ADR saves 4 bytes and an indirection, but it's using a
10071 * PC-relative addressing mode and hence has a limited range, which
10072 * makes it not work well with mergeable string sections.
10073 */
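/*
 * For reference, the two choices look like this (sketch; strFoo and
 * .LstrFoo are placeholder names):
 *
 *   ldr r0, strFoo        @ load address via a nearby .word literal
 *   adr r0, .LstrFoo      @ pc-relative, but .LstrFoo must be close by
 *
 * This file takes the .word route so the strings themselves can live in
 * the mergeable .rodata.str1.4 section declared below.
 */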
10074 .section .rodata.str1.4,"aMS",%progbits,1
10075
10076.LstrBadEntryPoint:
10077 .asciz "Bad entry point %d\n"
10078.LstrArithmeticException:
10079 .asciz "Ljava/lang/ArithmeticException;"
10080.LstrArrayIndexException:
10081 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;"
10082.LstrArrayStoreException:
10083 .asciz "Ljava/lang/ArrayStoreException;"
10084.LstrClassCastException:
10085 .asciz "Ljava/lang/ClassCastException;"
10086.LstrDivideByZero:
10087 .asciz "divide by zero"
10088.LstrFilledNewArrayNotImpl:
10089 .asciz "filled-new-array only implemented for objects and 'int'"
10090.LstrInternalError:
10091 .asciz "Ljava/lang/InternalError;"
10092.LstrInstantiationError:
10093 .asciz "Ljava/lang/InstantiationError;"
10094.LstrNegativeArraySizeException:
10095 .asciz "Ljava/lang/NegativeArraySizeException;"
10096.LstrNoSuchMethodError:
10097 .asciz "Ljava/lang/NoSuchMethodError;"
10098.LstrNullPointerException:
10099 .asciz "Ljava/lang/NullPointerException;"
10100
10101.LstrLogTag:
10102 .asciz "mterp"
10103.LstrExceptionNotCaughtLocally:
10104 .asciz "Exception %s from %s:%d not caught locally\n"
10105
10106.LstrNewline:
10107 .asciz "\n"
10108.LstrSqueak:
10109 .asciz "<%d>"
10110.LstrPrintHex:
10111 .asciz "<0x%x>"
10112.LstrPrintLong:
10113 .asciz "<%lld>"
10114
10115