/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"

#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table.h"
#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key, displacement>
 * pairs.
 */
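/*
 * (For reference, a sketch of the layout assumed here, per the Dalvik
 * bytecode spec's sparse-switch payload:
 *   { ushort ident = 0x0200; ushort size; int keys[size]; int targets[size]; }
 * with keys sorted ascending.)
 */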
void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  GenSmallSparseSwitch(mir, table_offset, rl_src);
}

/*
 * Code pattern will look something like:
 *
 * mov r_val, ..
 * call 0
 * pop r_start_of_method
 * sub r_start_of_method, ..
 * mov r_key_reg, r_val
 * sub r_key_reg, low_key
 * cmp r_key_reg, size-1 ; bound check
 * ja done
 * mov r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
 * add r_start_of_method, r_disp
 * jmp r_start_of_method
 * done:
 */
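/*
 * (The pattern above is the 32-bit form, which recovers the method base via
 * call/pop. As a sketch of the 64-bit form emitted below, which uses
 * RIP-relative addressing instead - table[1] holds the case count per the
 * packed-switch payload format:
 *
 * lea    table_base, [rip + switch_table]
 * movsxd r_disp, dword [table_base + r_key_reg * 4]
 * add    r_disp, table_base
 * jmp    r_disp
 * done:
 */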
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  // Add the table to the list - we'll process it later.
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->switch_mir = mir;
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  switch_tables_.push_back(tab_rec);

  // Get the switch value.
  rl_src = LoadValue(rl_src, kCoreReg);

  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary.
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }

  // Bounds check: if the key is < 0 or >= size, fall through to the code
  // after the switch (a negative key wraps to a large unsigned value, so the
  // single unsigned compare catches both cases).
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

  RegStorage addr_for_jump;
  if (cu_->target64) {
    RegStorage table_base = AllocTempWide();
    // Load the address of the table into table_base.
    LIR* lea = RawLIR(current_dalvik_offset_, kX86Lea64RM, table_base.GetReg(), kRIPReg,
                      256, 0, WrapPointer(tab_rec));
    lea->flags.fixup = kFixupSwitchTable;
    AppendLIR(lea);

    // Load the offset from the table out of the table.
    addr_for_jump = AllocTempWide();
    NewLIR5(kX86MovsxdRA, addr_for_jump.GetReg(), table_base.GetReg(), keyReg.GetReg(), 2, 0);

    // Add the offset from the table to the table base.
    OpRegReg(kOpAdd, addr_for_jump, table_base);
  } else {
    // Materialize a pointer to the switch table.
    RegStorage start_of_method_reg;
    if (base_of_code_ != nullptr) {
      // We can use the saved value.
      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
      rl_method = LoadValue(rl_method, kCoreReg);
      start_of_method_reg = rl_method.reg;
      store_method_addr_used_ = true;
    } else {
      start_of_method_reg = AllocTempRef();
      NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
    }
    // Load the displacement from the switch table.
    addr_for_jump = AllocTemp();
    NewLIR5(kX86PcRelLoadRA, addr_for_jump.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
            2, WrapPointer(tab_rec));
    // Add displacement to start of method.
    OpRegReg(kOpAdd, addr_for_jump, start_of_method_reg);
  }

  // ..and go!
  tab_rec->anchor = NewLIR1(kX86JmpR, addr_for_jump.GetReg());

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
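  // (Note, stated as an assumption about the encodings: kX86Mov32RT/kX86Mov64RT
  // move between a register and a Thread-local slot addressed through the
  // thread segment override - fs on x86, gs on x86-64. The first move below
  // fetches the pending exception; the second clears the slot by storing 0.)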
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
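  // (A sketch of the usual ART card-marking trick, stated here as an
  // assumption: the card table base is biased so that its low byte equals the
  // dirty-card value, so storing reg_card_base as a byte at
  // [reg_card_base + (addr >> kCardShift)] marks the card dirty in one store.)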
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing. This leaves the utility
   * code with no spare temps.
   */
  const RegStorage arg0 = TargetReg32(kArg0);
  const RegStorage arg1 = TargetReg32(kArg1);
  const RegStorage arg2 = TargetReg32(kArg2);
  LockTemp(arg0);
  LockTemp(arg1);
  LockTemp(arg2);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;

  // If we're doing an implicit stack overflow check, perform the load immediately
  // before the stack pointer is decremented and anything is saved.
  if (!skip_overflow_check &&
      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    // Implicit stack overflow check.
    // test eax,[esp + -overflow]
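    // (Hedged note on how this works: the test is a read-only probe of the
    // address `overflow` bytes below the current stack pointer. If the frame
    // would run into the protected guard region at the end of the stack, the
    // probe faults, and ART's SIGSEGV fault handler turns the fault into a
    // StackOverflowError - so no explicit compare/branch is needed here.)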
    int overflow = GetStackOverflowReservedBytes(isa);
    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
    MarkPossibleStackOverflowException();
  }

  /* Build frame, return address already on stack */
  stack_decrement_ = OpRegImm(kOpSub, rs_rSP, frame_size_ -
                              GetInstructionSetPointerSize(cu_->instruction_set));

  NewLIR0(kPseudoMethodEntry);
  /* Spill core callee saves */
  SpillCoreRegs();
  SpillFPRegs();
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
        m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
        m2l_->ClobberCallerSave();
        // The frame has been popped above; the throw helper does not return.
        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
                         false /* MarkSafepointPC */, false /* UseLink */);
      }

     private:
      const size_t sp_displace_;
    };
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // TODO: for large frames we should do something like:
      //   spill ebp
      //   lea ebp, [esp + frame_size]
      //   cmp ebp, fs:[stack_end_]
      //   jcc stack_overflow_exception
      //   mov esp, ebp
      // in case a signal comes in that's not using an alternate signal stack and the large frame
      // may have moved us outside of the reserved area at the end of the stack.
      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
      if (cu_->target64) {
        OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
      } else {
        OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
      }
      LIR* branch = OpCondBranch(kCondUlt, nullptr);
      AddSlowPath(
          new(arena_)StackOverflowSlowPath(this, branch,
                                           frame_size_ -
                                           GetInstructionSetPointerSize(cu_->instruction_set)));
    }
  }

  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
    RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
    setup_method_address_[1] = StoreBaseDisp(rs_rSP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }

  FreeTemp(arg0);
  FreeTemp(arg1);
  FreeTemp(arg2);
}

void X86Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  UnSpillFPRegs();
  /* Remove frame except for return address */
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  stack_increment_ = OpRegImm(kOpAdd, rs_rSP,
                              frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialEntryForSuspend() {
  // Keep 16-byte stack alignment, there's already the return address, so
  // - for 32-bit push EAX, i.e. ArtMethod*, ESI, EDI,
  // - for 64-bit push RAX, i.e. ArtMethod*.
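  // (Worked out: 32-bit frame = ret(4) + EDI(4) + ESI(4) + EAX(4) = 16 bytes;
  // 64-bit frame = ret(8) + RAX(8) = 16 bytes - hence frame_size_ = 16 below.)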
  if (!cu_->target64) {
    DCHECK(!IsTemp(rs_rSI));
    DCHECK(!IsTemp(rs_rDI));
    core_spill_mask_ =
        (1u << rs_rDI.GetRegNum()) | (1u << rs_rSI.GetRegNum()) | (1u << rs_rRET.GetRegNum());
    num_core_spills_ = 3u;
  } else {
    core_spill_mask_ = (1u << rs_rRET.GetRegNum());
    num_core_spills_ = 1u;
  }
  fp_spill_mask_ = 0u;
  num_fp_spills_ = 0u;
  frame_size_ = 16u;
  core_vmap_table_.clear();
  fp_vmap_table_.clear();
  if (!cu_->target64) {
    NewLIR1(kX86Push32R, rs_rDI.GetReg());
    NewLIR1(kX86Push32R, rs_rSI.GetReg());
  }
  NewLIR1(kX86Push32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
}

void X86Mir2Lir::GenSpecialExitForSuspend() {
  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
  NewLIR1(kX86Pop32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
  if (!cu_->target64) {
    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
  }
}

void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  // Implicit null pointer check.
  // test eax,[arg1+0]
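  // (Assumed rationale, mirroring the implicit stack check above: 'test'
  // reads [reg + 0] without clobbering any register; if reg is null the probe
  // faults near address 0, and the SIGSEGV fault handler converts the fault
  // into a NullPointerException at this PC.)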
  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
  MarkPossibleNullPointerException(opt_flags);
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
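/*
 * (How the state machine is driven, as understood from the surrounding
 * Mir2Lir invoke-lowering code: this is called repeatedly while arguments are
 * being marshalled, passing in the state returned by the previous call; each
 * call emits at most one step and returns state + 1, or -1 once the sequence
 * for the given dispatch kind is exhausted.)
 */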
int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                                  int state, const MethodReference& target_method,
                                  uint32_t,
                                  uintptr_t direct_code, uintptr_t direct_method,
                                  InvokeType type) {
  UNUSED(info, direct_code);
  X86Mir2Lir* cg = static_cast<X86Mir2Lir*>(cu->cg.get());
  if (direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_method != static_cast<uintptr_t>(-1)) {
          auto target_reg = cg->TargetReg(kArg0, kRef);
          if (target_reg.Is64Bit()) {
            cg->LoadConstantWide(target_reg, direct_method);
          } else {
            cg->LoadConstant(target_reg, direct_method);
          }
        } else {
          cg->LoadMethodAddress(target_method, type, kArg0);
        }
        break;
      default:
        return -1;
    }
  } else if (cg->CanUseOpPcRelDexCacheArrayLoad()) {
    switch (state) {
      case 0: {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
        cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef));
        break;
      }
      default:
        return -1;
    }
  } else {
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(arg0_ref);
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadRefDisp(arg0_ref,
                        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadRefDisp(arg0_ref,
                        mirror::ObjectArray<mirror::Object>::OffsetOfElement(
                            target_method.dex_method_index).Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}

NextCallInsn X86Mir2Lir::GetNextSDCallInsn() {
  return X86NextSDCallInsn;
}

}  // namespace art