/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "x86_lir.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/reg_storage_eq.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* Both must be DOUBLE or both not DOUBLE. */
  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kX86MovsdRR;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}
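
// Opcode selection above, in assembler terms: movsd for double<->double copies, movss
// for single<->single, and movd for the mixed cases (kX86MovdxrRR moves a core register
// into an XMM register, kX86MovdrxRR moves an XMM register out).  A copy whose source
// and destination match is still built, but marked as a nop.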

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}
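
// Rationale for the predicates above: x86 can encode any integer constant as an
// immediate, so integer constants are always cheap to materialize.  Floats and doubles
// have no immediate form and must come from the literal pool, except +0.0, which xorps
// produces in one instruction (see LoadConstantNoClobber and LoadConstantWide below).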

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) the codegen is under fixed register usage.
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (r_dest.IsFloat()) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note: there is no byte-immediate form of a 32-bit immediate move.
    // A 64-bit immediate is not supported by the LIR structure.
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (r_dest_save.IsFloat()) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}
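
// Example of the shortcut (registers illustrative): LoadConstantNoClobber(rs_rAX, 0)
// emits "xor eax, eax" instead of "mov eax, 0" -- a shorter encoding with the same
// result -- while zero into an XMM register becomes "xorps xmm, xmm".  A non-zero value
// destined for an XMM register is built in a core temp and moved over with movd.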
106
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700107LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
Brian Carlstromdf629502013-07-17 22:39:56 -0700108 LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700109 res->target = target;
110 return res;
111}
112
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700113LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700114 LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
115 X86ConditionEncoding(cc));
116 branch->target = target;
117 return branch;
118}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
    case kOpRev: opcode = r_dest_src.Is64Bit() ? kX86Bswap64R : kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!r_dest_src1.IsFloat());
  if (r_dest_src1.Is64Bit()) {
    switch (op) {
      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
      case kOpLsl: opcode = kX86Sal64RI; break;
      case kOpLsr: opcode = kX86Shr64RI; break;
      case kOpAsr: opcode = kX86Sar64RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
    }
  } else {
    switch (op) {
      case kOpLsl: opcode = kX86Sal32RI; break;
      case kOpLsr: opcode = kX86Shr32RI; break;
      case kOpAsr: opcode = kX86Sar32RI; break;
      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
      case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
      // case kOpSbb: opcode = kX86Sbb32RI; break;
      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
      case kOpMov:
        /*
         * Moving the constant zero into a register can be specialized as an xor of that
         * register.  However, xor sets eflags while the move does not.  For that reason,
         * always emit the move here; callers that can tolerate clobbered flags should
         * call LoadConstantNoClobber instead.
         */
        opcode = kX86Mov32RI;
        break;
      case kOpMul:
        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
      case kOp2Byte:
        opcode = kX86Mov32RI;
        value = static_cast<int8_t>(value);
        break;
      case kOp2Short:
        opcode = kX86Mov32RI;
        value = static_cast<int16_t>(value);
        break;
      case kOp2Char:
        opcode = kX86Mov32RI;
        value = static_cast<uint16_t>(value);
        break;
      case kOpNeg:
        opcode = kX86Mov32RI;
        value = -value;
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm " << op;
    }
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}
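
// Example of the byte_imm selection (registers illustrative): OpRegImm(kOpAdd, rs_rAX, 1)
// picks kX86Add32RI8 ("add eax, imm8"), while OpRegImm(kOpAdd, rs_rAX, 0x12345) needs
// the full imm32 form kX86Add32RI.  Shift counts always fit in an imm8, so the shift
// opcodes have no separate RI8 variant.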

LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  bool is64Bit = r_dest_src1.Is64Bit();
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
    case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
    case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
    case kOpOr: opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
    case kOp2Byte:
      // TODO: there are several instances of this check.  A utility function perhaps?
      // TODO: Similar to Arm's reg < 8 check.  Perhaps add attribute checks to RegStorage?
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
        NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
        NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
        return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
                       is64Bit ? 56 : 24);
      } else {
        opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
    case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
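
// Note on the kOp2Byte path above: in 32-bit mode only the first four GPRs (EAX, ECX,
// EDX, EBX -- register numbers below rs_rX86_SP) have byte-addressable low halves, so a
// sign-extension from any other source is done as "mov dest, src; shl dest, 24;
// sar dest, 24" instead of movsx.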

LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg-to-reg operation supported is Cmov.
  DCHECK_EQ(op, kOpCmov);
  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
  return NewLIR3(r_dest.Is64Bit() ? kX86Cmov64RRC : kX86Cmov32RRC, r_dest.GetReg(),
                 r_src.GetReg(), X86ConditionEncoding(cc));
}
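
// Example (condition and registers illustrative): OpCondRegReg(kOpCmov, kCondEq, rs_rAX,
// rs_rCX) emits "cmovz eax, ecx" -- the destination is overwritten with the source only
// if the condition set by the preceding compare holds, with no branch.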

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  bool is64Bit = r_dest.Is64Bit();
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr: opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rX86_SP);
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  bool is64Bit = rl_dest.wide != 0;
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
    case kOpOr: opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  bool is64Bit = r_dest.Is64Bit();
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr: opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  bool is64Bit = r_dest.Is64Bit();
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
        AppendLIR(res);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}
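
// The three-operand kOpAdd case above maps directly onto LEA: r_dest = r_src1 + r_src2
// becomes "lea dest, [base + index]" with scale and displacement 0.  rBP is avoided as
// the base because the ModRM encoding repurposes the zero-displacement rBP-base form
// (it means disp32 with no base), hence the operand swap.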

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul && !cu_->target64) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd && !cu_->target64) {
    if (value == 0xFF && r_src.Low4()) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                     r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
                     0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}
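
// Example (registers illustrative): OpRegRegImm(kOpAdd, rs_rAX, rs_rCX, 16) becomes a
// single "lea eax, [ecx + 16]" rather than a register copy followed by an add.  The
// "index" operand here is the SIB no-index encoding, which shares rSP's register number.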
522
Ian Rogersdd7624d2014-03-14 17:43:00 -0700523LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
Andreas Gampe2f244e92014-05-08 03:35:25 -0700524 DCHECK_EQ(kX86, cu_->instruction_set);
525 X86OpCode opcode = kX86Bkpt;
526 switch (op) {
527 case kOpBlx: opcode = kX86CallT; break;
528 case kOpBx: opcode = kX86JmpT; break;
529 default:
530 LOG(FATAL) << "Bad opcode: " << op;
531 break;
532 }
533 return NewLIR1(opcode, thread_offset.Int32Value());
534}
535
536LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
537 DCHECK_EQ(kX86_64, cu_->instruction_set);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700538 X86OpCode opcode = kX86Bkpt;
539 switch (op) {
540 case kOpBlx: opcode = kX86CallT; break;
Brian Carlstrom60d7a652014-03-13 18:10:08 -0700541 case kOpBx: opcode = kX86JmpT; break;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700542 default:
543 LOG(FATAL) << "Bad opcode: " << op;
544 break;
545 }
Ian Rogers468532e2013-08-05 10:56:33 -0700546 return NewLIR1(opcode, thread_offset.Int32Value());
Brian Carlstrom7940e442013-07-12 13:46:57 -0700547}
548
buzbee2700f7e2014-03-07 09:46:20 -0800549LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700550 X86OpCode opcode = kX86Bkpt;
551 switch (op) {
552 case kOpBlx: opcode = kX86CallM; break;
553 default:
554 LOG(FATAL) << "Bad opcode: " << op;
555 break;
556 }
buzbee2700f7e2014-03-07 09:46:20 -0800557 return NewLIR2(opcode, r_base.GetReg(), disp);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700558}
559
buzbee2700f7e2014-03-07 09:46:20 -0800560LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700561 int32_t val_lo = Low32Bits(value);
562 int32_t val_hi = High32Bits(value);
buzbee2700f7e2014-03-07 09:46:20 -0800563 int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700564 LIR *res;
Mark Mendelle87f9b52014-04-30 14:13:18 -0400565 bool is_fp = r_dest.IsFloat();
buzbee2700f7e2014-03-07 09:46:20 -0800566 // TODO: clean this up once we fully recognize 64-bit storage containers.
567 if (is_fp) {
Alexei Zavjalov0e63ce12014-07-10 18:34:23 +0700568 DCHECK(r_dest.IsDouble());
Brian Carlstrom7940e442013-07-12 13:46:57 -0700569 if (value == 0) {
buzbee2700f7e2014-03-07 09:46:20 -0800570 return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
Mark Mendell67c39c42014-01-31 17:28:00 -0800571 } else if (base_of_code_ != nullptr) {
572 // We will load the value from the literal area.
573 LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
574 if (data_target == NULL) {
575 data_target = AddWideData(&literal_list_, val_lo, val_hi);
576 }
577
578 // Address the start of the method
579 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
Chao-ying Fue0ccdc02014-06-06 17:32:37 -0700580 if (rl_method.wide) {
581 rl_method = LoadValueWide(rl_method, kCoreReg);
582 } else {
583 rl_method = LoadValue(rl_method, kCoreReg);
584 }
Mark Mendell67c39c42014-01-31 17:28:00 -0800585
586 // Load the proper value from the literal area.
587 // We don't know the proper offset for the value, so pick one that will force
588 // 4 byte offset. We will fix this up in the assembler later to have the right
589 // value.
Vladimir Marko8dea81c2014-06-06 14:50:36 +0100590 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
Mark Mendell0c524512014-05-27 15:52:21 -0400591 res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
Andreas Gampe3c12c512014-06-24 18:46:29 +0000592 kDouble, kNotVolatile);
Mark Mendell67c39c42014-01-31 17:28:00 -0800593 res->target = data_target;
594 res->flags.fixup = kFixupLoad;
Alexei Zavjalov54659e32014-07-30 19:31:04 +0700595 Clobber(rl_method.reg);
Mark Mendell55d0eac2014-02-06 11:02:52 -0800596 store_method_addr_used_ = true;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700597 } else {
Alexei Zavjalov0e63ce12014-07-10 18:34:23 +0700598 if (r_dest.IsPair()) {
599 if (val_lo == 0) {
600 res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
601 } else {
602 res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
603 }
604 if (val_hi != 0) {
605 RegStorage r_dest_hi = AllocTempDouble();
606 LoadConstantNoClobber(r_dest_hi, val_hi);
607 NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
608 FreeTemp(r_dest_hi);
609 }
Brian Carlstrom7940e442013-07-12 13:46:57 -0700610 } else {
Alexei Zavjalov0e63ce12014-07-10 18:34:23 +0700611 RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
612 res = LoadConstantWide(r_temp, value);
613 OpRegCopyWide(r_dest, r_temp);
614 FreeTemp(r_temp);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700615 }
616 }
617 } else {
Chao-ying Fue0ccdc02014-06-06 17:32:37 -0700618 if (r_dest.IsPair()) {
619 res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
620 LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
621 } else {
Yixin Shou5192cbb2014-07-01 13:48:17 -0400622 if (value == 0) {
Serguei Katkov1c557032014-06-23 13:23:38 +0700623 res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
Yixin Shou5192cbb2014-07-01 13:48:17 -0400624 } else if (value >= INT_MIN && value <= INT_MAX) {
625 res = NewLIR2(kX86Mov64RI32, r_dest.GetReg(), val_lo);
626 } else {
627 res = NewLIR3(kX86Mov64RI64, r_dest.GetReg(), val_hi, val_lo);
Chao-ying Fue0ccdc02014-06-06 17:32:37 -0700628 }
629 }
Brian Carlstrom7940e442013-07-12 13:46:57 -0700630 }
631 return res;
632}
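
// Sketch of the x86-64 single-register cases above (values chosen for illustration):
//   LoadConstantWide(reg, 0)           -> xor reg, reg
//   LoadConstantWide(reg, -1)          -> kX86Mov64RI32 (sign-extended imm32)
//   LoadConstantWide(reg, 0x123456789) -> kX86Mov64RI64 ("movabs" full imm64)
// On 32-bit targets a core pair is filled one half at a time, and a non-zero double
// either comes from the literal pool (when base_of_code_ is available) or is assembled
// in an XMM register with punpckldq.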
633
buzbee2700f7e2014-03-07 09:46:20 -0800634LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
Vladimir Marko3bf7c602014-05-07 14:55:43 +0100635 int displacement, RegStorage r_dest, OpSize size) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700636 LIR *load = NULL;
637 LIR *load2 = NULL;
buzbee2700f7e2014-03-07 09:46:20 -0800638 bool is_array = r_index.Valid();
buzbee091cc402014-03-31 10:14:40 -0700639 bool pair = r_dest.IsPair();
640 bool is64bit = ((size == k64) || (size == kDouble));
Brian Carlstrom7940e442013-07-12 13:46:57 -0700641 X86OpCode opcode = kX86Nop;
642 switch (size) {
buzbee695d13a2014-04-19 13:32:20 -0700643 case k64:
Brian Carlstrom7940e442013-07-12 13:46:57 -0700644 case kDouble:
buzbee091cc402014-03-31 10:14:40 -0700645 if (r_dest.IsFloat()) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700646 opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
Chao-ying Fue0ccdc02014-06-06 17:32:37 -0700647 } else if (!pair) {
648 opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700649 } else {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700650 opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
651 }
652 // TODO: double store is to unaligned address
653 DCHECK_EQ((displacement & 0x3), 0);
654 break;
Dmitry Petrochenko9ee801f2014-05-12 11:31:37 +0700655 case kWord:
Elena Sayapinadd644502014-07-01 18:39:52 +0700656 if (cu_->target64) {
Dmitry Petrochenko9ee801f2014-05-12 11:31:37 +0700657 opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
658 CHECK_EQ(is_array, false);
659 CHECK_EQ(r_dest.IsFloat(), false);
660 break;
661 } // else fall-through to k32 case
buzbee695d13a2014-04-19 13:32:20 -0700662 case k32:
Brian Carlstrom7940e442013-07-12 13:46:57 -0700663 case kSingle:
buzbee695d13a2014-04-19 13:32:20 -0700664 case kReference: // TODO: update for reference decompression on 64-bit targets.
Brian Carlstrom7940e442013-07-12 13:46:57 -0700665 opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
buzbee091cc402014-03-31 10:14:40 -0700666 if (r_dest.IsFloat()) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700667 opcode = is_array ? kX86MovssRA : kX86MovssRM;
buzbee091cc402014-03-31 10:14:40 -0700668 DCHECK(r_dest.IsFloat());
Brian Carlstrom7940e442013-07-12 13:46:57 -0700669 }
670 DCHECK_EQ((displacement & 0x3), 0);
671 break;
672 case kUnsignedHalf:
673 opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
674 DCHECK_EQ((displacement & 0x1), 0);
675 break;
676 case kSignedHalf:
677 opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
678 DCHECK_EQ((displacement & 0x1), 0);
679 break;
680 case kUnsignedByte:
681 opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
682 break;
683 case kSignedByte:
684 opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
685 break;
686 default:
687 LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
688 }
689
690 if (!is_array) {
691 if (!pair) {
buzbee2700f7e2014-03-07 09:46:20 -0800692 load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700693 } else {
buzbee091cc402014-03-31 10:14:40 -0700694 DCHECK(!r_dest.IsFloat()); // Make sure we're not still using a pair here.
695 if (r_base == r_dest.GetLow()) {
Dave Allison69dfe512014-07-11 17:11:58 +0000696 load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
Brian Carlstrom7940e442013-07-12 13:46:57 -0700697 displacement + HIWORD_OFFSET);
Dave Allison69dfe512014-07-11 17:11:58 +0000698 load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700699 } else {
buzbee091cc402014-03-31 10:14:40 -0700700 load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
701 load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
Brian Carlstrom7940e442013-07-12 13:46:57 -0700702 displacement + HIWORD_OFFSET);
703 }
704 }
Vladimir Marko8dea81c2014-06-06 14:50:36 +0100705 if (mem_ref_type_ == ResourceMask::kDalvikReg) {
706 DCHECK(r_base == rs_rX86_SP);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700707 AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
708 true /* is_load */, is64bit);
709 if (pair) {
710 AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
711 true /* is_load */, is64bit);
712 }
713 }
714 } else {
715 if (!pair) {
buzbee2700f7e2014-03-07 09:46:20 -0800716 load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700717 displacement + LOWORD_OFFSET);
718 } else {
buzbee091cc402014-03-31 10:14:40 -0700719 DCHECK(!r_dest.IsFloat()); // Make sure we're not still using a pair here.
720 if (r_base == r_dest.GetLow()) {
721 if (r_dest.GetHigh() == r_index) {
Mark Mendellae427c32014-01-24 09:17:22 -0800722 // We can't use either register for the first load.
buzbee2700f7e2014-03-07 09:46:20 -0800723 RegStorage temp = AllocTemp();
Dave Allison69dfe512014-07-11 17:11:58 +0000724 load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800725 displacement + HIWORD_OFFSET);
Dave Allison69dfe512014-07-11 17:11:58 +0000726 load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800727 displacement + LOWORD_OFFSET);
buzbee091cc402014-03-31 10:14:40 -0700728 OpRegCopy(r_dest.GetHigh(), temp);
Mark Mendellae427c32014-01-24 09:17:22 -0800729 FreeTemp(temp);
730 } else {
Dave Allison69dfe512014-07-11 17:11:58 +0000731 load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800732 displacement + HIWORD_OFFSET);
Dave Allison69dfe512014-07-11 17:11:58 +0000733 load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800734 displacement + LOWORD_OFFSET);
735 }
Brian Carlstrom7940e442013-07-12 13:46:57 -0700736 } else {
buzbee091cc402014-03-31 10:14:40 -0700737 if (r_dest.GetLow() == r_index) {
Mark Mendellae427c32014-01-24 09:17:22 -0800738 // We can't use either register for the first load.
buzbee2700f7e2014-03-07 09:46:20 -0800739 RegStorage temp = AllocTemp();
740 load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800741 displacement + LOWORD_OFFSET);
buzbee091cc402014-03-31 10:14:40 -0700742 load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800743 displacement + HIWORD_OFFSET);
buzbee091cc402014-03-31 10:14:40 -0700744 OpRegCopy(r_dest.GetLow(), temp);
Mark Mendellae427c32014-01-24 09:17:22 -0800745 FreeTemp(temp);
746 } else {
buzbee091cc402014-03-31 10:14:40 -0700747 load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800748 displacement + LOWORD_OFFSET);
buzbee091cc402014-03-31 10:14:40 -0700749 load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
Mark Mendellae427c32014-01-24 09:17:22 -0800750 displacement + HIWORD_OFFSET);
751 }
Brian Carlstrom7940e442013-07-12 13:46:57 -0700752 }
753 }
754 }
755
Dave Allison69dfe512014-07-11 17:11:58 +0000756 // Always return first load generated as this might cause a fault if base is nullptr.
Brian Carlstrom7940e442013-07-12 13:46:57 -0700757 return load;
758}
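
// The pair cases above guard against register-overlap hazards: when the base or index
// register is also one half of the destination pair, the aliasing half is loaded last,
// or staged through a temp, so the address registers are not clobbered before the
// second load uses them.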

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}
765
Andreas Gampe3c12c512014-06-24 18:46:29 +0000766LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
767 OpSize size, VolatileKind is_volatile) {
Vladimir Marko674744e2014-04-24 15:18:26 +0100768 // LoadBaseDisp() will emit correct insn for atomic load on x86
769 // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
Vladimir Marko674744e2014-04-24 15:18:26 +0100770
Andreas Gampe3c12c512014-06-24 18:46:29 +0000771 LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
772 size);
773
774 if (UNLIKELY(is_volatile == kVolatile)) {
Hans Boehm48f5c472014-06-27 14:50:10 -0700775 GenMemBarrier(kLoadAny); // Only a scheduling barrier.
Andreas Gampe3c12c512014-06-24 18:46:29 +0000776 }
777
778 return load;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700779}
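
// Aligned loads of the supported sizes are naturally atomic on x86, so a volatile load
// needs no fence instruction -- only the kLoadAny scheduling barrier above, which stops
// the compiler from reordering later memory accesses ahead of the load.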

LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size,
                                      int opt_flags) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_src.IsPair();
  bool is64bit = (size == k64) || (size == kDouble);
  bool consider_non_temporal = false;

  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
      consider_non_temporal = true;
      // Fall through!
    case kDouble:
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
      } else {
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_src.IsFloat(), false);
        consider_non_temporal = true;
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      consider_non_temporal = true;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  // Handle the non-temporal hint here.
  if (consider_non_temporal && ((opt_flags & MIR_STORE_NON_TEMPORAL) != 0)) {
    switch (opcode) {
      // We currently only handle 32/64 bit moves here.
      case kX86Mov64AR:
        opcode = kX86Movnti64AR;
        break;
      case kX86Mov64MR:
        opcode = kX86Movnti64MR;
        break;
      case kX86Mov32AR:
        opcode = kX86Movnti32AR;
        break;
      case kX86Mov32MR:
        opcode = kX86Movnti32MR;
        break;
      default:
        // Do nothing here.
        break;
    }
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
  }
  return store;
}
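
// When the MIR carries MIR_STORE_NON_TEMPORAL, the plain 32/64-bit integer moves above
// are swapped for movnti, a streaming store that bypasses the cache; only the opcode
// changes, the operands and annotations stay the same.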

/* Store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}
898
Andreas Gampe3c12c512014-06-24 18:46:29 +0000899LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
900 VolatileKind is_volatile) {
901 if (UNLIKELY(is_volatile == kVolatile)) {
Hans Boehm48f5c472014-06-27 14:50:10 -0700902 GenMemBarrier(kAnyStore); // Only a scheduling barrier.
Andreas Gampe3c12c512014-06-24 18:46:29 +0000903 }
904
Vladimir Marko674744e2014-04-24 15:18:26 +0100905 // StoreBaseDisp() will emit correct insn for atomic store on x86
906 // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
Fred Shih37f05ef2014-07-16 18:38:08 -0700907 // x86 only allows registers EAX-EDX to be used as byte registers, if the input src is not
908 // valid, allocate a temp.
909 bool allocated_temp = false;
910 if (size == kUnsignedByte || size == kSignedByte) {
911 if (!cu_->target64 && !r_src.Low4()) {
912 RegStorage r_input = r_src;
913 r_src = AllocateByteRegister();
914 OpRegCopy(r_src, r_input);
915 allocated_temp = true;
916 }
917 }
Vladimir Marko674744e2014-04-24 15:18:26 +0100918
Andreas Gampe3c12c512014-06-24 18:46:29 +0000919 LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
920
921 if (UNLIKELY(is_volatile == kVolatile)) {
Hans Boehm48f5c472014-06-27 14:50:10 -0700922 // A volatile load might follow the volatile store so insert a StoreLoad barrier.
923 // This does require a fence, even on x86.
924 GenMemBarrier(kAnyAny);
Andreas Gampe3c12c512014-06-24 18:46:29 +0000925 }
926
Fred Shih37f05ef2014-07-16 18:38:08 -0700927 if (allocated_temp) {
928 FreeTemp(r_src);
929 }
930
Andreas Gampe3c12c512014-06-24 18:46:29 +0000931 return store;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700932}
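
// Barrier pairing above: kAnyStore before the store is only a scheduling barrier (x86
// does not reorder stores with earlier accesses), but kAnyAny after it must order the
// store against a later volatile load, and that StoreLoad edge is the one case that
// costs a real fence on x86.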

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target, LIR** compare) {
  LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
                      offset, check_value);
  if (compare != nullptr) {
    *compare = inst;
  }
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

void X86Mir2Lir::AnalyzeMIR() {
  // Assume we don't need a pointer to the base of the code.
  cu_->NewTimingSplit("X86 MIR Analysis");
  store_method_addr_ = false;

  // Walk the MIR looking for interesting items.
  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  while (curr_bb != NULL) {
    AnalyzeBB(curr_bb);
    curr_bb = iter.Next();
  }

  // Did we need a pointer to the method code?
  if (store_method_addr_) {
    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, cu_->target64 == true);
  } else {
    base_of_code_ = nullptr;
  }
}
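
// This pre-codegen pass exists because 32-bit x86 has no PC-relative addressing: any
// method that needs PC-relative data (double literals, packed-switch tables, array-fill
// payloads, certain intrinsics) is given a compiler temp, base_of_code_, that holds the
// method's base address for later loads to use as their base register.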

void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    // Ignore dead blocks.
    return;
  }

  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int opcode = mir->dalvikInsn.opcode;
    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      AnalyzeExtendedMIR(opcode, bb, mir);
    } else {
      AnalyzeMIR(opcode, bb, mir);
    }
  }
}

void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
  switch (opcode) {
    // Instructions referencing doubles.
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;
    case kMirOpConstVector:
      store_method_addr_ = true;
      break;
    default:
      // Ignore the rest.
      break;
  }
}

void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
  // Looking for:
  // - Do we need a pointer to the code (used for packed switches and double lits)?

  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;

    // Packed switches and array fills need a pointer to the base of the method.
    case Instruction::FILL_ARRAY_DATA:
    case Instruction::PACKED_SWITCH:
      store_method_addr_ = true;
      break;
    case Instruction::INVOKE_STATIC:
      AnalyzeInvokeStatic(opcode, bb, mir);
      break;
    default:
      // Other instructions are not interesting yet.
      break;
  }
}

void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
    }
  }
}

void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool on 32-bit platforms.
  if (use.is_const && !cu_->target64) {
    store_method_addr_ = true;
  }
}

RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
  loc = UpdateLoc(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
  loc = UpdateLocWide(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}
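
// Both helpers above handle a register-class mismatch (a value marked fp in the
// RegLocation but currently living in a core register, or vice versa) by clobbering the
// temp and demoting the value to its Dalvik frame home, so the caller's next load
// brings it up in the requested class.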

void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
  // For now this is only relevant for x86-32.
  if (cu_->target64) {
    return;
  }

  uint32_t index = mir->dalvikInsn.vB;
  if (!(mir->optimization_flags & MIR_INLINED)) {
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    DexFileMethodInliner* method_inliner =
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
    InlineMethod method;
    if (method_inliner->IsIntrinsic(index, &method)) {
      switch (method.opcode) {
        case kIntrinsicAbsDouble:
        case kIntrinsicMinMaxDouble:
          store_method_addr_ = true;
          break;
        default:
          break;
      }
    }
  }
}

LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  if (cu_->target64) {
    return OpThreadMem(op, GetThreadOffset<8>(trampoline));
  } else {
    return OpThreadMem(op, GetThreadOffset<4>(trampoline));
  }
}

}  // namespace art