/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

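// The tables below describe the register file to the register allocator:
// the *_regs arrays list every register the target exposes, reserved_regs
// name those excluded from allocation (the stack pointer), and the *_temps
// arrays are the caller-save subset handed out as scratch registers. The
// _32/_64 suffixes select the x86 vs x86-64 view; the _64q entries name the
// same physical GPRs as 64-bit solo registers.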
static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
#endif
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
#ifdef TARGET_REX_SUPPORT
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
#endif
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11
#endif
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
#ifdef TARGET_REX_SUPPORT
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
#endif
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
#ifdef TARGET_REX_SUPPORT
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
#endif
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
  }
  return res_reg;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // TODO: This is not 64-bit compliant and depends on new internal ABI.
  switch (arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

/*
 * Decode the register id.
 */
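// Reader's note: core registers land in the low bits of the resource mask and
// FP/vector registers in the bits starting at kX86FPReg0 (bit 16), e.g. rDX
// (reg_num 2) yields 1 << 2 while xmm2/dr2 yield 1 << (kX86FPReg0 + 2), so
// the two register files occupy disjoint bit ranges.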
uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg.GetRegNum();
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}

uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
  }
  if (flags & REG_USEA) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted.  Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask.  Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedSingle";
}

void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedDouble";
}

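// Note: on 32-bit x86 only rAX, rCX, rDX and rBX have byte-addressable low
// halves (AL..BL); the 32-bit core temp pool above is exactly that subset,
// so any core temp returned here can be used as a byte register.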
RegStorage X86Mir2Lir::AllocateByteRegister() {
  return AllocTypedTemp(false, kCoreReg);
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
}

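// On x86, lock-prefixed read-modify-write instructions (and xchg with a
// memory operand, which is implicitly locked) order all earlier loads and
// stores, so they act as full memory barriers and make a following mfence
// redundant.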
bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg8bM:
    case kX86LockCmpxchg8bA:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide a full barrier.
      return true;
    default:
      break;
  }

  // Be conservative if we cannot prove that it provides a full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence.
   * All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory
   * model. For those cases, all we need to ensure is that there is a scheduling barrier
   * in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = ENCODE_ALL;
  }
  return ret;
#else
  return false;
#endif
}

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (Gen64Bit()) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (Gen64Bit()) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
                       bool gen64bit)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (Gen64Bit()) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    // TODO: ARG4(r8), ARG5(r9), floating point args.
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
  }
  rs_rX86_FARG0 = rs_rAX;
  rs_rX86_FARG1 = rs_rCX;
  rs_rX86_FARG2 = rs_rDX;
  rs_rX86_FARG3 = rs_rBX;
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_INVOKE_TGT = rs_rAX;
  rs_rX86_COUNT = rs_rCX;
  rX86_FARG0 = rAX;
  rX86_FARG1 = rCX;
  rX86_FARG2 = rDX;
  rX86_FARG3 = rBX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, false);
}

Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                             ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, true);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32 bit quantity into a buffer
 * @param buf buffer.
 * @param data Data value.
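 * Bytes are appended in little-endian order to match x86 encoding; e.g.
 * 0x12345678 is entered as 78 56 34 12.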
 */
static void PushWord(std::vector<uint8_t>& buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary.  We have implicit knowledge that the start of the method is
  // on a 4 byte boundary.  How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
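  // Sketch of the intent: pad so that (code_buffer_.size() + 4) becomes a
  // multiple of 16; e.g. a buffer size of 40 (0x28) gets 4 bytes of padding,
  // leaving the vector data 16-byte aligned once the 4-byte-aligned method
  // start is accounted for.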
  if (const_vectors_ != nullptr) {
    int align_size = (16 - 4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR* p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast string.index_of(I) & (II).  Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in the (III) flavor of IndexOf.

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(kCoreReg);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search into rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR* length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string.  Use the special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle the "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case: we will use EDI further, so let's put the start index on the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from the stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
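  // repne scasw compares AX against the 16-bit word at [EDI], advancing EDI
  // by 2 and decrementing ECX each iteration, and stops on the first match
  // or when ECX reaches zero.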
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR* all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR* not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out-of-line code returns here.
  if (slowpath_branch != nullptr) {
    LIR* return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
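 * This emits the DWARF DW_CFA_advance_loc family: deltas under 64 are folded
 * into the opcode byte itself; larger deltas use the 1-, 2- or 4-byte forms.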
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}

std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

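  // Build the single shared CIE describing the call-frame state on method
  // entry: CFA = ESP + 4, with the return address stored at CFA - 4.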
1173 // Length of the CIE (except for this field).
1174 PushWord(*cfi_info, 16);
1175
1176 // CIE id.
1177 PushWord(*cfi_info, 0xFFFFFFFFU);
1178
1179 // Version: 3.
1180 cfi_info->push_back(0x03);
1181
1182 // Augmentation: empty string.
1183 cfi_info->push_back(0x0);
1184
1185 // Code alignment: 1.
1186 cfi_info->push_back(0x01);
1187
1188 // Data alignment: -4.
1189 cfi_info->push_back(0x7C);
1190
1191 // Return address register (R8).
1192 cfi_info->push_back(0x08);
1193
1194 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
1195 cfi_info->push_back(0x0C);
1196 cfi_info->push_back(0x04);
1197 cfi_info->push_back(0x04);
1198
1199 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
1200 cfi_info->push_back(0x2 << 6 | 0x08);
1201 cfi_info->push_back(0x01);
1202
1203 // And 2 Noops to align to 4 byte boundary.
1204 cfi_info->push_back(0x0);
1205 cfi_info->push_back(0x0);
1206
1207 DCHECK_EQ(cfi_info->size() & 3, 0U);
1208 return cfi_info;
1209}
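
// The CIE above is 20 bytes in total: a 4-byte length field holding 16,
// followed by the 16 bytes that length counts (id, version, augmentation,
// alignment factors, return-address register, initial instructions, padding).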

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t *p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}
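
// ULEB128 stores seven bits per byte with the high bit set on every byte but
// the last; the value 624485 (0x98765), for example, is appended as the three
// bytes {0xe5, 0x8e, 0x26}.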

std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // The stack bump was followed by a RET instruction.
      LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes (little-endian,
  // excluding the length field itself).
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}
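
// Illustration (offsets are hypothetical): for a 64-byte frame whose stack
// decrement ends at offset 6 and whose stack increment ends at offset 20, the
// FDE instruction stream would be DW_CFA_advance_loc 6; DW_CFA_def_cfa_offset
// 64; DW_CFA_advance_loc 14; DW_CFA_remember_state; DW_CFA_def_cfa_offset 4;
// then, past the one-byte RET, DW_CFA_advance_loc 1; DW_CFA_restore_state;
// followed by DW_CFA_nop padding to a 4-byte boundary.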

void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpConstVector:
      GenConst128(bb, mir);
      break;
    case kMirOpMoveVector:
      GenMoveVector(bb, mir);
      break;
    case kMirOpPackedMultiply:
      GenMultiplyVector(bb, mir);
      break;
    case kMirOpPackedAddition:
      GenAddVector(bb, mir);
      break;
    case kMirOpPackedSubtract:
      GenSubtractVector(bb, mir);
      break;
    case kMirOpPackedShiftLeft:
      GenShiftLeftVector(bb, mir);
      break;
    case kMirOpPackedSignedShiftRight:
      GenSignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedUnsignedShiftRight:
      GenUnsignedShiftRightVector(bb, mir);
      break;
    case kMirOpPackedAnd:
      GenAndVector(bb, mir);
      break;
    case kMirOpPackedOr:
      GenOrVector(bb, mir);
      break;
    case kMirOpPackedXor:
      GenXorVector(bb, mir);
      break;
    case kMirOpPackedAddReduce:
      GenAddReduceVector(bb, mir);
      break;
    case kMirOpPackedReduce:
      GenReduceVector(bb, mir);
      break;
    case kMirOpPackedSet:
      GenSetVector(bb, mir);
      break;
    default:
      break;
  }
}

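// For each packed opcode below, vA carries the element size (an OpSize) in its
// high 16 bits and the vector width in bits in its low 16 bits; only 128-bit
// (XMM-sized) vectors are supported.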
void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
  int type_size = mir->dalvikInsn.vA;
  // We only support 128 bit vectors.
  DCHECK_EQ(type_size & 0xFFFF, 128);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  uint32_t *args = mir->dalvikInsn.arg;
  int reg = rs_dest.GetReg();
  // Check for the all-zero case; XORPS zeroes the register without a literal load.
  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
    NewLIR2(kX86XorpsRR, reg, reg);
    return;
  }
  // Okay, load it from the constant vector area.
  LIR *data_target = ScanVectorLiteral(mir);
  if (data_target == nullptr) {
    data_target = AddVectorLiteral(mir);
  }

  // Address the start of the method.
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  rl_method = LoadValue(rl_method, kCoreReg);

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // a 4 byte offset. We will fix this up in the assembler later to have the
  // right value.
  LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(), 256 /* bogus */);
  load->flags.fixup = kFixupLoad;
  load->target = data_target;
  SetMemRefType(load, true, kLiteral);
}

void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
}

void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PmulldRR;
      break;
    case kSignedHalf:
      opcode = kX86PmullwRR;
      break;
    case kSingle:
      opcode = kX86MulpsRR;
      break;
    case kDouble:
      opcode = kX86MulpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector multiply " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
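
// Note that PMULLD (the k32 case) is an SSE4.1 instruction, whereas PMULLW,
// MULPS, and MULPD are available from SSE/SSE2; 32-bit packed multiplies
// therefore assume an SSE4.1-capable target.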

void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PaddwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PaddbRR;
      break;
    case kSingle:
      opcode = kX86AddpsRR;
      break;
    case kDouble:
      opcode = kX86AddpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector addition " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsubdRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsubwRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PsubbRR;
      break;
    case kSingle:
      opcode = kX86SubpsRR;
      break;
    case kDouble:
      opcode = kX86SubpdRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PslldRI;
      break;
    case k64:
      opcode = kX86PsllqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsllwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector shift left " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsradRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrawRI;
      break;
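    // There is no k64 case: SSE provides no packed arithmetic right shift on
    // 64-bit lanes (no PSRAQ), so quadword operands hit the fatal default.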
    default:
      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PsrldRI;
      break;
    case k64:
      opcode = kX86PsrlqRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PsrlwRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
  // We only support 128 bit registers.
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vC);
  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}

void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
  int imm = mir->dalvikInsn.vC;
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PhadddRR;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PhaddwRR;
      break;
    default:
      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
      break;
  }
  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}

void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
  int index = mir->dalvikInsn.arg[0];
  int opcode = 0;
  switch (opsize) {
    case k32:
      opcode = kX86PextrdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      opcode = kX86PextrwRRI;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kX86PextrbRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector reduce " << opsize;
      break;
  }
  // We need to extract to a GPR.
  RegStorage temp = AllocTemp();
  NewLIR3(opcode, temp.GetReg(), rs_src.GetReg(), index);

  // Assume that the destination VR is in the def for the mir.
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegLocation rl_temp =
      {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, temp, INVALID_SREG, INVALID_SREG};
  StoreValue(rl_dest, rl_temp);
}

void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
  DCHECK_EQ(mir->dalvikInsn.vA & 0xFFFF, 128U);
  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vA >> 16);
  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vB);
  int op_low = 0, op_high = 0;
  switch (opsize) {
    case k32:
      op_low = kX86PshufdRRI;
      break;
    case kSignedHalf:
    case kUnsignedHalf:
      // Handles low quadword.
      op_low = kX86PshuflwRRI;
      // Handles upper quadword.
      op_high = kX86PshufdRRI;
      break;
    default:
      LOG(FATAL) << "Unsupported vector set " << opsize;
      break;
  }

  // Load the value from the VR into a GPR.
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, kCoreReg);

  // Load the value into the XMM register.
  NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rl_src.reg.GetReg());

  // Now shuffle the value across the destination.
  NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), 0);

  // And then repeat as needed.
  if (op_high != 0) {
    NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
  }
}
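
// For halfword elements, the broadcast takes both shuffles: PSHUFLW with an
// immediate of 0 replicates word 0 across the low quadword, after which PSHUFD
// with an immediate of 0 replicates dword 0 across the whole 128-bit register.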

LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
    if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
        args[2] == p->operands[2] && args[3] == p->operands[3]) {
      return p;
    }
  }
  return nullptr;
}

LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
  int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
  new_value->operands[0] = args[0];
  new_value->operands[1] = args[1];
  new_value->operands[2] = args[2];
  new_value->operands[3] = args[3];
  new_value->next = const_vectors_;
  if (const_vectors_ == nullptr) {
    estimated_native_code_size_ += 12;  // Worst-case padding to align to a 16 byte boundary.
  }
  estimated_native_code_size_ += 16;  // Space for one vector.
  const_vectors_ = new_value;
  return new_value;
}

}  // namespace art