/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
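// The temp pools below list the registers the allocator may freely hand out as temporaries.
// Note that the 64-bit core pools omit rBX, rBP and r12-r15, which are callee-saved in the
// x86-64 ABI.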
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

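// The globals below alias the physical argument/return registers. They are bound to either the
// 32-bit or the 64-bit calling convention in the X86Mir2Lir constructor.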
RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register for 32-bit.
RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  LOG(FATAL) << "Do not use this function!!!";
  return RegStorage::InvalidReg();
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}

ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return kEncodeNone;
}

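// Translate the target-independent use/def flags of this LIR into x86-specific resource mask bits.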
void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }
  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number+1]));
            buf += StringPrintf("%" PRId64, value);
            break;
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

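// On 32-bit x86 only the first four GPRs (EAX..EBX) have byte-addressable views, hence the range
// check below; x86-64 can byte-address any GPR via a REX prefix.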
RegStorage X86Mir2Lir::AllocateByteRegister() {
  RegStorage reg = AllocTypedTemp(false, kCoreReg);
  if (!cu_->target64) {
    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
  }
  return reg;
}

RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
  return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
}

bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);

  Clobber(rs_fr0);
  Clobber(rs_fr1);
  Clobber(rs_fr2);
  Clobber(rs_fr3);
  Clobber(rs_fr4);
  Clobber(rs_fr5);
  Clobber(rs_fr6);
  Clobber(rs_fr7);

  if (cu_->target64) {
    Clobber(rs_r8);
    Clobber(rs_r9);
    Clobber(rs_r10);
    Clobber(rs_r11);

    Clobber(rs_fr8);
    Clobber(rs_fr9);
    Clobber(rs_fr10);
    Clobber(rs_fr11);
    Clobber(rs_fr12);
    Clobber(rs_fr13);
    Clobber(rs_fr14);
    Clobber(rs_fr15);
  }
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}

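// Returns true when the opcode already implies a full memory barrier on x86: locked
// read-modify-write instructions, xchg with a memory operand, and mfence itself.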
bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Conservative if cannot prove it provides full barrier.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers
   * (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need
   * to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If last instruction does not provide full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}

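// Build the register pools for the current target and wire up view aliasing: 32-bit singles,
// 64-bit doubles and 128-bit XMM views share one master, as do 32-bit/64-bit GPR views on x86-64.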
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
    info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias 32bit W registers to corresponding 64bit X registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // 64bit X register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect 32bit W master storage to 64bit X.
      info->SetMaster(x_reg_info);
      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (cu_->target64) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
//    rX86_ARG4 = RegStorage::InvalidReg();
//    rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;

  // Initialize the number of reserved vector registers
  num_reserved_vector_regs_ = -1;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  // First load the pointer in fs:[suspend-trigger] into eax
  // Then use a test instruction to indirect via that address.
  NewLIR2(kX86Mov32RT, rs_rAX.GetReg(), Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
  return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

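// For a wide constant whose destination already lives in the Dalvik frame, store the two 32-bit
// halves straight to memory instead of materializing a register pair first.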
void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

/*
 * @brief Enter a 32 bit quantity into a buffer
 * @param buf buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int32_t data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

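// X86 keeps no literal pools for code/method/class addresses. Emit any constant vector data,
// then record linker patches for the address moves and relative calls generated above.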
void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Align to 16 byte boundary. We have implicit knowledge that the start of the method is
  // on a 4 byte boundary. How can I check this if it changes (other than aligned loads
  // will fail at runtime)?
  if (const_vectors_ != nullptr) {
    int align_size = (16-4) - (code_buffer_.size() & 0xF);
    if (align_size < 0) {
      align_size += 16;
    }

    while (align_size > 0) {
      code_buffer_.push_back(0);
      align_size--;
    }
    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
      PushWord(code_buffer_, p->operands[0]);
      PushWord(code_buffer_, p->operands[1]);
      PushWord(code_buffer_, p->operands[2]);
      PushWord(code_buffer_, p->operands[3]);
    }
  }

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

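// Inlined copy intrinsic for char arrays (32-bit targets only for now): validate the source and
// destination arrays and ranges, then copy the elements two chars at a time.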
bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  if (cu_->target64) {
    // TODO: Implement ArrayCopy intrinsic for x86_64
    return false;
  }

  RegLocation rl_src = info->args[0];
  RegLocation rl_srcPos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dstPos = info->args[3];
  RegLocation rl_length = info->args[4];
  if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
    return false;
  }
  if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rCX);
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  LoadValueDirectFixed(rl_length, rs_rDX);
  LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX, 0, nullptr);
  LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX, 128, nullptr);
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  LIR* src_bad_len = nullptr;
  LIR* srcPos_negative = nullptr;
  if (!rl_srcPos.is_const) {
    LoadValueDirectFixed(rl_srcPos, rs_rBX);
    srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegReg(kOpAdd, rs_rBX, rs_rDX);
    src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
    if (pos_val == 0) {
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  LIR* dstPos_negative = nullptr;
  LIR* dst_bad_len = nullptr;
  LoadValueDirectFixed(rl_dst, rs_rAX);
  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
  if (!rl_dstPos.is_const) {
    LoadValueDirectFixed(rl_dstPos, rs_rBX);
    dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr);
    OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
    dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
  } else {
    int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
    if (pos_val == 0) {
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
    } else {
      OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr);
    }
  }
  // Everything is checked now.
  LoadValueDirectFixed(rl_src, rs_rAX);
  LoadValueDirectFixed(rl_dst, rs_rBX);
  LoadValueDirectFixed(rl_srcPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RAX now holds the address of the first src element to be copied.

  LoadValueDirectFixed(rl_dstPos, rs_rCX);
  NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
          rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
  // RBX now holds the address of the first dst element to be copied.

  // Check if the number of elements to be copied is odd or even. If odd
  // then copy the first element (so that the remaining number of elements
  // is even).
  LoadValueDirectFixed(rl_length, rs_rCX);
  OpRegImm(kOpAnd, rs_rCX, 1);
  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 1);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);

  // Since the remaining number of elements is even, we will copy by
  // two elements at a time.
  LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
  OpRegImm(kOpSub, rs_rDX, 2);
  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
  OpUnconditionalBranch(beginLoop);
  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR *return_point = NewLIR0(kPseudoTargetLabel);
  jmp_to_ret->target = return_point;
  jmp_to_begin_loop->target = beginLoop;
  src_dst_same->target = check_failed;
  len_negative->target = check_failed;
  len_too_big->target = check_failed;
  src_null_branch->target = check_failed;
  if (srcPos_negative != nullptr)
    srcPos_negative->target = check_failed;
  if (src_bad_len != nullptr)
    src_bad_len->target = check_failed;
  dst_null_branch->target = check_failed;
  if (dstPos_negative != nullptr)
    dstPos_negative->target = check_failed;
  if (dst_bad_len != nullptr)
    dst_bad_len->target = check_failed;
  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  return true;
}

Mark Mendell4028a6c2014-02-19 20:06:20 -08001160/*
1161 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff,
1162 * otherwise bails to standard library code.
1163 */
1164bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1165 ClobberCallerSave();
1166 LockCallTemps(); // Using fixed registers
1167
1168 // EAX: 16 bit character being searched.
1169 // ECX: count: number of words to be searched.
1170 // EDI: String being searched.
1171 // EDX: temporary during execution.
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001172 // EBX or R11: temporary during execution (depending on mode).
Mark Mendell4028a6c2014-02-19 20:06:20 -08001173
1174 RegLocation rl_obj = info->args[0];
1175 RegLocation rl_char = info->args[1];
buzbeea44d4f52014-03-05 11:26:39 -08001176 RegLocation rl_start; // Note: only present in the III flavor of IndexOf.
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001177 RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX;
Mark Mendell4028a6c2014-02-19 20:06:20 -08001178
1179 uint32_t char_value =
1180 rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;
1181
1182 if (char_value > 0xFFFF) {
1183 // We have to punt to the real String.indexOf.
1184 return false;
1185 }
1186
1187 // Okay, we are committed to inlining this.
buzbeea0cd2d72014-06-01 09:33:49 -07001188 RegLocation rl_return = GetReturn(kCoreReg);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001189 RegLocation rl_dest = InlineTarget(info);
1190
1191 // Is the string non-NULL?
buzbee2700f7e2014-03-07 09:46:20 -08001192 LoadValueDirectFixed(rl_obj, rs_rDX);
1193 GenNullCheck(rs_rDX, info->opt_flags);
Dave Allison34e826c2014-05-29 08:20:04 -07001194 // uint32_t opt_flags = info->opt_flags;
Vladimir Marko3bc86152014-03-13 14:11:28 +00001195 info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
Mark Mendell4028a6c2014-02-19 20:06:20 -08001196
1197 // Does the character fit in 16 bits?
Mingyao Yang3a74d152014-04-21 15:39:44 -07001198 LIR* slowpath_branch = nullptr;
Mark Mendell4028a6c2014-02-19 20:06:20 -08001199 if (rl_char.is_const) {
1200 // We need the value in EAX.
buzbee2700f7e2014-03-07 09:46:20 -08001201 LoadConstantNoClobber(rs_rAX, char_value);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001202 } else {
1203 // Character is not a constant; compare at runtime.
buzbee2700f7e2014-03-07 09:46:20 -08001204 LoadValueDirectFixed(rl_char, rs_rAX);
Mingyao Yang3a74d152014-04-21 15:39:44 -07001205 slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001206 }
1207
1208 // From here down, we know that we are looking for a char that fits in 16 bits.
Mark Mendelle19c91f2014-02-25 08:19:08 -08001209 // Location of reference to data array within the String object.
1210 int value_offset = mirror::String::ValueOffset().Int32Value();
1211 // Location of count within the String object.
1212 int count_offset = mirror::String::CountOffset().Int32Value();
1213 // Starting offset within data array.
1214 int offset_offset = mirror::String::OffsetOffset().Int32Value();
1215 // Start of char data with array_.
1216 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
Mark Mendell4028a6c2014-02-19 20:06:20 -08001217
1218 // Character is in EAX.
1219 // Object pointer is in EDX.
1220
Dave Allison34e826c2014-05-29 08:20:04 -07001221 // Compute the number of words to search, placing it in rCX.
1222 Load32Disp(rs_rDX, count_offset, rs_rCX);
1223
1224 // Possible signal here due to null pointer dereference.
1225 // Note that the signal handler will expect the top word of
1226 // the stack to be the ArtMethod*. If the PUSH edi instruction
1227 // below is ahead of the load above then this will not be true
1228 // and the signal handler will not work.
1229 MarkPossibleNullPointerException(0);
1230
Mark Mendell4028a6c2014-02-19 20:06:20 -08001231 // We need to preserve EDI, but have no spare registers, so push it on the stack.
1232 // We have to remember that all stack addresses after this are offset by sizeof(EDI).
buzbee091cc402014-03-31 10:14:40 -07001233 NewLIR1(kX86Push32R, rs_rDI.GetReg());
Mark Mendell4028a6c2014-02-19 20:06:20 -08001234
Mark Mendell4028a6c2014-02-19 20:06:20 -08001235 LIR *length_compare = nullptr;
1236 int start_value = 0;
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001237 bool is_index_on_stack = false;
Mark Mendell4028a6c2014-02-19 20:06:20 -08001238 if (zero_based) {
1239 // We have to handle an empty string. Use special instruction JECXZ.
1240 length_compare = NewLIR0(kX86Jecxz8);
1241 } else {
buzbeea44d4f52014-03-05 11:26:39 -08001242 rl_start = info->args[2];
Mark Mendell4028a6c2014-02-19 20:06:20 -08001243 // We have to offset by the start index.
1244 if (rl_start.is_const) {
1245 start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
1246 start_value = std::max(start_value, 0);
1247
1248 // Is the start > count?
buzbee2700f7e2014-03-07 09:46:20 -08001249 length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001250
1251 if (start_value != 0) {
buzbee2700f7e2014-03-07 09:46:20 -08001252 OpRegImm(kOpSub, rs_rCX, start_value);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001253 }
1254 } else {
1255 // Runtime start index.
buzbee30adc732014-05-09 15:10:18 -07001256 rl_start = UpdateLocTyped(rl_start, kCoreReg);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001257 if (rl_start.location == kLocPhysReg) {
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001258 // Handle "start index < 0" case.
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001259 OpRegReg(kOpXor, tmpReg, tmpReg);
1260 OpRegReg(kOpCmp, rl_start.reg, tmpReg);
1261 OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg);
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001262
1263 // The length of the string should be greater than the start index.
buzbee2700f7e2014-03-07 09:46:20 -08001264 length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
1265 OpRegReg(kOpSub, rs_rCX, rl_start.reg);
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001266 if (rl_start.reg == rs_rDI) {
1267 // The special case. We will use EDI further, so let's put the start index on the stack.
buzbee091cc402014-03-31 10:14:40 -07001268 NewLIR1(kX86Push32R, rs_rDI.GetReg());
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001269 is_index_on_stack = true;
1270 }
Mark Mendell4028a6c2014-02-19 20:06:20 -08001271 } else {
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001272 // Load the start index from stack, remembering that we pushed EDI.
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001273 int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
Vladimir Marko8dea81c2014-06-06 14:50:36 +01001274 {
1275 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001276 Load32Disp(rs_rX86_SP, displacement, tmpReg);
Vladimir Marko8dea81c2014-06-06 14:50:36 +01001277 }
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001278 OpRegReg(kOpXor, rs_rDI, rs_rDI);
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001279 OpRegReg(kOpCmp, tmpReg, rs_rDI);
1280 OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI);
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001281
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001282 length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr);
1283 OpRegReg(kOpSub, rs_rCX, tmpReg);
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001284 // Put the start index on the stack.
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001285 NewLIR1(kX86Push32R, tmpReg.GetReg());
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001286 is_index_on_stack = true;
Mark Mendell4028a6c2014-02-19 20:06:20 -08001287 }
1288 }
1289 }
1290 DCHECK(length_compare != nullptr);
1291
1292 // ECX now contains the count in words to be searched.
1293
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001294 // Load the address of the string into R11 or EBX (depending on mode).
Mark Mendelle19c91f2014-02-25 08:19:08 -08001295 // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
buzbee695d13a2014-04-19 13:32:20 -07001296 Load32Disp(rs_rDX, value_offset, rs_rDI);
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001297 Load32Disp(rs_rDX, offset_offset, tmpReg);
1298 OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001299
1300 // Now compute into EDI where the search will start.
1301 if (zero_based || rl_start.is_const) {
1302 if (start_value == 0) {
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001303 OpRegCopy(rs_rDI, tmpReg);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001304 } else {
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001305 NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001306 }
1307 } else {
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001308 if (is_index_on_stack == true) {
1309 // Load the start index from stack.
buzbee091cc402014-03-31 10:14:40 -07001310 NewLIR1(kX86Pop32R, rs_rDX.GetReg());
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001311 OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0);
Alexei Zavjalova1758d82014-04-17 01:55:43 +07001312 } else {
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001313 OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001314 }
1315 }
1316
1317 // EDI now contains the start of the string to be searched.
1318 // We are all prepared to do the search for the character.
1319 NewLIR0(kX86RepneScasw);
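  // repne scasw compares AX with the word at [EDI], advancing EDI by 2 and
  // decrementing ECX each step, until a match is found or ECX reaches zero.
  // On a match EDI points one element past the matching char, which is why the
  // index computation below subtracts 1.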
1320
1321 // Did we find a match?
1322 LIR* failed_branch = OpCondBranch(kCondNe, nullptr);
1323
1324 // Yes, we matched. Compute the index of the result.
1325 // index = ((curr_ptr - orig_ptr) / 2) - 1.
nikolay serdjukc5e4ce12014-06-10 17:07:10 +07001326 OpRegReg(kOpSub, rs_rDI, tmpReg);
buzbee2700f7e2014-03-07 09:46:20 -08001327 OpRegImm(kOpAsr, rs_rDI, 1);
buzbee091cc402014-03-31 10:14:40 -07001328 NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001329 LIR *all_done = NewLIR1(kX86Jmp8, 0);
1330
1331 // Failed to match; return -1.
1332 LIR *not_found = NewLIR0(kPseudoTargetLabel);
1333 length_compare->target = not_found;
1334 failed_branch->target = not_found;
buzbee2700f7e2014-03-07 09:46:20 -08001335 LoadConstantNoClobber(rl_return.reg, -1);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001336
1337 // And join up at the end.
1338 all_done->target = NewLIR0(kPseudoTargetLabel);
1339 // Restore EDI from the stack.
buzbee091cc402014-03-31 10:14:40 -07001340 NewLIR1(kX86Pop32R, rs_rDI.GetReg());
Mark Mendell4028a6c2014-02-19 20:06:20 -08001341
1342 // Out of line code returns here.
Mingyao Yang3a74d152014-04-21 15:39:44 -07001343 if (slowpath_branch != nullptr) {
Mark Mendell4028a6c2014-02-19 20:06:20 -08001344 LIR *return_point = NewLIR0(kPseudoTargetLabel);
Mingyao Yang3a74d152014-04-21 15:39:44 -07001345 AddIntrinsicSlowPath(info, slowpath_branch, return_point);
Mark Mendell4028a6c2014-02-19 20:06:20 -08001346 }
1347
1348 StoreValue(rl_dest, rl_return);
1349 return true;
1350}
1351
Mark Mendellae9fd932014-02-10 16:14:35 -08001352/*
Mark Mendellae9fd932014-02-10 16:14:35 -08001353 * @brief Enter an 'advance LOC' into the FDE buffer
1354 * @param buf FDE buffer.
1355 * @param increment Amount by which to increase the current location.
1356 */
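// Example encodings (standard DWARF DW_CFA_advance_loc forms, as emitted below):
// increment 10 -> 0x4a; increment 100 -> 0x02 0x64; increment 300 -> 0x03 0x2c 0x01.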
1357static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
1358 if (increment < 64) {
1359 // Encoding in opcode.
1360 buf.push_back(0x1 << 6 | increment);
1361 } else if (increment < 256) {
1362 // Single byte delta.
1363 buf.push_back(0x02);
1364 buf.push_back(increment);
1365 } else if (increment < 256 * 256) {
1366 // Two byte delta.
1367 buf.push_back(0x03);
1368 buf.push_back(increment & 0xff);
1369 buf.push_back((increment >> 8) & 0xff);
1370 } else {
1371 // Four byte delta.
1372 buf.push_back(0x04);
1373 PushWord(buf, increment);
1374 }
1375}
1376
1377
1378std::vector<uint8_t>* X86CFIInitialization() {
1379 return X86Mir2Lir::ReturnCommonCallFrameInformation();
1380}
1381
1382std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
1383 std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
1384
1385 // Length of the CIE (except for this field).
1386 PushWord(*cfi_info, 16);
1387
1388 // CIE id.
1389 PushWord(*cfi_info, 0xFFFFFFFFU);
1390
1391 // Version: 3.
1392 cfi_info->push_back(0x03);
1393
1394 // Augmentation: empty string.
1395 cfi_info->push_back(0x0);
1396
1397 // Code alignment: 1.
1398 cfi_info->push_back(0x01);
1399
1400 // Data alignment: -4.
1401 cfi_info->push_back(0x7C);
1402
1403 // Return address register (R8).
1404 cfi_info->push_back(0x08);
1405
1406 // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
1407 cfi_info->push_back(0x0C);
1408 cfi_info->push_back(0x04);
1409 cfi_info->push_back(0x04);
1410
1411 // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4).
1412 cfi_info->push_back(0x2 << 6 | 0x08);
1413 cfi_info->push_back(0x01);
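  // (0x2 << 6 | 0x08) == 0x88 is DW_CFA_offset for register 8; the factored offset
  // operand of 1 is scaled by the data alignment (-4), i.e. the return address is
  // saved at CFA-4.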
1414
1415 // And 2 Noops to align to 4 byte boundary.
1416 cfi_info->push_back(0x0);
1417 cfi_info->push_back(0x0);
1418
1419 DCHECK_EQ(cfi_info->size() & 3, 0U);
1420 return cfi_info;
1421}
1422
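// For example, EncodeUnsignedLeb128(buf, 624485) appends the ULEB128 bytes
// 0xe5 0x8e 0x26 (the worked example from the DWARF specification).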
1423static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
1424 uint8_t buffer[12];
1425 uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
1426 for (uint8_t *p = buffer; p < ptr; p++) {
1427 buf.push_back(*p);
1428 }
1429}
1430
1431std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
1432 std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
1433
1434 // Generate the FDE for the method.
1435 DCHECK_NE(data_offset_, 0U);
1436
1437 // Length (will be filled in later in this routine).
1438 PushWord(*cfi_info, 0);
1439
1440 // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
1441 // one CIE for the whole debug_frame section.
1442 PushWord(*cfi_info, 0);
1443
1444 // 'initial_location' (filled in by linker).
1445 PushWord(*cfi_info, 0);
1446
1447 // 'address_range' (number of bytes in the method).
1448 PushWord(*cfi_info, data_offset_);
1449
1450 // The instructions in the FDE.
1451 if (stack_decrement_ != nullptr) {
1452 // Advance LOC to just past the stack decrement.
1453 uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
1454 AdvanceLoc(*cfi_info, pc);
1455
1456 // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
1457 cfi_info->push_back(0x0e);
1458 EncodeUnsignedLeb128(*cfi_info, frame_size_);
1459
1460 // We continue with that stack until the epilogue.
1461 if (stack_increment_ != nullptr) {
1462 uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
1463 AdvanceLoc(*cfi_info, new_pc - pc);
1464
1465 // We probably have code snippets after the epilogue, so save the
1466 // current state: DW_CFA_remember_state.
1467 cfi_info->push_back(0x0a);
1468
1469 // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
1470 // PC on the stack now.
1471 cfi_info->push_back(0x0e);
1472 EncodeUnsignedLeb128(*cfi_info, 4);
1473
1474 // Everything after that is the same as before the epilogue.
1475 // Stack bump was followed by RET instruction.
1476 LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
1477 if (post_ret_insn != nullptr) {
1478 pc = new_pc;
1479 new_pc = post_ret_insn->offset;
1480 AdvanceLoc(*cfi_info, new_pc - pc);
1481 // Restore the state: DW_CFA_restore_state.
1482 cfi_info->push_back(0x0b);
1483 }
1484 }
1485 }
1486
1487 // Padding to a multiple of 4
1488 while ((cfi_info->size() & 3) != 0) {
1489 // DW_CFA_nop is encoded as 0.
1490 cfi_info->push_back(0);
1491 }
1492
1493 // Set the length of the FDE inside the generated bytes.
1494 uint32_t length = cfi_info->size() - 4;
1495 (*cfi_info)[0] = length;
1496 (*cfi_info)[1] = length >> 8;
1497 (*cfi_info)[2] = length >> 16;
1498 (*cfi_info)[3] = length >> 24;
1499 return cfi_info;
1500}
1501
Mark Mendelld65c51a2014-04-29 16:55:20 -04001502void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
1503 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001504 case kMirOpReserveVectorRegisters:
1505 ReserveVectorRegisters(mir);
1506 break;
1507 case kMirOpReturnVectorRegisters:
1508 ReturnVectorRegisters();
1509 break;
Mark Mendelld65c51a2014-04-29 16:55:20 -04001510 case kMirOpConstVector:
1511 GenConst128(bb, mir);
1512 break;
Mark Mendellfe945782014-05-22 09:52:36 -04001513 case kMirOpMoveVector:
1514 GenMoveVector(bb, mir);
1515 break;
1516 case kMirOpPackedMultiply:
1517 GenMultiplyVector(bb, mir);
1518 break;
1519 case kMirOpPackedAddition:
1520 GenAddVector(bb, mir);
1521 break;
1522 case kMirOpPackedSubtract:
1523 GenSubtractVector(bb, mir);
1524 break;
1525 case kMirOpPackedShiftLeft:
1526 GenShiftLeftVector(bb, mir);
1527 break;
1528 case kMirOpPackedSignedShiftRight:
1529 GenSignedShiftRightVector(bb, mir);
1530 break;
1531 case kMirOpPackedUnsignedShiftRight:
1532 GenUnsignedShiftRightVector(bb, mir);
1533 break;
1534 case kMirOpPackedAnd:
1535 GenAndVector(bb, mir);
1536 break;
1537 case kMirOpPackedOr:
1538 GenOrVector(bb, mir);
1539 break;
1540 case kMirOpPackedXor:
1541 GenXorVector(bb, mir);
1542 break;
1543 case kMirOpPackedAddReduce:
1544 GenAddReduceVector(bb, mir);
1545 break;
1546 case kMirOpPackedReduce:
1547 GenReduceVector(bb, mir);
1548 break;
1549 case kMirOpPackedSet:
1550 GenSetVector(bb, mir);
1551 break;
Mark Mendelld65c51a2014-04-29 16:55:20 -04001552 default:
1553 break;
1554 }
1555}
1556
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001557void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
1558 // We should not try to reserve twice without returning the registers.
1559 DCHECK_EQ(num_reserved_vector_regs_, -1);
1560
1561 int num_vector_reg = mir->dalvikInsn.vA;
1562 for (int i = 0; i < num_vector_reg; i++) {
1563 RegStorage xp_reg = RegStorage::Solo128(i);
1564 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
1565 Clobber(xp_reg);
1566
1567 for (RegisterInfo *info = xp_reg_info->GetAliasChain();
1568 info != nullptr;
1569 info = info->GetAliasChain()) {
1570 if (info->GetReg().IsSingle()) {
1571 reg_pool_->sp_regs_.Delete(info);
1572 } else {
1573 reg_pool_->dp_regs_.Delete(info);
1574 }
1575 }
1576 }
1577
1578 num_reserved_vector_regs_ = num_vector_reg;
1579}
1580
1581void X86Mir2Lir::ReturnVectorRegisters() {
1582 // Return all the reserved registers
1583 for (int i = 0; i < num_reserved_vector_regs_; i++) {
1584 RegStorage xp_reg = RegStorage::Solo128(i);
1585 RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
1586
1587 for (RegisterInfo *info = xp_reg_info->GetAliasChain();
1588 info != nullptr;
1589 info = info->GetAliasChain()) {
1590 if (info->GetReg().IsSingle()) {
1591 reg_pool_->sp_regs_.Insert(info);
1592 } else {
1593 reg_pool_->dp_regs_.Insert(info);
1594 }
1595 }
1596 }
1597
1598 // We don't have any more reserved vector registers.
1599 num_reserved_vector_regs_ = -1;
1600}
1601
Mark Mendelld65c51a2014-04-29 16:55:20 -04001602void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001603 store_method_addr_used_ = true;
1604 int type_size = mir->dalvikInsn.vB;
Mark Mendelld65c51a2014-04-29 16:55:20 -04001605 // We support 128 bit vectors.
1606 DCHECK_EQ(type_size & 0xFFFF, 128);
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001607 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
Mark Mendelld65c51a2014-04-29 16:55:20 -04001608 uint32_t *args = mir->dalvikInsn.arg;
Mark Mendellfe945782014-05-22 09:52:36 -04001609 int reg = rs_dest.GetReg();
Mark Mendelld65c51a2014-04-29 16:55:20 -04001610 // Check for all 0 case.
1611 if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
1612 NewLIR2(kX86XorpsRR, reg, reg);
1613 return;
1614 }
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001615
1616 // Append the mov const vector to reg opcode.
1617 AppendOpcodeWithConst(kX86MovupsRM, reg, mir);
1618}
1619
1620void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
Mark Mendelld65c51a2014-04-29 16:55:20 -04001621 // Okay, load it from the constant vector area.
1622 LIR *data_target = ScanVectorLiteral(mir);
1623 if (data_target == nullptr) {
1624 data_target = AddVectorLiteral(mir);
1625 }
1626
1627 // Address the start of the method.
1628 RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
Chao-ying Fue0ccdc02014-06-06 17:32:37 -07001629 if (rl_method.wide) {
1630 rl_method = LoadValueWide(rl_method, kCoreReg);
1631 } else {
1632 rl_method = LoadValue(rl_method, kCoreReg);
1633 }
Mark Mendelld65c51a2014-04-29 16:55:20 -04001634
1635 // Load the proper value from the literal area.
1636 // We don't know the proper offset for the value, so pick one that will force
1637 // 4 byte offset. We will fix this up in the assembler later to have the right
1638 // value.
Vladimir Marko8dea81c2014-06-06 14:50:36 +01001639 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001640 LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
Mark Mendelld65c51a2014-04-29 16:55:20 -04001641 load->flags.fixup = kFixupLoad;
1642 load->target = data_target;
Mark Mendelld65c51a2014-04-29 16:55:20 -04001643}
1644
Mark Mendellfe945782014-05-22 09:52:36 -04001645void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
1646 // We only support 128 bit registers.
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001647 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1648 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
1649 RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001650 NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
1651}
1652
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001653void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) {
1654 const int BYTE_SIZE = 8;
1655 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1656 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
1657 RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide());
1658
1659 /*
1660 * Emulate the behavior of a kSignedByte multiply by separating out the 16 values in the two XMM registers
1661 * and multiplying 8 at a time before recombining back into one XMM register.
1662 *
1663 * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
1664 * xmm3 is tmp (operate on high bits of 16bit lanes)
1665 *
1666 * xmm3 = xmm1
1667 * xmm1 = xmm1 .* xmm2
1668 * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff // xmm1 now has low bits
1669 * xmm3 = xmm3 .>> 8
1670 * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
1671 * xmm2 = xmm2 .* xmm3 // xmm2 now has high bits
1672 * xmm1 = xmm1 | xmm2 // combine results
1673 */
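  // For example, for a single byte lane holding 0x12 and 0x34 the full product is
  // 0x3a8; only the low byte 0xa8 is kept, matching ordinary 8-bit wrap-around
  // multiplication (the same low byte results for signed and unsigned operands).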
1674
1675 // Copy xmm1.
1676 NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg());
1677
1678 // Multiply low bits.
1679 NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1680
1681 // xmm1 now has low bits.
1682 AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
1683
1684 // Prepare high bits for multiplication.
1685 NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE);
1686 AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
1687
1688 // Multiply high bits and xmm2 now has high bits.
1689 NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg());
1690
1691 // Combine back into dest XMM register.
1692 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1693}
1694
Mark Mendellfe945782014-05-22 09:52:36 -04001695void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001696 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1697 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1698 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1699 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001700 int opcode = 0;
1701 switch (opsize) {
1702 case k32:
1703 opcode = kX86PmulldRR;
1704 break;
1705 case kSignedHalf:
1706 opcode = kX86PmullwRR;
1707 break;
1708 case kSingle:
1709 opcode = kX86MulpsRR;
1710 break;
1711 case kDouble:
1712 opcode = kX86MulpdRR;
1713 break;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001714 case kSignedByte:
1715 // HW doesn't support 16x16 byte multiplication so emulate it.
1716 GenMultiplyVectorSignedByte(bb, mir);
1717 return;
Mark Mendellfe945782014-05-22 09:52:36 -04001718 default:
1719 LOG(FATAL) << "Unsupported vector multiply " << opsize;
1720 break;
1721 }
1722 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1723}
1724
1725void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001726 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1727 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1728 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1729 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001730 int opcode = 0;
1731 switch (opsize) {
1732 case k32:
1733 opcode = kX86PadddRR;
1734 break;
1735 case kSignedHalf:
1736 case kUnsignedHalf:
1737 opcode = kX86PaddwRR;
1738 break;
1739 case kUnsignedByte:
1740 case kSignedByte:
1741 opcode = kX86PaddbRR;
1742 break;
1743 case kSingle:
1744 opcode = kX86AddpsRR;
1745 break;
1746 case kDouble:
1747 opcode = kX86AddpdRR;
1748 break;
1749 default:
1750 LOG(FATAL) << "Unsupported vector addition " << opsize;
1751 break;
1752 }
1753 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1754}
1755
1756void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001757 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1758 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1759 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1760 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001761 int opcode = 0;
1762 switch (opsize) {
1763 case k32:
1764 opcode = kX86PsubdRR;
1765 break;
1766 case kSignedHalf:
1767 case kUnsignedHalf:
1768 opcode = kX86PsubwRR;
1769 break;
1770 case kUnsignedByte:
1771 case kSignedByte:
1772 opcode = kX86PsubbRR;
1773 break;
1774 case kSingle:
1775 opcode = kX86SubpsRR;
1776 break;
1777 case kDouble:
1778 opcode = kX86SubpdRR;
1779 break;
1780 default:
1781 LOG(FATAL) << "Unsupported vector subtraction " << opsize;
1782 break;
1783 }
1784 NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
1785}
1786
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001787void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
1788 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1789 RegStorage rs_tmp = Get128BitRegister(AllocTempWide());
1790
1791 int opcode = 0;
1792 int imm = mir->dalvikInsn.vB;
1793
1794 switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
1795 case kMirOpPackedShiftLeft:
1796 opcode = kX86PsllwRI;
1797 break;
1798 case kMirOpPackedSignedShiftRight:
1799 opcode = kX86PsrawRI;
1800 break;
1801 case kMirOpPackedUnsignedShiftRight:
1802 opcode = kX86PsrlwRI;
1803 break;
1804 default:
1805 LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode;
1806 break;
1807 }
1808
1809 /*
1810 * xmm1 will have low bits
1811 * xmm2 will have high bits
1812 *
1813 * xmm2 = xmm1
1814 * xmm1 = xmm1 .<< N
1815 * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00
1816 * xmm2 = xmm2 .<< N
1817 * xmm1 = xmm1 | xmm2
1818 */
1819
1820 // Copy xmm1.
1821 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg());
1822
1823 // Shift lower values.
1824 NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1825
1826 // Mask bottom bits.
1827 AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
1828
1829 // Shift higher values.
1830 NewLIR2(opcode, rs_tmp.GetReg(), imm);
1831
1832 // Combine back into dest XMM register.
1833 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg());
1834}
1835
Mark Mendellfe945782014-05-22 09:52:36 -04001836void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001837 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1838 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1839 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1840 int imm = mir->dalvikInsn.vB;
Mark Mendellfe945782014-05-22 09:52:36 -04001841 int opcode = 0;
1842 switch (opsize) {
1843 case k32:
1844 opcode = kX86PslldRI;
1845 break;
1846 case k64:
1847 opcode = kX86PsllqRI;
1848 break;
1849 case kSignedHalf:
1850 case kUnsignedHalf:
1851 opcode = kX86PsllwRI;
1852 break;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001853 case kSignedByte:
1854 case kUnsignedByte:
1855 GenShiftByteVector(bb, mir);
1856 return;
Mark Mendellfe945782014-05-22 09:52:36 -04001857 default:
1858 LOG(FATAL) << "Unsupported vector shift left " << opsize;
1859 break;
1860 }
1861 NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1862}
1863
1864void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001865 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1866 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1867 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1868 int imm = mir->dalvikInsn.vB;
Mark Mendellfe945782014-05-22 09:52:36 -04001869 int opcode = 0;
1870 switch (opsize) {
1871 case k32:
1872 opcode = kX86PsradRI;
1873 break;
1874 case kSignedHalf:
1875 case kUnsignedHalf:
1876 opcode = kX86PsrawRI;
1877 break;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001878 case kSignedByte:
1879 case kUnsignedByte:
1880 GenShiftByteVector(bb, mir);
1881 return;
Mark Mendellfe945782014-05-22 09:52:36 -04001882 default:
1883 LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
1884 break;
1885 }
1886 NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1887}
1888
1889void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001890 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1891 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1892 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1893 int imm = mir->dalvikInsn.vB;
Mark Mendellfe945782014-05-22 09:52:36 -04001894 int opcode = 0;
1895 switch (opsize) {
1896 case k32:
1897 opcode = kX86PsrldRI;
1898 break;
1899 case k64:
1900 opcode = kX86PsrlqRI;
1901 break;
1902 case kSignedHalf:
1903 case kUnsignedHalf:
1904 opcode = kX86PsrlwRI;
1905 break;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001906 case kSignedByte:
1907 case kUnsignedByte:
1908 GenShiftByteVector(bb, mir);
1909 return;
Mark Mendellfe945782014-05-22 09:52:36 -04001910 default:
1911 LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
1912 break;
1913 }
1914 NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
1915}
1916
1917void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
1918 // We only support 128 bit registers.
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001919 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1920 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1921 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001922 NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1923}
1924
1925void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
1926 // We only support 128 bit registers.
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001927 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1928 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1929 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001930 NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1931}
1932
1933void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
1934 // We only support 128 bit registers.
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001935 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
1936 RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
1937 RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
Mark Mendellfe945782014-05-22 09:52:36 -04001938 NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
1939}
1940
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001941void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) {
1942 MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4);
1943}
1944
1945void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
1946 // Create temporary MIR as container for 128-bit binary mask.
1947 MIR const_mir;
1948 MIR* const_mirp = &const_mir;
1949 const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
1950 const_mirp->dalvikInsn.arg[0] = m0;
1951 const_mirp->dalvikInsn.arg[1] = m1;
1952 const_mirp->dalvikInsn.arg[2] = m2;
1953 const_mirp->dalvikInsn.arg[3] = m3;
1954
1955 // Mask vector with const from literal pool.
1956 AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
1957}
1958
Mark Mendellfe945782014-05-22 09:52:36 -04001959void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001960 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
1961 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
1962 RegLocation rl_dest = mir_graph_->GetDest(mir);
1963 RegStorage rs_tmp;
1964
1965 int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
1966 int vec_unit_size = 0;
Mark Mendellfe945782014-05-22 09:52:36 -04001967 int opcode = 0;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001968 int extr_opcode = 0;
1969 RegLocation rl_result;
1970
Mark Mendellfe945782014-05-22 09:52:36 -04001971 switch (opsize) {
1972 case k32:
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001973 extr_opcode = kX86PextrdRRI;
Mark Mendellfe945782014-05-22 09:52:36 -04001974 opcode = kX86PhadddRR;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001975 vec_unit_size = 4;
1976 break;
1977 case kSignedByte:
1978 case kUnsignedByte:
1979 extr_opcode = kX86PextrbRRI;
1980 opcode = kX86PhaddwRR;
1981 vec_unit_size = 2;
Mark Mendellfe945782014-05-22 09:52:36 -04001982 break;
1983 case kSignedHalf:
1984 case kUnsignedHalf:
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001985 extr_opcode = kX86PextrwRRI;
Mark Mendellfe945782014-05-22 09:52:36 -04001986 opcode = kX86PhaddwRR;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001987 vec_unit_size = 2;
Mark Mendellfe945782014-05-22 09:52:36 -04001988 break;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07001989 case kSingle:
1990 rl_result = EvalLoc(rl_dest, kFPReg, true);
1991 vec_unit_size = 4;
1992 for (int i = 0; i < 3; i++) {
1993 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
1994 NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39);
1995 }
1996 NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
1997 StoreValue(rl_dest, rl_result);
1998
1999 // For single-precision floats, we are done here
2000 return;
Mark Mendellfe945782014-05-22 09:52:36 -04002001 default:
2002 LOG(FATAL) << "Unsupported vector add reduce " << opsize;
2003 break;
2004 }
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002005
2006 int elems = vec_bytes / vec_unit_size;
2007
2008 // Emulate a horizontal byte add by splitting the 16 bytes into two vectors of
2008 // 8 16-bit values (high and low bytes), reducing each, then adding the results.
2009 // TODO is overflow handled correctly?
2010 if (opsize == kSignedByte || opsize == kUnsignedByte) {
2011 rs_tmp = Get128BitRegister(AllocTempWide());
2012
2013 // tmp = xmm1 .>> 8.
2014 NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg());
2015 NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8);
2016
2017 // Zero extend low bits in xmm1.
2018 AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
2019 }
2020
2021 while (elems > 1) {
2022 if (opsize == kSignedByte || opsize == kUnsignedByte) {
2023 NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
2024 }
2025 NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
2026 elems >>= 1;
2027 }
2028
2029 // Combine the results if we separated them.
2030 if (opsize == kSignedByte || opsize == kUnsignedByte) {
2031 NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
2032 }
2033
2034 // We need to extract to a GPR.
2035 RegStorage temp = AllocTemp();
2036 NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);
2037
2038 // Can we do this directly into memory?
2039 rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2040 if (rl_result.location == kLocPhysReg) {
2041 // Ensure res is in a core reg
2042 rl_result = EvalLoc(rl_dest, kCoreReg, true);
2043 OpRegReg(kOpAdd, rl_result.reg, temp);
2044 StoreFinalValue(rl_dest, rl_result);
2045 } else {
2046 OpMemReg(kOpAdd, rl_result, temp.GetReg());
2047 }
2048
2049 FreeTemp(temp);
Mark Mendellfe945782014-05-22 09:52:36 -04002050}
2051
2052void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002053 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2054 RegLocation rl_dest = mir_graph_->GetDest(mir);
2055 RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
2056 int extract_index = mir->dalvikInsn.arg[0];
2057 int extr_opcode = 0;
2058 RegLocation rl_result;
2059 bool is_wide = false;
2060
Mark Mendellfe945782014-05-22 09:52:36 -04002061 switch (opsize) {
2062 case k32:
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002063 rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2064 extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
Mark Mendellfe945782014-05-22 09:52:36 -04002065 break;
2066 case kSignedHalf:
2067 case kUnsignedHalf:
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002068 rl_result = UpdateLocTyped(rl_dest, kCoreReg);
2069 extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
Mark Mendellfe945782014-05-22 09:52:36 -04002070 break;
2071 default:
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002072 LOG(FATAL) << "Unsupported vector reduce " << opsize;
2073 return;
Mark Mendellfe945782014-05-22 09:52:36 -04002074 break;
2075 }
Mark Mendellfe945782014-05-22 09:52:36 -04002076
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002077 if (rl_result.location == kLocPhysReg) {
2078 NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
2079 if (is_wide) {
2080 StoreFinalValueWide(rl_dest, rl_result);
2081 } else {
2082 StoreFinalValue(rl_dest, rl_result);
2083 }
2084 } else {
2085 int displacement = SRegOffset(rl_result.s_reg_low);
2086 LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
2087 AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
2088 AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
2089 }
Mark Mendellfe945782014-05-22 09:52:36 -04002090}
2091
2092void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002093 DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
2094 OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
2095 RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
2096 int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
2097 RegisterClass reg_type = kCoreReg;
2098
Mark Mendellfe945782014-05-22 09:52:36 -04002099 switch (opsize) {
2100 case k32:
2101 op_low = kX86PshufdRRI;
2102 break;
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002103 case kSingle:
2104 op_low = kX86PshufdRRI;
2105 op_mov = kX86Mova128RR;
2106 reg_type = kFPReg;
2107 break;
2108 case k64:
2109 op_low = kX86PshufdRRI;
2110 imm = 0x44;
2111 break;
2112 case kDouble:
2113 op_low = kX86PshufdRRI;
2114 op_mov = kX86Mova128RR;
2115 reg_type = kFPReg;
2116 imm = 0x44;
2117 break;
2118 case kSignedByte:
2119 case kUnsignedByte:
2120 // Shuffle 8 bit value into 16 bit word.
2121 // We set val = val + (val << 8) below and use 16 bit shuffle.
Mark Mendellfe945782014-05-22 09:52:36 -04002122 case kSignedHalf:
2123 case kUnsignedHalf:
2124 // Handles low quadword.
2125 op_low = kX86PshuflwRRI;
2126 // Handles upper quadword.
2127 op_high = kX86PshufdRRI;
2128 break;
2129 default:
2130 LOG(FATAL) << "Unsupported vector set " << opsize;
2131 break;
2132 }
2133
Mark Mendellfe945782014-05-22 09:52:36 -04002134 RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002135
2136 // Load the value from the VR into the reg.
2137 if (rl_src.wide == 0) {
2138 rl_src = LoadValue(rl_src, reg_type);
2139 } else {
2140 rl_src = LoadValueWide(rl_src, reg_type);
2141 }
2142
2143 // If opsize is 8 bits wide then double value and use 16 bit shuffle instead.
2144 if (opsize == kSignedByte || opsize == kUnsignedByte) {
2145 RegStorage temp = AllocTemp();
2146 // val = val + (val << 8).
2147 NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg());
2148 NewLIR2(kX86Sal32RI, temp.GetReg(), 8);
2149 NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg());
2150 FreeTemp(temp);
2151 }
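  // For example, an 8-bit value 0x5a has now become 0x5a5a, so the 16-bit shuffles
  // below replicate it into every byte of the XMM register.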
Mark Mendellfe945782014-05-22 09:52:36 -04002152
2153 // Load the value into the XMM register.
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002154 NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg());
Mark Mendellfe945782014-05-22 09:52:36 -04002155
2156 // Now shuffle the value across the destination.
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002157 NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm);
Mark Mendellfe945782014-05-22 09:52:36 -04002158
2159 // And then repeat as needed.
2160 if (op_high != 0) {
Udayan Banerji60bfe7b2014-07-08 19:59:43 -07002161 NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm);
Mark Mendellfe945782014-05-22 09:52:36 -04002162 }
2163}
2164
Mark Mendelld65c51a2014-04-29 16:55:20 -04002165LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
2166 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
2167 for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
2168 if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
2169 args[2] == p->operands[2] && args[3] == p->operands[3]) {
2170 return p;
2171 }
2172 }
2173 return nullptr;
2174}
2175
2176LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
2177 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
2178 int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
2179 new_value->operands[0] = args[0];
2180 new_value->operands[1] = args[1];
2181 new_value->operands[2] = args[2];
2182 new_value->operands[3] = args[3];
2183 new_value->next = const_vectors_;
2184 if (const_vectors_ == nullptr) {
2185 estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary.
2186 }
2187 estimated_native_code_size_ += 16; // Space for one vector.
2188 const_vectors_ = new_value;
2189 return new_value;
2190}
2191
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002192// ------------ ABI support: mapping of args to physical registers -------------
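// For example, for ins of types (int, double, long, float) the 64-bit mapper below
// hands out kArg1, kFArg0, kArg2 (wide) and kFArg1 in that order; once the 5 core or
// 8 FP argument registers are exhausted, the remaining args are flagged for the stack.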
Serguei Katkov407a9d22014-07-05 03:09:32 +07002193RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002194 const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
2195 const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
2196 const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
2197 kFArg4, kFArg5, kFArg6, kFArg7};
2198 const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002199
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002200 if (is_double_or_float) {
2201 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002202 return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002203 }
2204 } else {
2205 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
Serguei Katkov407a9d22014-07-05 03:09:32 +07002206 return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) :
2207 ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002208 }
2209 }
Chao-ying Fua77ee512014-07-01 17:43:41 -07002210 return RegStorage::InvalidReg();
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002211}
2212
2213RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
2214 DCHECK(IsInitialized());
2215 auto res = mapping_.find(in_position);
2216 return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
2217}
2218
2219void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
2220 DCHECK(mapper != nullptr);
2221 max_mapped_in_ = -1;
2222 is_there_stack_mapped_ = false;
2223 for (int in_position = 0; in_position < count; in_position++) {
Serguei Katkov407a9d22014-07-05 03:09:32 +07002224 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
2225 arg_locs[in_position].wide, arg_locs[in_position].ref);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002226 if (reg.Valid()) {
2227 mapping_[in_position] = reg;
2228 max_mapped_in_ = std::max(max_mapped_in_, in_position);
Serguei Katkov407a9d22014-07-05 03:09:32 +07002229 if (arg_locs[in_position].wide) {
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002230 // We covered 2 args, so skip the next one
2231 in_position++;
2232 }
2233 } else {
2234 is_there_stack_mapped_ = true;
2235 }
2236 }
2237 initialized_ = true;
2238}
2239
2240RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
Elena Sayapinadd644502014-07-01 18:39:52 +07002241 if (!cu_->target64) {
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002242 return GetCoreArgMappingToPhysicalReg(arg_num);
2243 }
2244
2245 if (!in_to_reg_storage_mapping_.IsInitialized()) {
2246 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
2247 RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
2248
Chao-ying Fua77ee512014-07-01 17:43:41 -07002249 InToRegStorageX86_64Mapper mapper(this);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002250 in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
2251 }
2252 return in_to_reg_storage_mapping_.Get(arg_num);
2253}
2254
2255RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
2256 // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
2257 // Not used for 64-bit, TODO: Move X86_32 to the same framework
2258 switch (core_arg_num) {
2259 case 0:
2260 return rs_rX86_ARG1;
2261 case 1:
2262 return rs_rX86_ARG2;
2263 case 2:
2264 return rs_rX86_ARG3;
2265 default:
2266 return RegStorage::InvalidReg();
2267 }
2268}
2269
2270// ---------End of ABI support: mapping of args to physical registers -------------
2271
2272/*
2273 * If there are any ins passed in registers that have not been promoted
2274 * to a callee-save register, flush them to the frame. Perform initial
2275 * assignment of promoted arguments.
2276 *
2277 * ArgLocs is an array of location records describing the incoming arguments
2278 * with one location record per word of argument.
2279 */
2280void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
Elena Sayapinadd644502014-07-01 18:39:52 +07002281 if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002282 /*
2283 * Dummy up a RegLocation for the incoming Method*
2284 * It will attempt to keep kArg0 live (or copy it to home location
2285 * if promoted).
2286 */
2287
2288 RegLocation rl_src = rl_method;
2289 rl_src.location = kLocPhysReg;
Chao-ying Fua77ee512014-07-01 17:43:41 -07002290 rl_src.reg = TargetRefReg(kArg0);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002291 rl_src.home = false;
2292 MarkLive(rl_src);
2293 StoreValue(rl_method, rl_src);
2294 // If Method* has been promoted, explicitly flush
2295 if (rl_method.location == kLocPhysReg) {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002296 StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002297 }
2298
2299 if (cu_->num_ins == 0) {
2300 return;
2301 }
2302
2303 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
2304 /*
2305 * Copy incoming arguments to their proper home locations.
2306 * NOTE: an older version of dx had an issue in which
2307 * it would reuse static method argument registers.
2308 * This could result in the same Dalvik virtual register
2309 * being promoted to both core and fp regs. To account for this,
2310 * we only copy to the corresponding promoted physical register
2311 * if it matches the type of the SSA name for the incoming
2312 * argument. It is also possible that long and double arguments
2313 * end up half-promoted. In those cases, we must flush the promoted
2314 * half to memory as well.
2315 */
Vladimir Marko8dea81c2014-06-06 14:50:36 +01002316 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002317 for (int i = 0; i < cu_->num_ins; i++) {
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002318 // get reg corresponding to input
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002319 RegStorage reg = GetArgMappingToPhysicalReg(i);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002320
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002321 RegLocation* t_loc = &ArgLocs[i];
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002322 if (reg.Valid()) {
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002323 // If arriving in register.
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002324
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002325 // We have already updated the arg location with promoted info
2326 // so we can be based on it.
2327 if (t_loc->location == kLocPhysReg) {
2328 // Just copy it.
2329 OpRegCopy(t_loc->reg, reg);
2330 } else {
2331 // Needs flush.
2332 if (t_loc->ref) {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002333 StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002334 } else {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002335 StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002336 kNotVolatile);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002337 }
2338 }
2339 } else {
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002340 // If arriving in frame & promoted.
2341 if (t_loc->location == kLocPhysReg) {
2342 if (t_loc->ref) {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002343 LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002344 } else {
Chao-ying Fua77ee512014-07-01 17:43:41 -07002345 LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002346 t_loc->wide ? k64 : k32, kNotVolatile);
2347 }
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002348 }
Dmitry Petrochenko4d5d7942014-06-27 12:25:01 +07002349 }
2350 if (t_loc->wide) {
2351 // Increment i to skip the next one.
2352 i++;
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002353 }
2354 }
2355}
2356
2357/*
2358 * Load up to 5 arguments, the first three of which will be in
2359 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
2360 * and as part of the load sequence, it must be replaced with
2361 * the target method pointer. Note, this may also be called
2362 * for "range" variants if the number of arguments is 5 or fewer.
2363 */
2364int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
2365 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
2366 const MethodReference& target_method,
2367 uint32_t vtable_idx, uintptr_t direct_code,
2368 uintptr_t direct_method, InvokeType type, bool skip_this) {
Elena Sayapinadd644502014-07-01 18:39:52 +07002369 if (!cu_->target64) {
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002370 return Mir2Lir::GenDalvikArgsNoRange(info,
2371 call_state, pcrLabel, next_call_insn,
2372 target_method,
2373 vtable_idx, direct_code,
2374 direct_method, type, skip_this);
2375 }
2376 return GenDalvikArgsRange(info,
2377 call_state, pcrLabel, next_call_insn,
2378 target_method,
2379 vtable_idx, direct_code,
2380 direct_method, type, skip_this);
2381}
2382
2383/*
2384 * May have 0+ arguments (also used for jumbo). Note that
2385 * source virtual registers may be in physical registers, so may
2386 * need to be flushed to home location before copying. This
2387 * applies to arg3 and above (see below).
2388 *
2389 * Two general strategies:
2390 * If < 20 arguments
2391 * Pass args 3-18 using vldm/vstm block copy
2392 * Pass arg0, arg1 & arg2 in kArg1-kArg3
2393 * If 20+ arguments
2394 * Pass args arg19+ using memcpy block copy
2395 * Pass arg0, arg1 & arg2 in kArg1-kArg3
2396 *
2397 */
2398int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
2399 LIR** pcrLabel, NextCallInsn next_call_insn,
2400 const MethodReference& target_method,
2401 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
2402 InvokeType type, bool skip_this) {
Elena Sayapinadd644502014-07-01 18:39:52 +07002403 if (!cu_->target64) {
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002404 return Mir2Lir::GenDalvikArgsRange(info, call_state,
2405 pcrLabel, next_call_insn,
2406 target_method,
2407 vtable_idx, direct_code, direct_method,
2408 type, skip_this);
2409 }
2410
2411 /* If no arguments, just return */
2412 if (info->num_arg_words == 0)
2413 return call_state;
2414
2415 const int start_index = skip_this ? 1 : 0;
2416
Chao-ying Fua77ee512014-07-01 17:43:41 -07002417 InToRegStorageX86_64Mapper mapper(this);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002418 InToRegStorageMapping in_to_reg_storage_mapping;
2419 in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
2420 const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
2421 const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
2422 in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
2423 int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
2424
2425 // First of all, check whether it makes sense to use bulk copying.
2426 // The optimization is applicable only for the range case.
2427 // TODO: make a constant instead of 2
2428 if (info->is_range && regs_left_to_pass_via_stack >= 2) {
2429 // Scan the rest of the args - if in phys_reg flush to memory
2430 for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
2431 RegLocation loc = info->args[next_arg];
2432 if (loc.wide) {
2433 loc = UpdateLocWide(loc);
2434 if (loc.location == kLocPhysReg) {
Vladimir Marko8dea81c2014-06-06 14:50:36 +01002435 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
Chao-ying Fua77ee512014-07-01 17:43:41 -07002436 StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002437 }
2438 next_arg += 2;
2439 } else {
2440 loc = UpdateLoc(loc);
2441 if (loc.location == kLocPhysReg) {
Vladimir Marko8dea81c2014-06-06 14:50:36 +01002442 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
Chao-ying Fua77ee512014-07-01 17:43:41 -07002443 StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002444 }
2445 next_arg++;
2446 }
2447 }
2448
2449 // Logic below assumes that Method pointer is at offset zero from SP.
2450 DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
2451
2452 // The rest can be copied together
2453 int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
2454 int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);
2455
2456 int current_src_offset = start_offset;
2457 int current_dest_offset = outs_offset;
2458
Vladimir Marko8dea81c2014-06-06 14:50:36 +01002459 // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
2460 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
Dmitry Petrochenko58994cd2014-05-17 01:02:18 +07002461 while (regs_left_to_pass_via_stack > 0) {
2462 // This is based on the knowledge that the stack itself is 16-byte aligned.
2463 bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
2464 bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
2465 size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get the chance to try to align. If there are more than
       * 4 registers left to move, consider a 128-bit move only if either src or dest are
       * aligned. We do this because we could potentially do a smaller move to align.
       */
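      // Illustrative trace (assumed offsets): with 6 words left, src offset 0x18,
      // and dest offset 0x10, dest is 16-byte aligned, so a 128-bit move is used;
      // the 8-byte-aligned src side then takes the kMovLo128FP/kMovHi128FP split
      // below while the dest side uses the aligned kMovA128FP form.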
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available. AllocTempDouble will abort if
        // there are no free registers.
        RegStorage temp = AllocTempDouble();

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
                            temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            ld1->u.m.def_mask = &kEncodeAll;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            st1->u.m.def_mask = &kEncodeAll;
          }
        }
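        // Note: AnnotateDalvikRegAccess takes a VR word index, so the byte offsets
        // above are shifted right by two (one 32-bit VR word == 4 bytes).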

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        RegStorage temp = TargetReg(kArg3, false);

        // Now load the argument VR and store to the outs.
        Load32Disp(rs_rX86_SP, current_src_offset, temp);
        Store32Disp(rs_rX86_SP, current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }
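  // Conceptually, the block above performs a single
  //   memcpy(sp + outs_offset, sp + start_offset,
  //          regs_left_to_pass_via_stack * sizeof(uint32_t))
  // using the widest moves that the observed alignment makes safe.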

  // Now handle any remaining arguments that were not mapped to registers.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regSingle = TargetReg(kArg2, false);
    RegStorage regWide = TargetReg(kArg3, true);
    for (int i = start_index;
         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
            } else {
              LoadValueDirectFixed(rl_arg, regSingle);
              StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with mapped registers.
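  // These loads intentionally come last: kArg2/kArg3 are reused as scratch
  // registers by the stack stores above, so loading the register-mapped
  // arguments any earlier could see them clobbered.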
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetRefReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art