/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static constexpr RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static constexpr RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
};
static constexpr RegStorage core_regs_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
};
static constexpr RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
};
static constexpr RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
};
static constexpr RegStorage xp_regs_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_regs_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
};
static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static constexpr RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
    rs_r8, rs_r9, rs_r10, rs_r11
};

// How to make a register available for promotion:
// 1) Remove the register from the array defining the temp set.
// 2) Update ClobberCallerSave.
// 3) Update the JNI compiler ABI:
//    3.1) add the reg in the JniCallingConvention method
//    3.2) update CoreSpillMask/FpSpillMask
// 4) Update the entrypoints:
//    4.1) update the constants in asm_support_x86_64.h for the new frame size
//    4.2) remove the entry in SmashCallerSaves
//    4.3) update jni_entrypoints to spill/unspill the new callee-save reg
//    4.4) update quick_entrypoints to spill/unspill the new callee-save reg
// 5) Update the runtime ABI:
//    5.1) update quick_method_frame_info with the new required spills
//    5.2) update QuickArgumentVisitor with the new offsets to GPRs and XMMs
// Note that registers corresponding to incoming arguments cannot be used,
// per the ABI, and QCG needs one additional XMM temp for bulk copying in
// preparation for the call. A sketch of step 1 follows.
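//
// For example, making r11 available for promotion would begin with steps 1
// and 2 (a sketch only; the JNI, entrypoint and runtime updates listed above
// are still required):
//   static constexpr RegStorage core_temps_arr_64[] = {
//     rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
//     rs_r8, rs_r9, rs_r10  // rs_r11 removed from the temp set.
//   };
//   // ...and drop the matching Clobber(rs_r11) from ClobberCallerSave().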
static constexpr RegStorage core_temps_arr_64q[] = {
    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
    rs_r8q, rs_r9q, rs_r10q, rs_r11q
};
static constexpr RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static constexpr RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
    rs_fr8, rs_fr9, rs_fr10, rs_fr11
};
static constexpr RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static constexpr RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    rs_dr8, rs_dr9, rs_dr10, rs_dr11
};

static constexpr RegStorage xp_temps_arr_32[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
};
static constexpr RegStorage xp_temps_arr_64[] = {
    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    rs_xr8, rs_xr9, rs_xr10, rs_xr11
};

static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);

static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_ARG4;
X86NativeRegisterPool rX86_ARG5;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_FARG4;
X86NativeRegisterPool rX86_FARG5;
X86NativeRegisterPool rX86_FARG6;
X86NativeRegisterPool rX86_FARG7;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_ARG4;
RegStorage rs_rX86_ARG5;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_FARG4;
RegStorage rs_rX86_FARG5;
RegStorage rs_rX86_FARG6;
RegStorage rs_rX86_FARG7;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnRef() {
  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register for 32-bit.
RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kArg4: res_reg = rs_rX86_ARG4; break;
    case kArg5: res_reg = rs_rX86_ARG5; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kFArg4: res_reg = rs_rX86_FARG4; break;
    case kFArg5: res_reg = rs_rX86_FARG5; break;
    case kFArg6: res_reg = rs_rX86_FARG6; break;
    case kFArg7: res_reg = rs_rX86_FARG7; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}
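
// For instance, with the assignments made in the X86Mir2Lir constructor below,
// TargetReg32(kArg0) yields rs_rDI on x86-64 and rs_rAX on 32-bit x86, which
// lets callers stay target-independent. A sketch of the intended use (rl_src
// is a hypothetical source location):
//   RegStorage arg0 = TargetReg32(kArg0);  // rDI (64-bit) or rAX (32-bit)
//   LoadValueDirectFixed(rl_src, arg0);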

RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  LOG(FATAL) << "Do not use this function!!!";
  return RegStorage::InvalidReg();
}

/*
 * Decode the register id.
 */
ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
  return ResourceMask::Bit(
      /* FP register starts at bit position 16 */
      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
}
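
// Worked example: rs_rAX (core register 0) maps to bit 0 of the mask, while
// rs_fr3 (xmm3) maps to bit kX86FPReg0 + 3, so core and FP registers never
// share mask bits:
//   GetRegMaskCommon(rs_rAX);  // == ResourceMask::Bit(0)
//   GetRegMaskCommon(rs_fr3);  // == ResourceMask::Bit(kX86FPReg0 + 3)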

ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return kEncodeNone;
}

void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    use_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEF_SP) {
    def_mask->SetBit(kX86RegSP);
  }

  if (flags & REG_DEFA) {
    SetupRegMask(def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(def_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEA) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(use_mask, rs_rBX.GetReg());
  }

  // Fixup for a hard-to-describe instruction: uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(use_mask, rs_rAX.GetReg());
    SetupRegMask(use_mask, rs_rCX.GetReg());
    SetupRegMask(use_mask, rs_rDI.GetReg());
    SetupRegMask(def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    use_mask->SetBit(kX86FPStack);
    def_mask->SetBit(kX86FPStack);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a human-readable instruction string.
 * See the format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            // Check the element count, not the byte size of the array.
            DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'q': {
            int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                            static_cast<uint32_t>(lir->operands[operand_number + 1]));
            buf += StringPrintf("%" PRId64, value);
            break;  // Without this break we would fall through into case 'p'.
          }
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              // Check the element count, not the byte size of the array.
              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}
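
// For example, assuming a fmt of "!0r,!1d" (the actual format strings live in
// the EncodingMap in Assemble.cc) with lir->operands[0] encoding rAX and
// lir->operands[1] == 4:
//   BuildInsnString("!0r,!1d", lir, base_addr);  // -> "rax,4"
// '!0r' prints operand 0 as a register name; '!1d' prints operand 1 in decimal.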

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no link register; account for the fake return-address register instead.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

RegStorage X86Mir2Lir::AllocateByteRegister() {
  RegStorage reg = AllocTypedTemp(false, kCoreReg);
  if (!cu_->target64) {
    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
  }
  return reg;
}

RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
  return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
}

bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  if (cu_->target64) {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rSI);
    Clobber(rs_rDI);

    Clobber(rs_r8);
    Clobber(rs_r9);
    Clobber(rs_r10);
    Clobber(rs_r11);

    Clobber(rs_fr8);
    Clobber(rs_fr9);
    Clobber(rs_fr10);
    Clobber(rs_fr11);
  } else {
    Clobber(rs_rAX);
    Clobber(rs_rCX);
    Clobber(rs_rDX);
    Clobber(rs_rBX);
  }

  Clobber(rs_fr0);
  Clobber(rs_fr1);
  Clobber(rs_fr2);
  Clobber(rs_fr3);
  Clobber(rs_fr4);
  Clobber(rs_fr5);
  Clobber(rs_fr6);
  Clobber(rs_fr7);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    LockTemp(rs_rX86_ARG4);
    LockTemp(rs_rX86_ARG5);
    LockTemp(rs_rX86_FARG0);
    LockTemp(rs_rX86_FARG1);
    LockTemp(rs_rX86_FARG2);
    LockTemp(rs_rX86_FARG3);
    LockTemp(rs_rX86_FARG4);
    LockTemp(rs_rX86_FARG5);
    LockTemp(rs_rX86_FARG6);
    LockTemp(rs_rX86_FARG7);
  }
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
  if (cu_->target64) {
    FreeTemp(rs_rX86_ARG4);
    FreeTemp(rs_rX86_ARG5);
    FreeTemp(rs_rX86_FARG0);
    FreeTemp(rs_rX86_FARG1);
    FreeTemp(rs_rX86_FARG2);
    FreeTemp(rs_rX86_FARG3);
    FreeTemp(rs_rX86_FARG4);
    FreeTemp(rs_rX86_FARG5);
    FreeTemp(rs_rX86_FARG6);
    FreeTemp(rs_rX86_FARG7);
  }
}

bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg64M:
    case kX86LockCmpxchg64A:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Be conservative: if we cannot prove the instruction provides a full barrier, assume it does not.
  return false;
}

bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory
   * fence. All other barriers (LoadLoad, LoadStore, StoreStore) are nops due to the
   * x86 memory model, so for those all we need to ensure is that a scheduling barrier
   * is in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = &kEncodeAll;
  }
  return ret;
#else
  return false;
#endif
}
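
// Illustrative sequence (a sketch; operand details elided): after a locked
// instruction such as kX86LockCmpxchgMR, a kStoreLoad barrier emits no extra
// mfence, since the locked instruction already acts as a full barrier; it is
// only marked as a scheduling barrier:
//   NewLIR3(kX86LockCmpxchgMR, ...);  // full barrier by itself
//   GenMemBarrier(kStoreLoad);        // returns false, no mfence emitted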

void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (cu_->target64) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
                                          dp_regs_64, reserved_regs_64, reserved_regs_64q,
                                          core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
                                          dp_regs_32, reserved_regs_32, empty_pool,
                                          core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Add in XMM registers.
  const ArrayRef<const RegStorage>* xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
  for (RegStorage reg : *xp_regs) {
    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    reginfo_map_.Put(reg.GetReg(), info);
  }
  const ArrayRef<const RegStorage>* xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
  for (RegStorage reg : *xp_temps) {
    RegisterInfo* xp_reg_info = GetRegInfo(reg);
    xp_reg_info->SetIsTemp(true);
  }

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    // 128-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());

    // Redirect 32-bit vector's master storage to 128-bit vector.
    info->SetMaster(xp_reg_info);

    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Redirect 64-bit vector's master storage to 128-bit vector.
    dp_reg_info->SetMaster(xp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  if (cu_->target64) {
    // Alias the 32-bit GPR views to the corresponding 64-bit registers.
    GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
      int x_reg_num = info->GetReg().GetRegNum();
      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
      // The 64-bit register's master storage should refer to itself.
      DCHECK_EQ(x_reg_info, x_reg_info->Master());
      // Redirect the 32-bit view's master storage to the 64-bit register.
      info->SetMaster(x_reg_info);
      // The 32-bit view should show a single 32-bit mask bit, at first referring to the low half.
      DCHECK_EQ(info->StorageMask(), 0x1U);
    }
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}
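
// The aliasing set up above means fr0 (single), dr0 (double) and xr0 (vector)
// are all views of xmm0 with the 128-bit view as master, so clobbering one
// view is meant to be visible through the others. A sketch of the intended
// effect:
//   Clobber(rs_dr0);  // the fr0 and xr0 views of xmm0 are invalidated too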

int X86Mir2Lir::VectorRegisterSize() {
  return 128;
}

int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
  return fp_used ? 5 : 7;
}

void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including the fake return-address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset,
                    cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                    size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including the fake return-address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  OpSize size = cu_->target64 ? k64 : k32;
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset,
                   cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
                   size, kNotVolatile);
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::SpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ -
      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
                    k64, kNotVolatile);
      offset += sizeof(double);
    }
  }
}

void X86Mir2Lir::UnSpillFPRegs() {
  if (num_fp_spills_ == 0) {
    return;
  }
  uint32_t mask = fp_spill_mask_;
  int offset = frame_size_ -
      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
                   k64, kNotVolatile);
      offset += sizeof(double);
    }
  }
}

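// Worked example of the spill layout (assuming x86-64, frame_size_ == 64,
// two core spills and two FP spills): cores are stored at offsets 48 and 56,
// doubles at 32 and 40, i.e. the FP spills sit directly below the core spills:
//   core offset = 64 - 8 * 2        -> 48, then 56
//   fp offset   = 64 - 8 * (2 + 2)  -> 32, then 40
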
bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  // X86_64 can handle any size.
  if (cu_->target64) {
    if (size == kReference) {
      return kRefReg;
    }
    return kCoreReg;
  }

  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}
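
// E.g. a volatile 64-bit field access on 32-bit x86 is given kFPReg so it can
// be done as one atomic 64-bit SSE load/store instead of two 32-bit core ops:
//   RegClassForFieldLoadStore(k64, true);  // kFPReg on x86, kCoreReg on x86-64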

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr),
      const_vectors_(nullptr) {
  store_method_addr_used_ = false;
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (cu_->target64) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rs_rX86_ARG4 = rs_r8;
    rs_rX86_ARG5 = rs_r9;
    rs_rX86_FARG0 = rs_fr0;
    rs_rX86_FARG1 = rs_fr1;
    rs_rX86_FARG2 = rs_fr2;
    rs_rX86_FARG3 = rs_fr3;
    rs_rX86_FARG4 = rs_fr4;
    rs_rX86_FARG5 = rs_fr5;
    rs_rX86_FARG6 = rs_fr6;
    rs_rX86_FARG7 = rs_fr7;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    rX86_ARG4 = r8;
    rX86_ARG5 = r9;
    rX86_FARG0 = fr0;
    rX86_FARG1 = fr1;
    rX86_FARG2 = fr2;
    rX86_FARG3 = fr3;
    rX86_FARG4 = fr4;
    rX86_FARG5 = fr5;
    rX86_FARG6 = fr6;
    rX86_FARG7 = fr7;
    rs_rX86_INVOKE_TGT = rs_rDI;
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rs_rX86_ARG4 = RegStorage::InvalidReg();
    rs_rX86_ARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG0 = rs_rAX;
    rs_rX86_FARG1 = rs_rCX;
    rs_rX86_FARG2 = rs_rDX;
    rs_rX86_FARG3 = rs_rBX;
    rs_rX86_FARG4 = RegStorage::InvalidReg();
    rs_rX86_FARG5 = RegStorage::InvalidReg();
    rs_rX86_FARG6 = RegStorage::InvalidReg();
    rs_rX86_FARG7 = RegStorage::InvalidReg();
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
    rX86_FARG0 = rAX;
    rX86_FARG1 = rCX;
    rX86_FARG2 = rDX;
    rX86_FARG3 = rBX;
    rs_rX86_INVOKE_TGT = rs_rAX;
    // TODO(64): Initialize with invalid reg
    //   rX86_ARG4 = RegStorage::InvalidReg();
    //   rX86_ARG5 = RegStorage::InvalidReg();
  }
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_COUNT = rs_rCX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;

  // Initialize the number of reserved vector registers.
  num_reserved_vector_regs_ = -1;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}
| 898 | |
Brian Carlstrom | 2ce745c | 2013-07-17 17:44:30 -0700 | [diff] [blame] | 899 | uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { |
buzbee | 409fe94 | 2013-10-11 10:49:56 -0700 | [diff] [blame] | 900 | DCHECK(!IsPseudoLirOp(opcode)); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 901 | return X86Mir2Lir::EncodingMap[opcode].flags; |
| 902 | } |
| 903 | |
Brian Carlstrom | 2ce745c | 2013-07-17 17:44:30 -0700 | [diff] [blame] | 904 | const char* X86Mir2Lir::GetTargetInstName(int opcode) { |
buzbee | 409fe94 | 2013-10-11 10:49:56 -0700 | [diff] [blame] | 905 | DCHECK(!IsPseudoLirOp(opcode)); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 906 | return X86Mir2Lir::EncodingMap[opcode].name; |
| 907 | } |
| 908 | |
Brian Carlstrom | 2ce745c | 2013-07-17 17:44:30 -0700 | [diff] [blame] | 909 | const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { |
buzbee | 409fe94 | 2013-10-11 10:49:56 -0700 | [diff] [blame] | 910 | DCHECK(!IsPseudoLirOp(opcode)); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 911 | return X86Mir2Lir::EncodingMap[opcode].fmt; |
| 912 | } |
| 913 | |
Bill Buzbee | d61ba4b | 2014-01-13 21:44:01 +0000 | [diff] [blame] | 914 | void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { |
| 915 | // Can we do this directly to memory? |
| 916 | rl_dest = UpdateLocWide(rl_dest); |
| 917 | if ((rl_dest.location == kLocDalvikFrame) || |
| 918 | (rl_dest.location == kLocCompilerTemp)) { |
| 919 | int32_t val_lo = Low32Bits(value); |
| 920 | int32_t val_hi = High32Bits(value); |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 921 | int r_base = rs_rX86_SP.GetReg(); |
Bill Buzbee | d61ba4b | 2014-01-13 21:44:01 +0000 | [diff] [blame] | 922 | int displacement = SRegOffset(rl_dest.s_reg_low); |
| 923 | |
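| | // Write the two 32-bit halves of the constant straight into the frame slot, |
| | // avoiding the allocation of a register pair for the wide constant. |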
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 924 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 925 | LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo); |
Bill Buzbee | d61ba4b | 2014-01-13 21:44:01 +0000 | [diff] [blame] | 926 | AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2, |
| 927 | false /* is_load */, true /* is64bit */); |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 928 | store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi); |
Bill Buzbee | d61ba4b | 2014-01-13 21:44:01 +0000 | [diff] [blame] | 929 | AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2, |
| 930 | false /* is_load */, true /* is64bit */); |
| 931 | return; |
| 932 | } |
| 933 | |
| 934 | // Just use the standard code to do the generation. |
| 935 | Mir2Lir::GenConstWide(rl_dest, value); |
| 936 | } |
Mark Mendell | e02d48f | 2014-01-15 11:19:23 -0800 | [diff] [blame] | 937 | |
| 938 | // TODO: Merge with existing RegLocation dumper in vreg_analysis.cc |
| 939 | void X86Mir2Lir::DumpRegLocation(RegLocation loc) { |
| 940 | LOG(INFO) << "location: " << loc.location << ',' |
| 941 | << (loc.wide ? " w" : " ") |
| 942 | << (loc.defined ? " D" : " ") |
| 943 | << (loc.is_const ? " c" : " ") |
| 944 | << (loc.fp ? " F" : " ") |
| 945 | << (loc.core ? " C" : " ") |
| 946 | << (loc.ref ? " r" : " ") |
| 947 | << (loc.high_word ? " h" : " ") |
| 948 | << (loc.home ? " H" : " ") |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 949 | << ", low: " << static_cast<int>(loc.reg.GetLowReg()) |
Bill Buzbee | 00e1ec6 | 2014-02-27 23:44:13 +0000 | [diff] [blame] | 950 | << ", high: " << static_cast<int>(loc.reg.GetHighReg()) |
Mark Mendell | e02d48f | 2014-01-15 11:19:23 -0800 | [diff] [blame] | 951 | << ", s_reg: " << loc.s_reg_low |
| 952 | << ", orig: " << loc.orig_sreg; |
| 953 | } |
| 954 | |
Mark Mendell | 67c39c4 | 2014-01-31 17:28:00 -0800 | [diff] [blame] | 955 | void X86Mir2Lir::Materialize() { |
| 956 | // A good place to run the analysis before code generation starts. |
| 957 | AnalyzeMIR(); |
| 958 | |
| 959 | // Now continue with regular code generation. |
| 960 | Mir2Lir::Materialize(); |
| 961 | } |
| 962 | |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 963 | void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 964 | SpecialTargetRegister symbolic_reg) { |
| 965 | /* |
| 966 | * For x86, just generate a 32-bit move-immediate instruction that will be filled |
| 967 | * in at 'link time'. For now, put a unique value based on the target to ensure that |
| 968 | * code deduplication works. |
| 969 | */ |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 970 | int target_method_idx = target_method.dex_method_index; |
| 971 | const DexFile* target_dex_file = target_method.dex_file; |
| 972 | const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); |
| 973 | uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 974 | |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 975 | // Generate the move instruction with the unique pointer and save index, dex_file, and type. |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 976 | LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 977 | static_cast<int>(target_method_id_ptr), target_method_idx, |
| 978 | WrapPointer(const_cast<DexFile*>(target_dex_file)), type); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 979 | AppendLIR(move); |
| 980 | method_address_insns_.Insert(move); |
| 981 | } |
| 982 | |
| 983 | void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { |
| 984 | /* |
| 985 | * For x86, just generate a 32-bit move-immediate instruction that will be filled |
| 986 | * in at 'link time'. For now, put a unique value based on the target to ensure that |
| 987 | * code deduplication works. |
| 988 | */ |
| 989 | const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx); |
| 990 | uintptr_t ptr = reinterpret_cast<uintptr_t>(&id); |
| 991 | |
| 992 | // Generate the move instruction with the unique pointer and save index and type. |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 993 | LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(), |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 994 | static_cast<int>(ptr), type_idx); |
| 995 | AppendLIR(move); |
| 996 | class_type_address_insns_.Insert(move); |
| 997 | } |
| 998 | |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 999 | LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) { |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1000 | /* |
| 1001 | * For x86, just generate a 32-bit relative call instruction that will be filled |
| 1002 | * in at 'link time'. For now, put a unique value based on the target to ensure that |
| 1003 | * code deduplication works. |
| 1004 | */ |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1005 | int target_method_idx = target_method.dex_method_index; |
| 1006 | const DexFile* target_dex_file = target_method.dex_file; |
| 1007 | const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); |
| 1008 | uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1009 | |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1010 | // Generate the call instruction with the unique pointer and save index, dex_file, and type. |
| 1011 | LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr), |
| 1012 | target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1013 | AppendLIR(call); |
| 1014 | call_method_insns_.Insert(call); |
| 1015 | return call; |
| 1016 | } |
| 1017 | |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1018 | /* |
| 1019 | * @brief Enter a 32-bit quantity into a buffer. |
| 1020 | * @param buf The buffer. |
| 1021 | * @param data The data value. |
| 1022 | */ |
| 1024 | static void PushWord(std::vector<uint8_t>& buf, int32_t data) { |
| 1025 | buf.push_back(data & 0xff); |
| 1026 | buf.push_back((data >> 8) & 0xff); |
| 1027 | buf.push_back((data >> 16) & 0xff); |
| 1028 | buf.push_back((data >> 24) & 0xff); |
| 1029 | } |
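| | // For example, PushWord(buf, 0x12345678) appends the bytes 0x78, 0x56, 0x34, 0x12 |
| | // (little-endian byte order). |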
| 1030 | |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1031 | void X86Mir2Lir::InstallLiteralPools() { |
| 1032 | // These are handled differently for x86. |
| 1033 | DCHECK(code_literal_list_ == nullptr); |
| 1034 | DCHECK(method_literal_list_ == nullptr); |
| 1035 | DCHECK(class_literal_list_ == nullptr); |
| 1036 | |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1037 | // Align to a 16-byte boundary. We rely on implicit knowledge that the start of the method |
| 1038 | // is on a 4-byte boundary. How can we check this if it changes (other than aligned loads |
| 1039 | // failing at runtime)? |
| 1040 | if (const_vectors_ != nullptr) { |
| 1041 | int align_size = (16-4) - (code_buffer_.size() & 0xF); |
| 1042 | if (align_size < 0) { |
| 1043 | align_size += 16; |
| 1044 | } |
| 1045 | |
| 1046 | while (align_size > 0) { |
| 1047 | code_buffer_.push_back(0); |
| 1048 | align_size--; |
| 1049 | } |
| 1050 | for (LIR *p = const_vectors_; p != nullptr; p = p->next) { |
| 1051 | PushWord(code_buffer_, p->operands[0]); |
| 1052 | PushWord(code_buffer_, p->operands[1]); |
| 1053 | PushWord(code_buffer_, p->operands[2]); |
| 1054 | PushWord(code_buffer_, p->operands[3]); |
| 1055 | } |
| 1056 | } |
| 1057 | |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1058 | // Handle the fixups for methods. |
| 1059 | for (uint32_t i = 0; i < method_address_insns_.Size(); i++) { |
| 1060 | LIR* p = method_address_insns_.Get(i); |
| 1061 | DCHECK_EQ(p->opcode, kX86Mov32RI); |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1062 | uint32_t target_method_idx = p->operands[2]; |
| 1063 | const DexFile* target_dex_file = |
| 1064 | reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3])); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1065 | |
| 1066 | // The offset to patch is the last 4 bytes of the instruction. |
| 1067 | int patch_offset = p->offset + p->flags.size - 4; |
| 1068 | cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx, |
| 1069 | cu_->method_idx, cu_->invoke_type, |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1070 | target_method_idx, target_dex_file, |
| 1071 | static_cast<InvokeType>(p->operands[4]), |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1072 | patch_offset); |
| 1073 | } |
| 1074 | |
| 1075 | // Handle the fixups for class types. |
| 1076 | for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) { |
| 1077 | LIR* p = class_type_address_insns_.Get(i); |
| 1078 | DCHECK_EQ(p->opcode, kX86Mov32RI); |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1079 | uint32_t target_method_idx = p->operands[2]; |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1080 | |
| 1081 | // The offset to patch is the last 4 bytes of the instruction. |
| 1082 | int patch_offset = p->offset + p->flags.size - 4; |
| 1083 | cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx, |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1084 | cu_->method_idx, target_method_idx, patch_offset); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1085 | } |
| 1086 | |
| 1087 | // And now the PC-relative calls to methods. |
| 1088 | for (uint32_t i = 0; i < call_method_insns_.Size(); i++) { |
| 1089 | LIR* p = call_method_insns_.Get(i); |
| 1090 | DCHECK_EQ(p->opcode, kX86CallI); |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1091 | uint32_t target_method_idx = p->operands[1]; |
| 1092 | const DexFile* target_dex_file = |
| 1093 | reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2])); |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1094 | |
| 1095 | // The offset to patch is the last 4 bytes of the instruction. |
| 1096 | int patch_offset = p->offset + p->flags.size - 4; |
| 1097 | cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx, |
Jeff Hao | 49161ce | 2014-03-12 11:05:25 -0700 | [diff] [blame] | 1098 | cu_->method_idx, cu_->invoke_type, |
| 1099 | target_method_idx, target_dex_file, |
| 1100 | static_cast<InvokeType>(p->operands[3]), |
Mark Mendell | 55d0eac | 2014-02-06 11:02:52 -0800 | [diff] [blame] | 1101 | patch_offset, -4 /* offset */); |
| 1102 | } |
| 1103 | |
| 1104 | // And do the normal processing. |
| 1105 | Mir2Lir::InstallLiteralPools(); |
| 1106 | } |
| 1107 | |
DaniilSokolov | 70c4f06 | 2014-06-24 17:34:00 -0700 | [diff] [blame] | 1108 | bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) { |
| 1109 | if (cu_->target64) { |
| 1110 | // TODO: Implement the ArrayCopy intrinsic for x86_64. |
| 1111 | return false; |
| 1112 | } |
| 1113 | |
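| | // Fast-path sketch: every null/range check below branches to a shared slow-path |
| | // label; the copy itself moves a single char first if the length is odd, then |
| | // copies two chars (32 bits) per loop iteration. |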
| 1114 | RegLocation rl_src = info->args[0]; |
| 1115 | RegLocation rl_srcPos = info->args[1]; |
| 1116 | RegLocation rl_dst = info->args[2]; |
| 1117 | RegLocation rl_dstPos = info->args[3]; |
| 1118 | RegLocation rl_length = info->args[4]; |
| 1119 | if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) { |
| 1120 | return false; |
| 1121 | } |
| 1122 | if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) { |
| 1123 | return false; |
| 1124 | } |
| 1125 | ClobberCallerSave(); |
| 1126 | LockCallTemps(); // Using fixed registers |
| 1127 | LoadValueDirectFixed(rl_src, rs_rAX); |
| 1128 | LoadValueDirectFixed(rl_dst, rs_rCX); |
| 1129 | LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr); |
| 1130 | LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr); |
| 1131 | LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr); |
| 1132 | LoadValueDirectFixed(rl_length, rs_rDX); |
| 1133 | LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX, 0, nullptr); |
| 1134 | LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX, 128, nullptr); |
| 1135 | LoadValueDirectFixed(rl_src, rs_rAX); |
| 1136 | LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); |
| 1137 | LIR* src_bad_len = nullptr; |
| 1138 | LIR* srcPos_negative = nullptr; |
| 1139 | if (!rl_srcPos.is_const) { |
| 1140 | LoadValueDirectFixed(rl_srcPos, rs_rBX); |
| 1141 | srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr); |
| 1142 | OpRegReg(kOpAdd, rs_rBX, rs_rDX); |
| 1143 | src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr); |
| 1144 | } else { |
| 1145 | int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg); |
| 1146 | if (pos_val == 0) { |
| 1147 | src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr); |
| 1148 | } else { |
| 1149 | OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); |
| 1150 | src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr); |
| 1151 | } |
| 1152 | } |
| 1153 | LIR* dstPos_negative = nullptr; |
| 1154 | LIR* dst_bad_len = nullptr; |
| 1155 | LoadValueDirectFixed(rl_dst, rs_rAX); |
| 1156 | LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX); |
| 1157 | if (!rl_dstPos.is_const) { |
| 1158 | LoadValueDirectFixed(rl_dstPos, rs_rBX); |
| 1159 | dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX, 0, nullptr); |
| 1160 | OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX); |
| 1161 | dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr); |
| 1162 | } else { |
| 1163 | int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg); |
| 1164 | if (pos_val == 0) { |
| 1165 | dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr); |
| 1166 | } else { |
| 1167 | OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val); |
| 1168 | dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rBX, nullptr); |
| 1169 | } |
| 1170 | } |
| 1171 | // Everything is checked now. |
| 1172 | LoadValueDirectFixed(rl_src, rs_rAX); |
| 1173 | LoadValueDirectFixed(rl_dst, rs_rBX); |
| 1174 | LoadValueDirectFixed(rl_srcPos, rs_rCX); |
| 1175 | NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(), |
| 1176 | rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value()); |
| 1177 | // RAX now holds the address of the first src element to be copied |
| 1178 | |
| 1179 | LoadValueDirectFixed(rl_dstPos, rs_rCX); |
| 1180 | NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(), |
| 1181 | rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value()); |
| 1182 | // RBX now holds the address of the first dst element to be copied |
| 1183 | |
| 1184 | // Check if the number of elements to be copied is odd or even. If odd, |
| 1185 | // copy the first element (so that the remaining number of elements |
| 1186 | // is even). |
| 1187 | LoadValueDirectFixed(rl_length, rs_rCX); |
| 1188 | OpRegImm(kOpAnd, rs_rCX, 1); |
| 1189 | LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr); |
| 1190 | OpRegImm(kOpSub, rs_rDX, 1); |
| 1191 | LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf); |
| 1192 | StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf); |
| 1193 | |
| 1194 | // Since the remaining number of elements is even, copy |
| 1195 | // two elements at a time. |
| 1196 | LIR *beginLoop = NewLIR0(kPseudoTargetLabel); |
| 1197 | LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr); |
| 1198 | OpRegImm(kOpSub, rs_rDX, 2); |
| 1199 | LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle); |
| 1200 | StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle); |
| 1201 | OpUnconditionalBranch(beginLoop); |
| 1202 | LIR *check_failed = NewLIR0(kPseudoTargetLabel); |
| 1203 | LIR* launchpad_branch = OpUnconditionalBranch(nullptr); |
| 1204 | LIR *return_point = NewLIR0(kPseudoTargetLabel); |
| 1205 | jmp_to_ret->target = return_point; |
| 1206 | jmp_to_begin_loop->target = beginLoop; |
| 1207 | src_dst_same->target = check_failed; |
| 1208 | len_negative->target = check_failed; |
| 1209 | len_too_big->target = check_failed; |
| 1210 | src_null_branch->target = check_failed; |
| 1211 | if (srcPos_negative != nullptr) |
| 1212 | srcPos_negative->target = check_failed; |
| 1213 | if (src_bad_len != nullptr) |
| 1214 | src_bad_len->target = check_failed; |
| 1215 | dst_null_branch->target = check_failed; |
| 1216 | if (dstPos_negative != nullptr) |
| 1217 | dstPos_negative->target = check_failed; |
| 1218 | if (dst_bad_len != nullptr) |
| 1219 | dst_bad_len->target = check_failed; |
| 1220 | AddIntrinsicSlowPath(info, launchpad_branch, return_point); |
| 1221 | return true; |
| 1222 | } |
| 1223 | |
| 1224 | |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1225 | /* |
| 1226 | * Fast String.indexOf(I) & (II). Inline check for the simple case of char <= 0xFFFF; |
| 1227 | * otherwise bails to the standard library code. |
| 1228 | */ |
| 1229 | bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { |
| 1230 | ClobberCallerSave(); |
| 1231 | LockCallTemps(); // Using fixed registers |
| 1232 | |
| 1233 | // EAX: 16 bit character being searched. |
| 1234 | // ECX: count: number of words to be searched. |
| 1235 | // EDI: String being searched. |
| 1236 | // EDX: temporary during execution. |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1237 | // EBX or R11: temporary during execution (depending on mode). |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1238 | |
| 1239 | RegLocation rl_obj = info->args[0]; |
| 1240 | RegLocation rl_char = info->args[1]; |
buzbee | a44d4f5 | 2014-03-05 11:26:39 -0800 | [diff] [blame] | 1241 | RegLocation rl_start; // Note: only present in the III flavor of indexOf. |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1242 | RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX; |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1243 | |
| 1244 | uint32_t char_value = |
| 1245 | rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0; |
| 1246 | |
| 1247 | if (char_value > 0xFFFF) { |
| 1248 | // We have to punt to the real String.indexOf. |
| 1249 | return false; |
| 1250 | } |
| 1251 | |
| 1252 | // Okay, we are committed to inlining this. |
buzbee | a0cd2d7 | 2014-06-01 09:33:49 -0700 | [diff] [blame] | 1253 | RegLocation rl_return = GetReturn(kCoreReg); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1254 | RegLocation rl_dest = InlineTarget(info); |
| 1255 | |
| 1256 | // Is the string non-NULL? |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1257 | LoadValueDirectFixed(rl_obj, rs_rDX); |
| 1258 | GenNullCheck(rs_rDX, info->opt_flags); |
Vladimir Marko | 3bc8615 | 2014-03-13 14:11:28 +0000 | [diff] [blame] | 1259 | info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked. |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1260 | |
| 1261 | // Does the character fit in 16 bits? |
Mingyao Yang | 3a74d15 | 2014-04-21 15:39:44 -0700 | [diff] [blame] | 1262 | LIR* slowpath_branch = nullptr; |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1263 | if (rl_char.is_const) { |
| 1264 | // We need the value in EAX. |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1265 | LoadConstantNoClobber(rs_rAX, char_value); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1266 | } else { |
| 1267 | // Character is not a constant; compare at runtime. |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1268 | LoadValueDirectFixed(rl_char, rs_rAX); |
Mingyao Yang | 3a74d15 | 2014-04-21 15:39:44 -0700 | [diff] [blame] | 1269 | slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1270 | } |
| 1271 | |
| 1272 | // From here down, we know that we are looking for a char that fits in 16 bits. |
Mark Mendell | e19c91f | 2014-02-25 08:19:08 -0800 | [diff] [blame] | 1273 | // Location of reference to data array within the String object. |
| 1274 | int value_offset = mirror::String::ValueOffset().Int32Value(); |
| 1275 | // Location of count within the String object. |
| 1276 | int count_offset = mirror::String::CountOffset().Int32Value(); |
| 1277 | // Starting offset within data array. |
| 1278 | int offset_offset = mirror::String::OffsetOffset().Int32Value(); |
| 1279 | // Start of char data within array_. |
| 1280 | int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1281 | |
| 1282 | // Character is in EAX. |
| 1283 | // Object pointer is in EDX. |
| 1284 | |
| 1285 | // We need to preserve EDI, but have no spare registers, so push it on the stack. |
| 1286 | // We have to remember that all stack addresses after this are offset by sizeof(EDI). |
buzbee | 091cc40 | 2014-03-31 10:14:40 -0700 | [diff] [blame] | 1287 | NewLIR1(kX86Push32R, rs_rDI.GetReg()); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1288 | |
Dave Allison | 3d14eb6 | 2014-07-10 01:54:57 +0000 | [diff] [blame] | 1289 | // Compute, into rCX, the number of words to be searched. |
| 1290 | Load32Disp(rs_rDX, count_offset, rs_rCX); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1291 | LIR *length_compare = nullptr; |
| 1292 | int start_value = 0; |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1293 | bool is_index_on_stack = false; |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1294 | if (zero_based) { |
| 1295 | // We have to handle an empty string. Use special instruction JECXZ. |
| 1296 | length_compare = NewLIR0(kX86Jecxz8); |
| 1297 | } else { |
buzbee | a44d4f5 | 2014-03-05 11:26:39 -0800 | [diff] [blame] | 1298 | rl_start = info->args[2]; |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1299 | // We have to offset by the start index. |
| 1300 | if (rl_start.is_const) { |
| 1301 | start_value = mir_graph_->ConstantValue(rl_start.orig_sreg); |
| 1302 | start_value = std::max(start_value, 0); |
| 1303 | |
| 1304 | // Is the start > count? |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1305 | length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1306 | |
| 1307 | if (start_value != 0) { |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1308 | OpRegImm(kOpSub, rs_rCX, start_value); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1309 | } |
| 1310 | } else { |
| 1311 | // Runtime start index. |
buzbee | 30adc73 | 2014-05-09 15:10:18 -0700 | [diff] [blame] | 1312 | rl_start = UpdateLocTyped(rl_start, kCoreReg); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1313 | if (rl_start.location == kLocPhysReg) { |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1314 | // Handle "start index < 0" case. |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1315 | OpRegReg(kOpXor, tmpReg, tmpReg); |
| 1316 | OpRegReg(kOpCmp, rl_start.reg, tmpReg); |
| 1317 | OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1318 | |
| 1319 | // The length of the string should be greater than the start index. |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1320 | length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr); |
| 1321 | OpRegReg(kOpSub, rs_rCX, rl_start.reg); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1322 | if (rl_start.reg == rs_rDI) { |
| 1323 | // The special case: we will use EDI further, so let's put the start index on the stack. |
buzbee | 091cc40 | 2014-03-31 10:14:40 -0700 | [diff] [blame] | 1324 | NewLIR1(kX86Push32R, rs_rDI.GetReg()); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1325 | is_index_on_stack = true; |
| 1326 | } |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1327 | } else { |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1328 | // Load the start index from stack, remembering that we pushed EDI. |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1329 | int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t); |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 1330 | { |
| 1331 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1332 | Load32Disp(rs_rX86_SP, displacement, tmpReg); |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 1333 | } |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1334 | OpRegReg(kOpXor, rs_rDI, rs_rDI); |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1335 | OpRegReg(kOpCmp, tmpReg, rs_rDI); |
| 1336 | OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1337 | |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1338 | length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr); |
| 1339 | OpRegReg(kOpSub, rs_rCX, tmpReg); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1340 | // Push the start index onto the stack. |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1341 | NewLIR1(kX86Push32R, tmpReg.GetReg()); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1342 | is_index_on_stack = true; |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1343 | } |
| 1344 | } |
| 1345 | } |
| 1346 | DCHECK(length_compare != nullptr); |
| 1347 | |
| 1348 | // ECX now contains the count in words to be searched. |
| 1349 | |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1350 | // Load the address of the string into R11 or EBX (depending on mode). |
Mark Mendell | e19c91f | 2014-02-25 08:19:08 -0800 | [diff] [blame] | 1351 | // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET. |
buzbee | 695d13a | 2014-04-19 13:32:20 -0700 | [diff] [blame] | 1352 | Load32Disp(rs_rDX, value_offset, rs_rDI); |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1353 | Load32Disp(rs_rDX, offset_offset, tmpReg); |
| 1354 | OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1355 | |
| 1356 | // Now compute into EDI where the search will start. |
| 1357 | if (zero_based || rl_start.is_const) { |
| 1358 | if (start_value == 0) { |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1359 | OpRegCopy(rs_rDI, tmpReg); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1360 | } else { |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1361 | NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1362 | } |
| 1363 | } else { |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1364 | if (is_index_on_stack) { |
| 1365 | // Load the start index from stack. |
buzbee | 091cc40 | 2014-03-31 10:14:40 -0700 | [diff] [blame] | 1366 | NewLIR1(kX86Pop32R, rs_rDX.GetReg()); |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1367 | OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0); |
Alexei Zavjalov | a1758d8 | 2014-04-17 01:55:43 +0700 | [diff] [blame] | 1368 | } else { |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1369 | OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1370 | } |
| 1371 | } |
| 1372 | |
| 1373 | // EDI now contains the start of the string to be searched. |
| 1374 | // We are all prepared to do the search for the character. |
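| | // repne scasw compares AX against the 16-bit word at [EDI], advancing EDI by 2 |
| | // and decrementing ECX, until a match is found or ECX reaches zero. |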
| 1375 | NewLIR0(kX86RepneScasw); |
| 1376 | |
| 1377 | // Did we find a match? |
| 1378 | LIR* failed_branch = OpCondBranch(kCondNe, nullptr); |
| 1379 | |
| 1380 | // Yes, we matched. Compute the index of the result. |
| 1381 | // index = ((curr_ptr - orig_ptr) / 2) - 1. |
nikolay serdjuk | c5e4ce1 | 2014-06-10 17:07:10 +0700 | [diff] [blame] | 1382 | OpRegReg(kOpSub, rs_rDI, tmpReg); |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1383 | OpRegImm(kOpAsr, rs_rDI, 1); |
buzbee | 091cc40 | 2014-03-31 10:14:40 -0700 | [diff] [blame] | 1384 | NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1385 | LIR *all_done = NewLIR1(kX86Jmp8, 0); |
| 1386 | |
| 1387 | // Failed to match; return -1. |
| 1388 | LIR *not_found = NewLIR0(kPseudoTargetLabel); |
| 1389 | length_compare->target = not_found; |
| 1390 | failed_branch->target = not_found; |
buzbee | 2700f7e | 2014-03-07 09:46:20 -0800 | [diff] [blame] | 1391 | LoadConstantNoClobber(rl_return.reg, -1); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1392 | |
| 1393 | // And join up at the end. |
| 1394 | all_done->target = NewLIR0(kPseudoTargetLabel); |
| 1395 | // Restore EDI from the stack. |
buzbee | 091cc40 | 2014-03-31 10:14:40 -0700 | [diff] [blame] | 1396 | NewLIR1(kX86Pop32R, rs_rDI.GetReg()); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1397 | |
| 1398 | // Out of line code returns here. |
Mingyao Yang | 3a74d15 | 2014-04-21 15:39:44 -0700 | [diff] [blame] | 1399 | if (slowpath_branch != nullptr) { |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1400 | LIR *return_point = NewLIR0(kPseudoTargetLabel); |
Mingyao Yang | 3a74d15 | 2014-04-21 15:39:44 -0700 | [diff] [blame] | 1401 | AddIntrinsicSlowPath(info, slowpath_branch, return_point); |
Mark Mendell | 4028a6c | 2014-02-19 20:06:20 -0800 | [diff] [blame] | 1402 | } |
| 1403 | |
| 1404 | StoreValue(rl_dest, rl_return); |
| 1405 | return true; |
| 1406 | } |
| 1407 | |
Mark Mendell | ae9fd93 | 2014-02-10 16:14:35 -0800 | [diff] [blame] | 1408 | /* |
Mark Mendell | ae9fd93 | 2014-02-10 16:14:35 -0800 | [diff] [blame] | 1409 | * @brief Enter an 'advance LOC' into the FDE buffer. |
| 1410 | * @param buf FDE buffer. |
| 1411 | * @param increment Amount by which to increase the current location. |
| 1412 | */ |
| 1413 | static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) { |
| 1414 | if (increment < 64) { |
| 1415 | // Encoding in opcode. |
| 1416 | buf.push_back(0x1 << 6 | increment); |
| 1417 | } else if (increment < 256) { |
| 1418 | // Single byte delta. |
| 1419 | buf.push_back(0x02); |
| 1420 | buf.push_back(increment); |
| 1421 | } else if (increment < 256 * 256) { |
| 1422 | // Two byte delta. |
| 1423 | buf.push_back(0x03); |
| 1424 | buf.push_back(increment & 0xff); |
| 1425 | buf.push_back((increment >> 8) & 0xff); |
| 1426 | } else { |
| 1427 | // Four byte delta. |
| 1428 | buf.push_back(0x04); |
| 1429 | PushWord(buf, increment); |
| 1430 | } |
| 1431 | } |
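| | // For example, AdvanceLoc(buf, 3) emits the single byte 0x43 (DW_CFA_advance_loc | 3). |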
| 1432 | |
| 1433 | |
| 1434 | std::vector<uint8_t>* X86CFIInitialization() { |
| 1435 | return X86Mir2Lir::ReturnCommonCallFrameInformation(); |
| 1436 | } |
| 1437 | |
| 1438 | std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() { |
| 1439 | std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>; |
| 1440 | |
| 1441 | // Length of the CIE (except for this field). |
| 1442 | PushWord(*cfi_info, 16); |
| 1443 | |
| 1444 | // CIE id. |
| 1445 | PushWord(*cfi_info, 0xFFFFFFFFU); |
| 1446 | |
| 1447 | // Version: 3. |
| 1448 | cfi_info->push_back(0x03); |
| 1449 | |
| 1450 | // Augmentation: empty string. |
| 1451 | cfi_info->push_back(0x0); |
| 1452 | |
| 1453 | // Code alignment: 1. |
| 1454 | cfi_info->push_back(0x01); |
| 1455 | |
| 1456 | // Data alignment: -4. |
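| | // (0x7C is -4 encoded as a signed LEB128.) |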
| 1457 | cfi_info->push_back(0x7C); |
| 1458 | |
| 1459 | // Return address register (R8). |
| 1460 | cfi_info->push_back(0x08); |
| 1461 | |
| 1462 | // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4. |
| 1463 | cfi_info->push_back(0x0C); |
| 1464 | cfi_info->push_back(0x04); |
| 1465 | cfi_info->push_back(0x04); |
| 1466 | |
| 1467 | // Return address location 0(SP): DW_CFA_offset R8 1 (offset = 1 * data alignment of -4). |
| 1468 | cfi_info->push_back(0x2 << 6 | 0x08); |
| 1469 | cfi_info->push_back(0x01); |
| 1470 | |
| 1471 | // And 2 Noops to align to 4 byte boundary. |
| 1472 | cfi_info->push_back(0x0); |
| 1473 | cfi_info->push_back(0x0); |
| 1474 | |
| 1475 | DCHECK_EQ(cfi_info->size() & 3, 0U); |
| 1476 | return cfi_info; |
| 1477 | } |
| 1478 | |
| 1479 | static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) { |
| 1480 | uint8_t buffer[12]; |
| 1481 | uint8_t *ptr = EncodeUnsignedLeb128(buffer, value); |
| 1482 | for (uint8_t *p = buffer; p < ptr; p++) { |
| 1483 | buf.push_back(*p); |
| 1484 | } |
| 1485 | } |
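| | // For example, EncodeUnsignedLeb128(buf, 300) appends the two bytes 0xAC, 0x02. |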
| 1486 | |
| 1487 | std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() { |
| 1488 | std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>; |
| 1489 | |
| 1490 | // Generate the FDE for the method. |
| 1491 | DCHECK_NE(data_offset_, 0U); |
| 1492 | |
| 1493 | // Length (will be filled in later in this routine). |
| 1494 | PushWord(*cfi_info, 0); |
| 1495 | |
| 1496 | // CIE_pointer (can be filled in by linker); might be left at 0 if there is only |
| 1497 | // one CIE for the whole debug_frame section. |
| 1498 | PushWord(*cfi_info, 0); |
| 1499 | |
| 1500 | // 'initial_location' (filled in by linker). |
| 1501 | PushWord(*cfi_info, 0); |
| 1502 | |
| 1503 | // 'address_range' (number of bytes in the method). |
| 1504 | PushWord(*cfi_info, data_offset_); |
| 1505 | |
| 1506 | // The instructions in the FDE. |
| 1507 | if (stack_decrement_ != nullptr) { |
| 1508 | // Advance LOC to just past the stack decrement. |
| 1509 | uint32_t pc = NEXT_LIR(stack_decrement_)->offset; |
| 1510 | AdvanceLoc(*cfi_info, pc); |
| 1511 | |
| 1512 | // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size. |
| 1513 | cfi_info->push_back(0x0e); |
| 1514 | EncodeUnsignedLeb128(*cfi_info, frame_size_); |
| 1515 | |
| 1516 | // We continue with that stack until the epilogue. |
| 1517 | if (stack_increment_ != nullptr) { |
| 1518 | uint32_t new_pc = NEXT_LIR(stack_increment_)->offset; |
| 1519 | AdvanceLoc(*cfi_info, new_pc - pc); |
| 1520 | |
| 1521 | // We probably have code snippets after the epilogue, so save the |
| 1522 | // current state: DW_CFA_remember_state. |
| 1523 | cfi_info->push_back(0x0a); |
| 1524 | |
| 1525 | // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return |
| 1526 | // PC on the stack now. |
| 1527 | cfi_info->push_back(0x0e); |
| 1528 | EncodeUnsignedLeb128(*cfi_info, 4); |
| 1529 | |
| 1530 | // Everything after that is the same as before the epilogue. |
| 1531 | // Stack bump was followed by RET instruction. |
| 1532 | LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_)); |
| 1533 | if (post_ret_insn != nullptr) { |
| 1534 | pc = new_pc; |
| 1535 | new_pc = post_ret_insn->offset; |
| 1536 | AdvanceLoc(*cfi_info, new_pc - pc); |
| 1537 | // Restore the state: DW_CFA_restore_state. |
| 1538 | cfi_info->push_back(0x0b); |
| 1539 | } |
| 1540 | } |
| 1541 | } |
| 1542 | |
| 1543 | // Padding to a multiple of 4 |
| 1544 | while ((cfi_info->size() & 3) != 0) { |
| 1545 | // DW_CFA_nop is encoded as 0. |
| 1546 | cfi_info->push_back(0); |
| 1547 | } |
| 1548 | |
| 1549 | // Set the length of the FDE inside the generated bytes. |
| 1550 | uint32_t length = cfi_info->size() - 4; |
| 1551 | (*cfi_info)[0] = length; |
| 1552 | (*cfi_info)[1] = length >> 8; |
| 1553 | (*cfi_info)[2] = length >> 16; |
| 1554 | (*cfi_info)[3] = length >> 24; |
| 1555 | return cfi_info; |
| 1556 | } |
| 1557 | |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1558 | void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) { |
| 1559 | switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1560 | case kMirOpReserveVectorRegisters: |
| 1561 | ReserveVectorRegisters(mir); |
| 1562 | break; |
| 1563 | case kMirOpReturnVectorRegisters: |
| 1564 | ReturnVectorRegisters(); |
| 1565 | break; |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1566 | case kMirOpConstVector: |
| 1567 | GenConst128(bb, mir); |
| 1568 | break; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1569 | case kMirOpMoveVector: |
| 1570 | GenMoveVector(bb, mir); |
| 1571 | break; |
| 1572 | case kMirOpPackedMultiply: |
| 1573 | GenMultiplyVector(bb, mir); |
| 1574 | break; |
| 1575 | case kMirOpPackedAddition: |
| 1576 | GenAddVector(bb, mir); |
| 1577 | break; |
| 1578 | case kMirOpPackedSubtract: |
| 1579 | GenSubtractVector(bb, mir); |
| 1580 | break; |
| 1581 | case kMirOpPackedShiftLeft: |
| 1582 | GenShiftLeftVector(bb, mir); |
| 1583 | break; |
| 1584 | case kMirOpPackedSignedShiftRight: |
| 1585 | GenSignedShiftRightVector(bb, mir); |
| 1586 | break; |
| 1587 | case kMirOpPackedUnsignedShiftRight: |
| 1588 | GenUnsignedShiftRightVector(bb, mir); |
| 1589 | break; |
| 1590 | case kMirOpPackedAnd: |
| 1591 | GenAndVector(bb, mir); |
| 1592 | break; |
| 1593 | case kMirOpPackedOr: |
| 1594 | GenOrVector(bb, mir); |
| 1595 | break; |
| 1596 | case kMirOpPackedXor: |
| 1597 | GenXorVector(bb, mir); |
| 1598 | break; |
| 1599 | case kMirOpPackedAddReduce: |
| 1600 | GenAddReduceVector(bb, mir); |
| 1601 | break; |
| 1602 | case kMirOpPackedReduce: |
| 1603 | GenReduceVector(bb, mir); |
| 1604 | break; |
| 1605 | case kMirOpPackedSet: |
| 1606 | GenSetVector(bb, mir); |
| 1607 | break; |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1608 | default: |
| 1609 | break; |
| 1610 | } |
| 1611 | } |
| 1612 | |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1613 | void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) { |
| 1614 | // We should not try to reserve twice without returning the registers. |
| 1615 | DCHECK_EQ(num_reserved_vector_regs_, -1); |
| 1616 | |
| 1617 | int num_vector_reg = mir->dalvikInsn.vA; |
| 1618 | for (int i = 0; i < num_vector_reg; i++) { |
| 1619 | RegStorage xp_reg = RegStorage::Solo128(i); |
| 1620 | RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); |
| 1621 | Clobber(xp_reg); |
| 1622 | |
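| | // Remove the single- and double-precision views of this XMM register from the |
| | // allocation pools so the allocator cannot hand them out while it is reserved. |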
| 1623 | for (RegisterInfo *info = xp_reg_info->GetAliasChain(); |
| 1624 | info != nullptr; |
| 1625 | info = info->GetAliasChain()) { |
| 1626 | if (info->GetReg().IsSingle()) { |
| 1627 | reg_pool_->sp_regs_.Delete(info); |
| 1628 | } else { |
| 1629 | reg_pool_->dp_regs_.Delete(info); |
| 1630 | } |
| 1631 | } |
| 1632 | } |
| 1633 | |
| 1634 | num_reserved_vector_regs_ = num_vector_reg; |
| 1635 | } |
| 1636 | |
| 1637 | void X86Mir2Lir::ReturnVectorRegisters() { |
| 1638 | // Return all the reserved registers |
| 1639 | for (int i = 0; i < num_reserved_vector_regs_; i++) { |
| 1640 | RegStorage xp_reg = RegStorage::Solo128(i); |
| 1641 | RegisterInfo *xp_reg_info = GetRegInfo(xp_reg); |
| 1642 | |
| 1643 | for (RegisterInfo *info = xp_reg_info->GetAliasChain(); |
| 1644 | info != nullptr; |
| 1645 | info = info->GetAliasChain()) { |
| 1646 | if (info->GetReg().IsSingle()) { |
| 1647 | reg_pool_->sp_regs_.Insert(info); |
| 1648 | } else { |
| 1649 | reg_pool_->dp_regs_.Insert(info); |
| 1650 | } |
| 1651 | } |
| 1652 | } |
| 1653 | |
| 1654 | // We no longer have any reserved vector registers. |
| 1655 | num_reserved_vector_regs_ = -1; |
| 1656 | } |
| 1657 | |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1658 | void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1659 | store_method_addr_used_ = true; |
| 1660 | int type_size = mir->dalvikInsn.vB; |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1661 | // We support 128 bit vectors. |
| 1662 | DCHECK_EQ(type_size & 0xFFFF, 128); |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1663 | RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1664 | uint32_t *args = mir->dalvikInsn.arg; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1665 | int reg = rs_dest.GetReg(); |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1666 | // Check for all 0 case. |
| 1667 | if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) { |
| 1668 | NewLIR2(kX86XorpsRR, reg, reg); |
| 1669 | return; |
| 1670 | } |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1671 | |
| 1672 | // Append the mov const vector to reg opcode. |
| 1673 | AppendOpcodeWithConst(kX86MovupsRM, reg, mir); |
| 1674 | } |
| 1675 | |
| 1676 | void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) { |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1677 | // Okay, load it from the constant vector area. |
| 1678 | LIR *data_target = ScanVectorLiteral(mir); |
| 1679 | if (data_target == nullptr) { |
| 1680 | data_target = AddVectorLiteral(mir); |
| 1681 | } |
| 1682 | |
| 1683 | // Address the start of the method. |
| 1684 | RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low); |
Chao-ying Fu | e0ccdc0 | 2014-06-06 17:32:37 -0700 | [diff] [blame] | 1685 | if (rl_method.wide) { |
| 1686 | rl_method = LoadValueWide(rl_method, kCoreReg); |
| 1687 | } else { |
| 1688 | rl_method = LoadValue(rl_method, kCoreReg); |
| 1689 | } |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1690 | |
| 1691 | // Load the proper value from the literal area. |
| 1692 | // We don't know the proper offset for the value, so pick one that will force |
| 1693 | // a 4-byte offset. We will fix this up in the assembler later to have the right |
| 1694 | // value. |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 1695 | ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral); |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1696 | LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg()); |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1697 | load->flags.fixup = kFixupLoad; |
| 1698 | load->target = data_target; |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 1699 | } |
| 1700 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1701 | void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) { |
| 1702 | // We only support 128 bit registers. |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1703 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1704 | RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1705 | RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1706 | NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg()); |
| 1707 | } |
| 1708 | |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1709 | void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) { |
| 1710 | const int BYTE_SIZE = 8; |
| 1711 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1712 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
| 1713 | RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide()); |
| 1714 | |
| 1715 | /* |
| 1716 | * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM |
| 1717 | * and multiplying 8 at a time before recombining back into one XMM register. |
| 1718 | * |
| 1719 | * let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes) |
| 1720 | * xmm3 is tmp (operate on high bits of 16bit lanes) |
| 1721 | * |
| 1722 | * xmm3 = xmm1 |
| 1723 | * xmm1 = xmm1 .* xmm2 |
| 1724 | * xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff // xmm1 now has low bits |
| 1725 | * xmm3 = xmm3 .>> 8 |
| 1726 | * xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00 |
| 1727 | * xmm2 = xmm2 .* xmm3 // xmm2 now has high bits |
| 1728 | * xmm1 = xmm1 | xmm2 // combine results |
| 1729 | */ |
| 1730 | |
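| | // pmullw multiplies 16-bit lanes and keeps the low 16 bits of each product; the |
| | // low byte of such a product equals the byte product mod 256, which is exactly |
| | // what a packed byte multiply must produce. |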
| 1731 | // Copy xmm1. |
| 1732 | NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg()); |
| 1733 | |
| 1734 | // Multiply low bits. |
| 1735 | NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1736 | |
| 1737 | // xmm1 now has low bits. |
| 1738 | AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); |
| 1739 | |
| 1740 | // Prepare high bits for multiplication. |
| 1741 | NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE); |
| 1742 | AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); |
| 1743 | |
| 1744 | // Multiply high bits and xmm2 now has high bits. |
| 1745 | NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg()); |
| 1746 | |
| 1747 | // Combine back into dest XMM register. |
| 1748 | NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1749 | } |
| 1750 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1751 | void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1752 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1753 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 1754 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1755 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1756 | int opcode = 0; |
| 1757 | switch (opsize) { |
| 1758 | case k32: |
| 1759 | opcode = kX86PmulldRR; |
| 1760 | break; |
| 1761 | case kSignedHalf: |
| 1762 | opcode = kX86PmullwRR; |
| 1763 | break; |
| 1764 | case kSingle: |
| 1765 | opcode = kX86MulpsRR; |
| 1766 | break; |
| 1767 | case kDouble: |
| 1768 | opcode = kX86MulpdRR; |
| 1769 | break; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1770 | case kSignedByte: |
| 1771 | // HW doesn't support 16x16 byte multiplication so emulate it. |
| 1772 | GenMultiplyVectorSignedByte(bb, mir); |
| 1773 | return; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1774 | default: |
| 1775 | LOG(FATAL) << "Unsupported vector multiply " << opsize; |
| 1776 | break; |
| 1777 | } |
| 1778 | NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1779 | } |
| 1780 | |
| 1781 | void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1782 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1783 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 1784 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1785 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1786 | int opcode = 0; |
| 1787 | switch (opsize) { |
| 1788 | case k32: |
| 1789 | opcode = kX86PadddRR; |
| 1790 | break; |
| 1791 | case kSignedHalf: |
| 1792 | case kUnsignedHalf: |
| 1793 | opcode = kX86PaddwRR; |
| 1794 | break; |
| 1795 | case kUnsignedByte: |
| 1796 | case kSignedByte: |
| 1797 | opcode = kX86PaddbRR; |
| 1798 | break; |
| 1799 | case kSingle: |
| 1800 | opcode = kX86AddpsRR; |
| 1801 | break; |
| 1802 | case kDouble: |
| 1803 | opcode = kX86AddpdRR; |
| 1804 | break; |
| 1805 | default: |
| 1806 | LOG(FATAL) << "Unsupported vector addition " << opsize; |
| 1807 | break; |
| 1808 | } |
| 1809 | NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1810 | } |
| 1811 | |
| 1812 | void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1813 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1814 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 1815 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1816 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1817 | int opcode = 0; |
| 1818 | switch (opsize) { |
| 1819 | case k32: |
| 1820 | opcode = kX86PsubdRR; |
| 1821 | break; |
| 1822 | case kSignedHalf: |
| 1823 | case kUnsignedHalf: |
| 1824 | opcode = kX86PsubwRR; |
| 1825 | break; |
| 1826 | case kUnsignedByte: |
| 1827 | case kSignedByte: |
| 1828 | opcode = kX86PsubbRR; |
| 1829 | break; |
| 1830 | case kSingle: |
| 1831 | opcode = kX86SubpsRR; |
| 1832 | break; |
| 1833 | case kDouble: |
| 1834 | opcode = kX86SubpdRR; |
| 1835 | break; |
| 1836 | default: |
| 1837 | LOG(FATAL) << "Unsupported vector subtraction " << opsize; |
| 1838 | break; |
| 1839 | } |
| 1840 | NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1841 | } |
| 1842 | |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1843 | void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) { |
| 1844 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1845 | RegStorage rs_tmp = Get128BitRegister(AllocTempWide()); |
| 1846 | |
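| | // SSE has no per-byte shift instructions, so byte-lane shifts are emulated with |
| | // 16-bit shifts plus masking to discard bits that cross byte boundaries. |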
| 1847 | int opcode = 0; |
| 1848 | int imm = mir->dalvikInsn.vB; |
| 1849 | |
| 1850 | switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) { |
| 1851 | case kMirOpPackedShiftLeft: |
| 1852 | opcode = kX86PsllwRI; |
| 1853 | break; |
| 1854 | case kMirOpPackedSignedShiftRight: |
| 1855 | opcode = kX86PsrawRI; |
| 1856 | break; |
| 1857 | case kMirOpPackedUnsignedShiftRight: |
| 1858 | opcode = kX86PsrlwRI; |
| 1859 | break; |
| 1860 | default: |
| 1861 | LOG(FATAL) << "Unsupported shift operation on byte vector " << static_cast<int>(mir->dalvikInsn.opcode); |
| 1862 | break; |
| 1863 | } |
| 1864 | |
| 1865 | /* |
| 1866 | * xmm1 will have low bits |
| 1867 | * xmm2 will have high bits |
| 1868 | * |
| 1869 | * xmm2 = xmm1 |
| 1870 | * xmm1 = xmm1 .<< N |
| 1871 | * xmm2 = xmm2 & 0xFF00FF00FF00FF00FF00FF00FF00FF00 |
| 1872 | * xmm2 = xmm2 .<< N |
| 1873 | * xmm1 = xmm1 | xmm2 |
| 1874 | */ |
| 1875 | |
| 1876 | // Copy xmm1. |
| 1877 | NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg()); |
| 1878 | |
| 1879 | // Shift lower values. |
| 1880 | NewLIR2(opcode, rs_dest_src1.GetReg(), imm); |
| 1881 | |
| 1882 | // Mask off the low byte of each 16-bit lane. |
| 1883 | AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00); |
| 1884 | |
| 1885 | // Shift higher values. |
| 1886 | NewLIR2(opcode, rs_tmp.GetReg(), imm); |
| 1887 | |
| 1888 | // Combine back into dest XMM register. |
| 1889 | NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg()); |
| 1890 | } |
| 1891 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1892 | void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1893 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1894 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 1895 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1896 | int imm = mir->dalvikInsn.vB; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1897 | int opcode = 0; |
| 1898 | switch (opsize) { |
| 1899 | case k32: |
| 1900 | opcode = kX86PslldRI; |
| 1901 | break; |
| 1902 | case k64: |
| 1903 | opcode = kX86PsllqRI; |
| 1904 | break; |
| 1905 | case kSignedHalf: |
| 1906 | case kUnsignedHalf: |
| 1907 | opcode = kX86PsllwRI; |
| 1908 | break; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1909 | case kSignedByte: |
| 1910 | case kUnsignedByte: |
| 1911 | GenShiftByteVector(bb, mir); |
| 1912 | return; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1913 | default: |
| 1914 | LOG(FATAL) << "Unsupported vector shift left " << opsize; |
| 1915 | break; |
| 1916 | } |
| 1917 | NewLIR2(opcode, rs_dest_src1.GetReg(), imm); |
| 1918 | } |
| 1919 | |
| 1920 | void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1921 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1922 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 1923 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1924 | int imm = mir->dalvikInsn.vB; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1925 | int opcode = 0; |
| 1926 | switch (opsize) { |
| 1927 | case k32: |
| 1928 | opcode = kX86PsradRI; |
| 1929 | break; |
| 1930 | case kSignedHalf: |
| 1931 | case kUnsignedHalf: |
| 1932 | opcode = kX86PsrawRI; |
| 1933 | break; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1934 | case kSignedByte: |
| 1935 | case kUnsignedByte: |
| 1936 | GenShiftByteVector(bb, mir); |
| 1937 | return; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1938 | default: |
| 1939 | LOG(FATAL) << "Unsupported vector signed shift right " << opsize; |
| 1940 | break; |
| 1941 | } |
| 1942 | NewLIR2(opcode, rs_dest_src1.GetReg(), imm); |
| 1943 | } |
| 1944 | |
| 1945 | void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1946 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1947 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 1948 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1949 | int imm = mir->dalvikInsn.vB; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1950 | int opcode = 0; |
| 1951 | switch (opsize) { |
| 1952 | case k32: |
| 1953 | opcode = kX86PsrldRI; |
| 1954 | break; |
| 1955 | case k64: |
| 1956 | opcode = kX86PsrlqRI; |
| 1957 | break; |
| 1958 | case kSignedHalf: |
| 1959 | case kUnsignedHalf: |
| 1960 | opcode = kX86PsrlwRI; |
| 1961 | break; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1962 | case kSignedByte: |
| 1963 | case kUnsignedByte: |
| 1964 | GenShiftByteVector(bb, mir); |
| 1965 | return; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1966 | default: |
| 1967 | LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize; |
| 1968 | break; |
| 1969 | } |
| 1970 | NewLIR2(opcode, rs_dest_src1.GetReg(), imm); |
| 1971 | } |
| 1972 | |
| 1973 | void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) { |
| 1974 | // We only support 128-bit registers.
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1975 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1976 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1977 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1978 | NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1979 | } |
| 1980 | |
| 1981 | void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) { |
| 1982 | // We only support 128-bit registers.
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1983 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1984 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1985 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1986 | NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1987 | } |
| 1988 | |
| 1989 | void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) { |
| 1990 | // We only support 128-bit registers.
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1991 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 1992 | RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 1993 | RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 1994 | NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg()); |
| 1995 | } |
| 1996 | |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 1997 | void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
| 1998 | MaskVectorRegister(kX86PandRM, rs_src1, m0, m1, m2, m3);
| 1999 | } |
| 2000 | |
| 2001 | void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) { |
| 2002 | // Create temporary MIR as container for 128-bit binary mask. |
| 2003 | MIR const_mir; |
| 2004 | MIR* const_mirp = &const_mir; |
| 2005 | const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector); |
| 2006 | const_mirp->dalvikInsn.arg[0] = m0; |
| 2007 | const_mirp->dalvikInsn.arg[1] = m1; |
| 2008 | const_mirp->dalvikInsn.arg[2] = m2; |
| 2009 | const_mirp->dalvikInsn.arg[3] = m3; |
| 2010 | |
| 2011 | // Mask vector with const from literal pool. |
| 2012 | AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp); |
| 2013 | } |
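|      | // Usage sketch (illustrative): AndMaskVectorRegister(rs_v, 0x00FF00FF,
|      | // 0x00FF00FF, 0x00FF00FF, 0x00FF00FF) emits a PAND of rs_v against a
|      | // 16-byte constant that AppendOpcodeWithConst materializes in the
|      | // literal pool.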
| 2014 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2015 | void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2016 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 2017 | RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); |
| 2018 | RegLocation rl_dest = mir_graph_->GetDest(mir); |
| 2019 | RegStorage rs_tmp; |
| 2020 | |
| 2021 | int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8; |
| 2022 | int vec_unit_size = 0; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2023 | int opcode = 0; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2024 | int extr_opcode = 0; |
| 2025 | RegLocation rl_result; |
| 2026 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2027 | switch (opsize) { |
| 2028 | case k32: |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2029 | extr_opcode = kX86PextrdRRI; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2030 | opcode = kX86PhadddRR; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2031 | vec_unit_size = 4; |
| 2032 | break; |
| 2033 | case kSignedByte: |
| 2034 | case kUnsignedByte: |
| 2035 | extr_opcode = kX86PextrbRRI; |
| 2036 | opcode = kX86PhaddwRR; |
| 2037 | vec_unit_size = 2; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2038 | break; |
| 2039 | case kSignedHalf: |
| 2040 | case kUnsignedHalf: |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2041 | extr_opcode = kX86PextrwRRI; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2042 | opcode = kX86PhaddwRR; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2043 | vec_unit_size = 2; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2044 | break; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2045 | case kSingle: |
| 2046 | rl_result = EvalLoc(rl_dest, kFPReg, true); |
| 2047 | vec_unit_size = 4; |
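|      | // Illustrative note: SHUFPS with imm 0x39 selects lanes {1,2,3,0},
|      | // rotating the source one lane toward lane 0 so that the four ADDSS
|      | // steps below accumulate every element into the scalar result.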
| 2048 | for (int i = 0; i < 3; i++) { |
| 2049 | NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); |
| 2050 | NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39); |
| 2051 | } |
| 2052 | NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg()); |
| 2053 | StoreValue(rl_dest, rl_result); |
| 2054 | |
| 2055 | // For single-precision floats, we are done here.
| 2056 | return; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2057 | default: |
| 2058 | LOG(FATAL) << "Unsupported vector add reduce " << opsize; |
| 2059 | break; |
| 2060 | } |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2061 | |
| 2062 | int elems = vec_bytes / vec_unit_size; |
| 2063 | |
| 2064 | // Emulate a horizontal byte add: split the 16 bytes into two vectors of 8 zero-extended words, reduce each with PHADDW, then recombine.
| 2065 | // TODO: is overflow handled correctly?
| 2066 | if (opsize == kSignedByte || opsize == kUnsignedByte) { |
| 2067 | rs_tmp = Get128BitRegister(AllocTempWide()); |
| 2068 | |
| 2069 | // tmp = src1 with each 16-bit word logically shifted right by 8 (the high bytes).
| 2070 | NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg()); |
| 2071 | NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8); |
| 2072 | |
| 2073 | // Zero-extend the low byte of each word in src1.
| 2074 | AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF); |
| 2075 | } |
| 2076 | |
| 2077 | while (elems > 1) { |
| 2078 | if (opsize == kSignedByte || opsize == kUnsignedByte) { |
| 2079 | NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg()); |
| 2080 | } |
| 2081 | NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg()); |
| 2082 | elems >>= 1; |
| 2083 | } |
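|      | // Worked example (illustrative): for k32, elems == 4 and the two PHADDD
|      | // passes fold [a b c d] -> [a+b c+d a+b c+d] -> [a+b+c+d ...], leaving
|      | // the full sum in lane 0 for the extract below.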
| 2084 | |
| 2085 | // Combine the results if we separated them. |
| 2086 | if (opsize == kSignedByte || opsize == kUnsignedByte) { |
| 2087 | NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg()); |
| 2088 | } |
| 2089 | |
| 2090 | // We need to extract to a GPR. |
| 2091 | RegStorage temp = AllocTemp(); |
| 2092 | NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0); |
| 2093 | |
| 2094 | // Can we do this directly into memory? |
| 2095 | rl_result = UpdateLocTyped(rl_dest, kCoreReg); |
| 2096 | if (rl_result.location == kLocPhysReg) { |
| 2097 | // Ensure res is in a core reg |
| 2098 | rl_result = EvalLoc(rl_dest, kCoreReg, true); |
| 2099 | OpRegReg(kOpAdd, rl_result.reg, temp); |
| 2100 | StoreFinalValue(rl_dest, rl_result); |
| 2101 | } else { |
| 2102 | OpMemReg(kOpAdd, rl_result, temp.GetReg()); |
| 2103 | } |
| 2104 | |
| 2105 | FreeTemp(temp); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2106 | } |
| 2107 | |
| 2108 | void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2109 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 2110 | RegLocation rl_dest = mir_graph_->GetDest(mir); |
| 2111 | RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB); |
| 2112 | int extract_index = mir->dalvikInsn.arg[0]; |
| 2113 | int extr_opcode = 0; |
| 2114 | RegLocation rl_result; |
| 2115 | bool is_wide = false; |
| 2116 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2117 | switch (opsize) { |
| 2118 | case k32: |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2119 | rl_result = UpdateLocTyped(rl_dest, kCoreReg); |
| 2120 | extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2121 | break; |
| 2122 | case kSignedHalf: |
| 2123 | case kUnsignedHalf: |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2124 | rl_result = UpdateLocTyped(rl_dest, kCoreReg);
| 2125 | extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI; |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2126 | break; |
| 2127 | default: |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2128 | LOG(FATAL) << "Unsupported vector reduce " << opsize;
| 2129 | return;
| 2131 | } |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2132 | |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2133 | if (rl_result.location == kLocPhysReg) { |
| 2134 | NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index); |
| 2135 | if (is_wide) {
| 2136 | StoreFinalValueWide(rl_dest, rl_result);
| 2137 | } else {
| 2138 | StoreFinalValue(rl_dest, rl_result);
| 2139 | }
| 2140 | } else { |
| 2141 | int displacement = SRegOffset(rl_result.s_reg_low); |
| 2142 | LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg()); |
| 2143 | AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */); |
| 2144 | AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */); |
| 2145 | } |
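|      | // Extract sketch (illustrative): a k32 reduce with extract_index 2 emits
|      | // PEXTRD gpr, xmm, 2 when the result lives in a core register, or goes
|      | // straight to the stack slot when the result lives in the frame.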
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2146 | } |
| 2147 | |
| 2148 | void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2149 | DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U); |
| 2150 | OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16); |
| 2151 | RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA); |
| 2152 | int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR; |
| 2153 | RegisterClass reg_type = kCoreReg; |
| 2154 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2155 | switch (opsize) { |
| 2156 | case k32: |
| 2157 | op_low = kX86PshufdRRI; |
| 2158 | break; |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2159 | case kSingle: |
| 2160 | op_low = kX86PshufdRRI; |
| 2161 | op_mov = kX86Mova128RR; |
| 2162 | reg_type = kFPReg; |
| 2163 | break; |
| 2164 | case k64: |
| 2165 | op_low = kX86PshufdRRI; |
| 2166 | imm = 0x44; |
| 2167 | break; |
| 2168 | case kDouble: |
| 2169 | op_low = kX86PshufdRRI; |
| 2170 | op_mov = kX86Mova128RR; |
| 2171 | reg_type = kFPReg; |
| 2172 | imm = 0x44; |
| 2173 | break; |
| 2174 | case kSignedByte: |
| 2175 | case kUnsignedByte: |
| 2176 | // Shuffle the 8-bit value into a 16-bit word.
| 2177 | // We set val = val | (val << 8) below and then use the 16-bit shuffle.
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2178 | case kSignedHalf: |
| 2179 | case kUnsignedHalf: |
| 2180 | // Handles low quadword. |
| 2181 | op_low = kX86PshuflwRRI; |
| 2182 | // Handles upper quadword. |
| 2183 | op_high = kX86PshufdRRI; |
| 2184 | break; |
| 2185 | default: |
| 2186 | LOG(FATAL) << "Unsupported vector set " << opsize; |
| 2187 | break; |
| 2188 | } |
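|      | // Broadcast sketch (illustrative): for a 16-bit set, MOVD copies the GPR
|      | // into the low dword, PSHUFLW imm 0 replicates word 0 across the low
|      | // quadword, and PSHUFD imm 0 then replicates dword 0 across the whole
|      | // register.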
| 2189 | |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2190 | RegLocation rl_src = mir_graph_->GetSrc(mir, 0); |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2191 | |
| 2192 | // Load the value from the VR into the reg. |
| 2193 | if (!rl_src.wide) {
| 2194 | rl_src = LoadValue(rl_src, reg_type); |
| 2195 | } else { |
| 2196 | rl_src = LoadValueWide(rl_src, reg_type); |
| 2197 | } |
| 2198 | |
| 2199 | // If opsize is 8 bits wide, duplicate the byte into both halves of a 16-bit word and use the 16-bit shuffle instead.
| 2200 | if (opsize == kSignedByte || opsize == kUnsignedByte) { |
| 2201 | RegStorage temp = AllocTemp(); |
| 2202 | // val = val | (val << 8).
| 2203 | NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg()); |
| 2204 | NewLIR2(kX86Sal32RI, temp.GetReg(), 8); |
| 2205 | NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg()); |
| 2206 | FreeTemp(temp); |
| 2207 | } |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2208 | |
| 2209 | // Load the value into the XMM register. |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2210 | NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg()); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2211 | |
| 2212 | // Now shuffle the value across the destination. |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2213 | NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2214 | |
| 2215 | // And then repeat as needed. |
| 2216 | if (op_high != 0) { |
Udayan Banerji | 60bfe7b | 2014-07-08 19:59:43 -0700 | [diff] [blame] | 2217 | NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm); |
Mark Mendell | fe94578 | 2014-05-22 09:52:36 -0400 | [diff] [blame] | 2218 | } |
| 2219 | } |
| 2220 | |
Mark Mendell | d65c51a | 2014-04-29 16:55:20 -0400 | [diff] [blame] | 2221 | LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) { |
| 2222 | int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); |
| 2223 | for (LIR *p = const_vectors_; p != nullptr; p = p->next) { |
| 2224 | if (args[0] == p->operands[0] && args[1] == p->operands[1] && |
| 2225 | args[2] == p->operands[2] && args[3] == p->operands[3]) { |
| 2226 | return p; |
| 2227 | } |
| 2228 | } |
| 2229 | return nullptr; |
| 2230 | } |
| 2231 | |
| 2232 | LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) { |
| 2233 | LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); |
| 2234 | int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg); |
| 2235 | new_value->operands[0] = args[0]; |
| 2236 | new_value->operands[1] = args[1]; |
| 2237 | new_value->operands[2] = args[2]; |
| 2238 | new_value->operands[3] = args[3]; |
| 2239 | new_value->next = const_vectors_; |
| 2240 | if (const_vectors_ == nullptr) { |
| 2241 | estimated_native_code_size_ += 12; // Amount needed to align to 16 byte boundary. |
| 2242 | } |
| 2243 | estimated_native_code_size_ += 16; // Space for one vector. |
| 2244 | const_vectors_ = new_value; |
| 2245 | return new_value; |
| 2246 | } |
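|      | // Usage sketch (illustrative): callers first try ScanVectorLiteral(mir)
|      | // to reuse a pooled 16-byte constant and only call AddVectorLiteral(mir)
|      | // on a miss; the first pooled vector also reserves up to 12 bytes so the
|      | // pool can be aligned to a 16-byte boundary.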
| 2247 | |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2248 | // ------------ ABI support: mapping of args to physical registers ------------- |
Serguei Katkov | 407a9d2 | 2014-07-05 03:09:32 +0700 | [diff] [blame] | 2249 | RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2250 | const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5}; |
| 2251 | const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister); |
| 2252 | const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3, |
| 2253 | kFArg4, kFArg5, kFArg6, kFArg7}; |
| 2254 | const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2255 | |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2256 | if (is_double_or_float) { |
| 2257 | if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2258 | return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2259 | } |
| 2260 | } else { |
| 2261 | if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) { |
Serguei Katkov | 407a9d2 | 2014-07-05 03:09:32 +0700 | [diff] [blame] | 2262 | return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) : |
| 2263 | ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2264 | } |
| 2265 | } |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2266 | return RegStorage::InvalidReg(); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2267 | } |
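|      | // Mapping sketch (illustrative): for the signature (int, double, Object),
|      | // GetNextReg hands out kArg1 for the int, kFArg0 for the double, and
|      | // kArg2 (as a reference) for the Object; once the five core or eight FP
|      | // slots run out, it returns InvalidReg and the argument is stack-mapped.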
| 2268 | |
| 2269 | RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) { |
| 2270 | DCHECK(IsInitialized()); |
| 2271 | auto res = mapping_.find(in_position); |
| 2272 | return res != mapping_.end() ? res->second : RegStorage::InvalidReg(); |
| 2273 | } |
| 2274 | |
| 2275 | void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) { |
| 2276 | DCHECK(mapper != nullptr); |
| 2277 | max_mapped_in_ = -1; |
| 2278 | is_there_stack_mapped_ = false; |
| 2279 | for (int in_position = 0; in_position < count; in_position++) { |
Serguei Katkov | 407a9d2 | 2014-07-05 03:09:32 +0700 | [diff] [blame] | 2280 | RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, |
| 2281 | arg_locs[in_position].wide, arg_locs[in_position].ref); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2282 | if (reg.Valid()) { |
| 2283 | mapping_[in_position] = reg; |
| 2284 | max_mapped_in_ = std::max(max_mapped_in_, in_position); |
Serguei Katkov | 407a9d2 | 2014-07-05 03:09:32 +0700 | [diff] [blame] | 2285 | if (arg_locs[in_position].wide) { |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2286 | // We covered 2 args, so skip the next one |
| 2287 | in_position++; |
| 2288 | } |
| 2289 | } else { |
| 2290 | is_there_stack_mapped_ = true; |
| 2291 | } |
| 2292 | } |
| 2293 | initialized_ = true; |
| 2294 | } |
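|      | // Example (illustrative): for ins (long, int), position 0 maps to a wide
|      | // core reg and position 1 (the high half) is skipped, then position 2
|      | // maps to the next core reg; any in that gets InvalidReg sets
|      | // is_there_stack_mapped_.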
| 2295 | |
| 2296 | RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) { |
Elena Sayapina | dd64450 | 2014-07-01 18:39:52 +0700 | [diff] [blame] | 2297 | if (!cu_->target64) { |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2298 | return GetCoreArgMappingToPhysicalReg(arg_num); |
| 2299 | } |
| 2300 | |
| 2301 | if (!in_to_reg_storage_mapping_.IsInitialized()) { |
| 2302 | int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; |
| 2303 | RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg]; |
| 2304 | |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2305 | InToRegStorageX86_64Mapper mapper(this); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2306 | in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper); |
| 2307 | } |
| 2308 | return in_to_reg_storage_mapping_.Get(arg_num); |
| 2309 | } |
| 2310 | |
| 2311 | RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) { |
| 2312 | // For the 32-bit internal ABI, the first 3 arguments are passed in registers. |
| 2313 | // Not used for 64-bit. TODO: Move X86_32 to the same framework.
| 2314 | switch (core_arg_num) { |
| 2315 | case 0: |
| 2316 | return rs_rX86_ARG1; |
| 2317 | case 1: |
| 2318 | return rs_rX86_ARG2; |
| 2319 | case 2: |
| 2320 | return rs_rX86_ARG3; |
| 2321 | default: |
| 2322 | return RegStorage::InvalidReg(); |
| 2323 | } |
| 2324 | } |
| 2325 | |
| 2326 | // ---------End of ABI support: mapping of args to physical registers ------------- |
| 2327 | |
| 2328 | /* |
| 2329 | * If there are any ins passed in registers that have not been promoted |
| 2330 | * to a callee-save register, flush them to the frame. Perform initial |
| 2331 | * assignment of promoted arguments. |
| 2332 | * |
| 2333 | * ArgLocs is an array of location records describing the incoming arguments |
| 2334 | * with one location record per word of argument. |
| 2335 | */ |
| 2336 | void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { |
Elena Sayapina | dd64450 | 2014-07-01 18:39:52 +0700 | [diff] [blame] | 2337 | if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2338 | /* |
| 2339 | * Dummy up a RegLocation for the incoming Method* |
| 2340 | * It will attempt to keep kArg0 live (or copy it to home location |
| 2341 | * if promoted). |
| 2342 | */ |
| 2343 | |
| 2344 | RegLocation rl_src = rl_method; |
| 2345 | rl_src.location = kLocPhysReg; |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2346 | rl_src.reg = TargetRefReg(kArg0); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2347 | rl_src.home = false; |
| 2348 | MarkLive(rl_src); |
| 2349 | StoreValue(rl_method, rl_src); |
| 2350 | // If Method* has been promoted, explicitly flush |
| 2351 | if (rl_method.location == kLocPhysReg) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2352 | StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2353 | } |
| 2354 | |
| 2355 | if (cu_->num_ins == 0) { |
| 2356 | return; |
| 2357 | } |
| 2358 | |
| 2359 | int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; |
| 2360 | /* |
| 2361 | * Copy incoming arguments to their proper home locations. |
| 2362 | * NOTE: an older version of dx had an issue in which |
| 2363 | * it would reuse static method argument registers. |
| 2364 | * This could result in the same Dalvik virtual register |
| 2365 | * being promoted to both core and fp regs. To account for this, |
| 2366 | * we only copy to the corresponding promoted physical register |
| 2367 | * if it matches the type of the SSA name for the incoming |
| 2368 | * argument. It is also possible that long and double arguments |
| 2369 | * end up half-promoted. In those cases, we must flush the promoted |
| 2370 | * half to memory as well. |
| 2371 | */ |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2372 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2373 | for (int i = 0; i < cu_->num_ins; i++) { |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2374 | // get reg corresponding to input |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2375 | RegStorage reg = GetArgMappingToPhysicalReg(i); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2376 | |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2377 | RegLocation* t_loc = &ArgLocs[i]; |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2378 | if (reg.Valid()) { |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2379 | // If arriving in register. |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2380 | |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2381 | // We have already updated the arg location with promoted info |
| 2382 | // so we can be based on it. |
| 2383 | if (t_loc->location == kLocPhysReg) { |
| 2384 | // Just copy it. |
| 2385 | OpRegCopy(t_loc->reg, reg); |
| 2386 | } else { |
| 2387 | // Needs flush. |
| 2388 | if (t_loc->ref) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2389 | StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2390 | } else { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2391 | StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32, |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2392 | kNotVolatile); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2393 | } |
| 2394 | } |
| 2395 | } else { |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2396 | // If arriving in frame & promoted. |
| 2397 | if (t_loc->location == kLocPhysReg) { |
| 2398 | if (t_loc->ref) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2399 | LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile); |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2400 | } else { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2401 | LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2402 | t_loc->wide ? k64 : k32, kNotVolatile); |
| 2403 | } |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2404 | } |
Dmitry Petrochenko | 4d5d794 | 2014-06-27 12:25:01 +0700 | [diff] [blame] | 2405 | } |
| 2406 | if (t_loc->wide) { |
| 2407 | // Increment i to skip the next one. |
| 2408 | i++; |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2409 | } |
| 2410 | } |
| 2411 | } |
| 2412 | |
| 2413 | /* |
| 2414 | * Load up to 5 arguments, the first three of which will be in |
| 2415 | * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, |
| 2416 | * and as part of the load sequence, it must be replaced with |
| 2417 | * the target method pointer. Note, this may also be called |
| 2418 | * for "range" variants if the number of arguments is 5 or fewer. |
| 2419 | */ |
| 2420 | int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, |
| 2421 | int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, |
| 2422 | const MethodReference& target_method, |
| 2423 | uint32_t vtable_idx, uintptr_t direct_code, |
| 2424 | uintptr_t direct_method, InvokeType type, bool skip_this) { |
Elena Sayapina | dd64450 | 2014-07-01 18:39:52 +0700 | [diff] [blame] | 2425 | if (!cu_->target64) { |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2426 | return Mir2Lir::GenDalvikArgsNoRange(info, |
| 2427 | call_state, pcrLabel, next_call_insn, |
| 2428 | target_method, |
| 2429 | vtable_idx, direct_code, |
| 2430 | direct_method, type, skip_this); |
| 2431 | } |
| 2432 | return GenDalvikArgsRange(info, |
| 2433 | call_state, pcrLabel, next_call_insn, |
| 2434 | target_method, |
| 2435 | vtable_idx, direct_code, |
| 2436 | direct_method, type, skip_this); |
| 2437 | } |
| 2438 | |
| 2439 | /* |
| 2440 | * May have 0+ arguments (also used for jumbo). Note that |
| 2441 | * source virtual registers may be in physical registers, so may |
| 2442 | * need to be flushed to home location before copying. This |
| 2443 | * applies to arg3 and above (see below). |
| 2444 | * |
| 2445 | * Two general strategies: |
| 2446 | * If < 20 arguments |
| 2447 | * Pass args 3-18 using a block copy (XMM/GPR moves on x86)
| 2448 | * Pass arg0, arg1 & arg2 in kArg1-kArg3
| 2449 | * If 20+ arguments
| 2450 | * Pass args arg19+ using a memcpy block copy
| 2451 | * Pass arg0, arg1 & arg2 in kArg1-kArg3 |
| 2452 | * |
| 2453 | */ |
| 2454 | int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, |
| 2455 | LIR** pcrLabel, NextCallInsn next_call_insn, |
| 2456 | const MethodReference& target_method, |
| 2457 | uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, |
| 2458 | InvokeType type, bool skip_this) { |
Elena Sayapina | dd64450 | 2014-07-01 18:39:52 +0700 | [diff] [blame] | 2459 | if (!cu_->target64) { |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2460 | return Mir2Lir::GenDalvikArgsRange(info, call_state, |
| 2461 | pcrLabel, next_call_insn, |
| 2462 | target_method, |
| 2463 | vtable_idx, direct_code, direct_method, |
| 2464 | type, skip_this); |
| 2465 | } |
| 2466 | |
| 2467 | /* If no arguments, just return */ |
| 2468 | if (info->num_arg_words == 0) {
| 2469 | return call_state;
|      | }
| 2470 | |
| 2471 | const int start_index = skip_this ? 1 : 0; |
| 2472 | |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2473 | InToRegStorageX86_64Mapper mapper(this); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2474 | InToRegStorageMapping in_to_reg_storage_mapping; |
| 2475 | in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper); |
| 2476 | const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn(); |
| 2477 | const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 : |
| 2478 | in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1; |
| 2479 | int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped); |
| 2480 | |
| 2481 | // First of all, check whether it makes sense to use bulk copying.
| 2482 | // The optimization is applicable only to the range case.
| 2483 | // TODO: make a constant instead of 2.
| 2484 | if (info->is_range && regs_left_to_pass_via_stack >= 2) { |
| 2485 | // Scan the rest of the args - if in phys_reg flush to memory |
| 2486 | for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) { |
| 2487 | RegLocation loc = info->args[next_arg]; |
| 2488 | if (loc.wide) { |
| 2489 | loc = UpdateLocWide(loc); |
| 2490 | if (loc.location == kLocPhysReg) { |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2491 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2492 | StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2493 | } |
| 2494 | next_arg += 2; |
| 2495 | } else { |
| 2496 | loc = UpdateLoc(loc); |
| 2497 | if (loc.location == kLocPhysReg) { |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2498 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2499 | StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2500 | } |
| 2501 | next_arg++; |
| 2502 | } |
| 2503 | } |
| 2504 | |
| 2505 | // Logic below assumes that Method pointer is at offset zero from SP. |
| 2506 | DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0); |
| 2507 | |
| 2508 | // The rest can be copied together |
| 2509 | int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low); |
| 2510 | int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set); |
| 2511 | |
| 2512 | int current_src_offset = start_offset; |
| 2513 | int current_dest_offset = outs_offset; |
| 2514 | |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2515 | // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
| 2516 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2517 | while (regs_left_to_pass_via_stack > 0) { |
| 2518 | // This is based on the knowledge that the stack itself is 16-byte aligned. |
| 2519 | bool src_is_16b_aligned = (current_src_offset & 0xF) == 0; |
| 2520 | bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0; |
| 2521 | size_t bytes_to_move; |
| 2522 | |
| 2523 | /* |
| 2524 | * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
| 2525 | * a 128-bit move, because we won't get another chance to try to align. If there are more
| 2526 | * than 4 registers left to move, consider doing a 128-bit move only if either src or dest is aligned.
| 2527 | * We do this because we could potentially do a smaller move to align. |
| 2528 | */ |
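|      | // Worked case (illustrative): with 5 words left and a 16-byte-aligned
|      | // source, the first pass moves 4 words through an XMM temp and the
|      | // remaining word goes through the 32-bit GPR path below.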
| 2529 | if (regs_left_to_pass_via_stack == 4 || |
| 2530 | (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) { |
| 2531 | // Moving 128-bits via xmm register. |
| 2532 | bytes_to_move = sizeof(uint32_t) * 4; |
| 2533 | |
| 2534 | // Allocate a free xmm temp. Since we are working through the calling sequence, |
| 2535 | // we expect to have an xmm temporary available. AllocTempDouble will abort if |
| 2536 | // there are no free registers. |
| 2537 | RegStorage temp = AllocTempDouble(); |
| 2538 | |
| 2539 | LIR* ld1 = nullptr; |
| 2540 | LIR* ld2 = nullptr; |
| 2541 | LIR* st1 = nullptr; |
| 2542 | LIR* st2 = nullptr; |
| 2543 | |
| 2544 | /* |
| 2545 | * The logic is similar for both loads and stores. If we have 16-byte alignment, |
| 2546 | * do an aligned move. If we have 8-byte alignment, then do the move in two |
| 2547 | * parts. This approach prevents possible cache line splits. Finally, fall back |
| 2548 | * to doing an unaligned move. In most cases we likely won't split the cache |
| 2549 | * line but we cannot prove it and thus take a conservative approach. |
| 2550 | */ |
| 2551 | bool src_is_8b_aligned = (current_src_offset & 0x7) == 0; |
| 2552 | bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; |
| 2553 | |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2554 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2555 | if (src_is_16b_aligned) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2556 | ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2557 | } else if (src_is_8b_aligned) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2558 | ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP); |
| 2559 | ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1), |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2560 | kMovHi128FP); |
| 2561 | } else { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2562 | ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2563 | } |
| 2564 | |
| 2565 | if (dest_is_16b_aligned) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2566 | st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2567 | } else if (dest_is_8b_aligned) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2568 | st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP); |
| 2569 | st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1), |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2570 | temp, kMovHi128FP); |
| 2571 | } else { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2572 | st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2573 | } |
| 2574 | |
| 2575 | // TODO If we could keep track of aliasing information for memory accesses that are wider |
| 2576 | // than 64-bit, we wouldn't need to set up a barrier. |
| 2577 | if (ld1 != nullptr) { |
| 2578 | if (ld2 != nullptr) { |
| 2579 | // For 64-bit load we can actually set up the aliasing information. |
| 2580 | AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true); |
| 2581 | AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true); |
| 2582 | } else { |
| 2583 | // Set barrier for 128-bit load. |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2584 | ld1->u.m.def_mask = &kEncodeAll; |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2585 | } |
| 2586 | } |
| 2587 | if (st1 != nullptr) { |
| 2588 | if (st2 != nullptr) { |
| 2589 | // For 64-bit store we can actually set up the aliasing information. |
| 2590 | AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true); |
| 2591 | AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true); |
| 2592 | } else { |
| 2593 | // Set barrier for 128-bit store. |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2594 | st1->u.m.def_mask = &kEncodeAll; |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2595 | } |
| 2596 | } |
| 2597 | |
| 2598 | // Free the temporary used for the data movement. |
| 2599 | FreeTemp(temp); |
| 2600 | } else { |
| 2601 | // Moving 32-bits via general purpose register. |
| 2602 | bytes_to_move = sizeof(uint32_t); |
| 2603 | |
| 2604 | // Instead of allocating a new temp, simply reuse one of the registers being used |
| 2605 | // for argument passing. |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2606 | RegStorage temp = TargetReg(kArg3, false); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2607 | |
| 2608 | // Now load the argument VR and store to the outs. |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2609 | Load32Disp(rs_rX86_SP, current_src_offset, temp); |
| 2610 | Store32Disp(rs_rX86_SP, current_dest_offset, temp); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2611 | } |
| 2612 | |
| 2613 | current_src_offset += bytes_to_move; |
| 2614 | current_dest_offset += bytes_to_move; |
| 2615 | regs_left_to_pass_via_stack -= (bytes_to_move >> 2); |
| 2616 | } |
| 2617 | DCHECK_EQ(regs_left_to_pass_via_stack, 0); |
| 2618 | } |
| 2619 | |
| 2620 | // Now handle the arguments that were not mapped to registers (stack-mapped args).
| 2621 | if (in_to_reg_storage_mapping.IsThereStackMapped()) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2622 | RegStorage regSingle = TargetReg(kArg2, false); |
| 2623 | RegStorage regWide = TargetReg(kArg3, true); |
Chao-ying Fu | b6564c1 | 2014-06-24 13:24:36 -0700 | [diff] [blame] | 2624 | for (int i = start_index; |
| 2625 | i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) { |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2626 | RegLocation rl_arg = info->args[i]; |
| 2627 | rl_arg = UpdateRawLoc(rl_arg); |
| 2628 | RegStorage reg = in_to_reg_storage_mapping.Get(i); |
| 2629 | if (!reg.Valid()) { |
| 2630 | int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set); |
| 2631 | |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2632 | { |
| 2633 | ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); |
| 2634 | if (rl_arg.wide) { |
| 2635 | if (rl_arg.location == kLocPhysReg) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2636 | StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile); |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2637 | } else { |
| 2638 | LoadValueDirectWideFixed(rl_arg, regWide); |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2639 | StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile); |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2640 | } |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2641 | } else { |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2642 | if (rl_arg.location == kLocPhysReg) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2643 | StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile); |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2644 | } else { |
| 2645 | LoadValueDirectFixed(rl_arg, regSingle); |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2646 | StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile); |
Vladimir Marko | 8dea81c | 2014-06-06 14:50:36 +0100 | [diff] [blame] | 2647 | } |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2648 | } |
| 2649 | } |
| 2650 | call_state = next_call_insn(cu_, info, call_state, target_method, |
| 2651 | vtable_idx, direct_code, direct_method, type); |
| 2652 | } |
Chao-ying Fu | b6564c1 | 2014-06-24 13:24:36 -0700 | [diff] [blame] | 2653 | if (rl_arg.wide) { |
| 2654 | i++; |
| 2655 | } |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2656 | } |
| 2657 | } |
| 2658 | |
| 2659 | // Finish with the register-mapped arguments.
| 2660 | for (int i = start_index; i <= last_mapped_in; i++) { |
| 2661 | RegLocation rl_arg = info->args[i]; |
| 2662 | rl_arg = UpdateRawLoc(rl_arg); |
| 2663 | RegStorage reg = in_to_reg_storage_mapping.Get(i); |
| 2664 | if (reg.Valid()) { |
| 2665 | if (rl_arg.wide) { |
| 2666 | LoadValueDirectWideFixed(rl_arg, reg); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2667 | } else { |
| 2668 | LoadValueDirectFixed(rl_arg, reg); |
| 2669 | } |
| 2670 | call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, |
| 2671 | direct_code, direct_method, type); |
| 2672 | } |
Chao-ying Fu | b6564c1 | 2014-06-24 13:24:36 -0700 | [diff] [blame] | 2673 | if (rl_arg.wide) { |
| 2674 | i++; |
| 2675 | } |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2676 | } |
| 2677 | |
| 2678 | call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, |
| 2679 | direct_code, direct_method, type); |
| 2680 | if (pcrLabel) { |
Andreas Gampe | 5655e84 | 2014-06-17 16:36:07 -0700 | [diff] [blame] | 2681 | if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2682 | *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2683 | } else { |
| 2684 | *pcrLabel = nullptr; |
| 2685 | // In lieu of generating a check for kArg1 being null, we need to |
| 2686 | // perform a load when doing implicit checks. |
| 2687 | RegStorage tmp = AllocTemp(); |
Chao-ying Fu | a77ee51 | 2014-07-01 17:43:41 -0700 | [diff] [blame] | 2688 | Load32Disp(TargetRefReg(kArg1), 0, tmp); |
Dmitry Petrochenko | 58994cd | 2014-05-17 01:02:18 +0700 | [diff] [blame] | 2689 | MarkPossibleNullPointerException(info->opt_flags); |
| 2690 | FreeTemp(tmp); |
| 2691 | } |
| 2692 | } |
| 2693 | return call_state; |
| 2694 | } |
| 2695 | |
Brian Carlstrom | 7934ac2 | 2013-07-26 10:54:15 -0700 | [diff] [blame] | 2696 | } // namespace art |