//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

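/// X86FastISel - A FastISel subclass targeting X86. It selects simple, common
/// instructions directly, without building a SelectionDAG; anything it does
/// not know how to handle is rejected by returning false so that selection
/// can fall back to the slower, fully general path.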
class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

public:
  explicit X86FastISel(MachineFunction &mf,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
    : FastISel(mf, vm, bm) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);

  bool X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V);

  bool X86SelectConstAddr(Value *V, unsigned &Op0);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool *MCP);
};

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Op0, or a GlobalAddress, i.e. V.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Op0, Value *V,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

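  // Materialize the load: the address is either an already-computed base
  // register (Op0) or a direct reference to a global (V).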
  ResultReg = createResultReg(RC);
  X86AddressMode AM;
  if (Op0)
    // Address is in register.
    AM.Base.Reg = Op0;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Op0 of
/// type VT. The address is either pre-computed, i.e. Op1, or a GlobalAddress,
/// i.e. V. Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V) {
  // Get opcode and regclass of the stored value for the given store
  // instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::ST_FP80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  X86AddressMode AM;
  if (Op1)
    // Address is in register.
    AM.Base.Reg = Op1;
  else
    AM.GV = cast<GlobalValue>(V);
  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
  return true;
}

/// X86SelectConstAddr - Select and emit code to materialize constant address.
///
bool X86FastISel::X86SelectConstAddr(Value *V,
                                     unsigned &Op0) {
  // FIXME: Only GlobalAddress for now.
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  if (!GV)
    return false;

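  // Some globals cannot be referenced directly; their address must first be
  // loaded from an indirection stub (for example, non-local globals on Darwin
  // in PIC/dynamic-no-pic modes, or dllimported symbols). GVRequiresExtraLoad
  // tells us when that extra pointer-sized load is needed.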
  if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
    // Issue load from stub if necessary.
    unsigned Opc = 0;
    const TargetRegisterClass *RC = NULL;
    if (TLI.getPointerTy() == MVT::i32) {
      Opc = X86::MOV32rm;
      RC  = X86::GR32RegisterClass;
    } else {
      Opc = X86::MOV64rm;
      RC  = X86::GR64RegisterClass;
    }
    Op0 = createResultReg(RC);
    X86AddressMode AM;
    AM.GV = GV;
    addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
    // Prevent loading GV stub multiple times in same MBB.
    LocalValueMap[V] = Op0;
  }
  return true;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction *I) {
  MVT VT = MVT::getMVT(I->getOperand(0)->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Value *V = I->getOperand(1);
  unsigned Op1 = getRegForValue(V);
  if (Op1 == 0) {
    // Handle constant store address.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  return X86FastEmitStore(VT, Op0, Op1, V);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return false;

  Value *V = I->getOperand(0);
  unsigned Op0 = getRegForValue(V);
  if (Op0 == 0) {
    // Handle constant load address.
    // FIXME: If load type is something we can't handle, this can result in
    // a dead stub load instruction.
    if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, Op0, V, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc;
  switch (VT.getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
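  // Emit the compare, then a setcc into the i8 result register. Ordered
  // equality (OEQ) and unordered inequality (UNE) each need two setcc results
  // combined (SETE AND SETNP, SETNE OR SETP); several other FP predicates are
  // handled by swapping the compare operands so that a single "above"/"below"
  // style setcc suffices.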
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

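  // The condition is an i1 held in a general-purpose register: TEST it
  // against itself to set the flags, then branch to the true block on JNE
  // and emit an explicit JMP to the false block.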
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0;
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR8rCL; break;
    case Instruction::AShr: Opc = X86::SAR8rCL; break;
    case Instruction::Shl:  Opc = X86::SHL8rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR16rCL; break;
    case Instruction::AShr: Opc = X86::SAR16rCL; break;
    case Instruction::Shl:  Opc = X86::SHL16rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR32rCL; break;
    case Instruction::AShr: Opc = X86::SAR32rCL; break;
    case Instruction::Shl:  Opc = X86::SHL32rCL; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: Opc = X86::SHR64rCL; break;
    case Instruction::AShr: Opc = X86::SAR64rCL; break;
    case Instruction::Shl:  Opc = X86::SHL64rCL; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
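  // x86 variable-count shifts take the shift amount in the fixed CL/CX/ECX/RCX
  // register, so copy the amount into that register before issuing the shift.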
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSelect(Instruction *I) {
  const Type *Ty = I->getType();
  if (isa<PointerType>(Ty))
    Ty = TLI.getTargetData()->getIntPtrType();

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (Ty == Type::Int16Ty) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (Ty == Type::Int32Ty) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (Ty == Type::Int64Ty) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

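  // Test the i1 condition, then conditionally move: the result starts as the
  // "true" value (Op1Reg) and CMOVE replaces it with the "false" value
  // (Op2Reg) when the TEST leaves ZF set, i.e. when the condition is zero.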
  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
                                                MachineConstantPool *MCP) {
  // Can't handle PIC-mode yet.
  if (TM.getRelocationModel() == Reloc::PIC_)
    return 0;

  MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return 0;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    Opc = X86::LD_Fp80m;
    RC  = X86::RFP80RegisterClass;
    break;
  }

  unsigned ResultReg = createResultReg(RC);
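  // Global addresses are materialized through X86SelectConstAddr; all other
  // constants (typically floating-point values) are placed in the constant
  // pool and loaded from there.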
  if (isa<GlobalValue>(C)) {
    // FIXME: If store value type is something we can't handle, this can result
    // in a dead stub load instruction.
    if (X86SelectConstAddr(C, ResultReg))
      return ResultReg;
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align =
    TM.getTargetData()->getPreferredTypeAlignmentShift(C->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = TM.getTargetData()->getABITypeSize(C->getType());
    Align = Log2_64(Align);
  }

  unsigned MCPOffset = MCP->getConstantPoolIndex(C, Align);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                        DenseMap<const Value *, unsigned> &vm,
                        DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
    return new X86FastISel(mf, vm, bm);
  }
}