//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfo(X86Insts, sizeof(X86Insts)/sizeof(X86Insts[0])),
    TM(tm), RI(tm, *this) {
}

/// getDWARF_LABELOpcode - Return the opcode of the target's DWARF_LABEL
/// instruction if it has one.  This is used by codegen passes that update
/// DWARF line number info as they modify the code.
unsigned X86InstrInfo::getDWARF_LABELOpcode() const {
  return X86::DWARF_LABEL;
}


bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
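  // Recognize every flavor of plain register-to-register move: integer GPR
  // copies, x87 FpMOV, and the scalar and packed SSE copies (including the
  // GPR <-> XMM transfer forms).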
  if (oc == X86::MOV8rr || oc == X86::MOV16rr ||
      oc == X86::MOV32rr || oc == X86::MOV64rr ||
      oc == X86::MOV16to16_ || oc == X86::MOV32to32_ ||
      oc == X86::FpMOV || oc == X86::MOVSSrr || oc == X86::MOVSDrr ||
      oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr ||
      oc == X86::MOVAPSrr || oc == X86::MOVAPDrr ||
      oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr ||
      oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr ||
      oc == X86::MOVDI2PDIrr || oc == X86::MOVQI2PQIrr ||
      oc == X86::MOVPDI2DIrr) {
    assert(MI.getNumOperands() == 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::FpLD64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
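    // The memory reference occupies operands 1-4: base (a frame index here),
    // scale, index register, and displacement.  Only a direct frame index
    // reference (scale 1, no index register, zero displacement) counts as a
    // load from a stack slot.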
    if (MI->getOperand(1).isFrameIndex() && MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(4).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::FpSTP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
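    // For stores the memory reference occupies operands 0-3 and the stored
    // register is operand 4; again only a direct frame index reference
    // (scale 1, no index register, zero displacement) qualifies.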
    if (MI->getOperand(0).isFrameIndex() && MI->getOperand(1).isImmediate() &&
        MI->getOperand(2).isRegister() && MI->getOperand(3).isImmediate() &&
        MI->getOperand(1).getImmedValue() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(0).getFrameIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr *MI) const {
  // All instructions passed in are two-address instructions.  Get the known
  // operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  switch (MI->getOpcode()) {
  default: break;
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImmedValue();
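    // A shufps whose two sources are the same register is a single-register
    // shuffle, which pshufd (SSE2 only) expresses with an untied source
    // operand, removing the two-address constraint.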
    if (!Subtarget->hasSSE2() || B != C) return 0;
    return BuildMI(X86::PSHUFDri, 2, A).addReg(B).addImm(M);
  }
  }

  // FIXME: None of these instructions are promotable to LEAs without
  // additional information.  In particular, LEA doesn't set the flags that
  // add and inc do. :(
  return 0;

  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's.  When
  // we have subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  switch (MI->getOpcode()) {
  case X86::INC32r:
  case X86::INC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src, 1);
  case X86::INC16r:
  case X86::INC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src, 1);
  case X86::DEC32r:
  case X86::DEC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src, -1);
  case X86::DEC16r:
  case X86::DEC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src, -1);
  case X86::ADD32rr:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    return addRegReg(BuildMI(X86::LEA32r, 5, Dest), Src,
                     MI->getOperand(2).getReg());
  case X86::ADD16rr:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    return addRegReg(BuildMI(X86::LEA16r, 5, Dest), Src,
                     MI->getOperand(2).getReg());
  case X86::ADD32ri:
  case X86::ADD32ri8:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src,
                          MI->getOperand(2).getImmedValue());
    return 0;
  case X86::ADD16ri:
  case X86::ADD16ri8:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src,
                          MI->getOperand(2).getImmedValue());
    break;

  case X86::SHL16ri:
    if (DisableLEA16) return 0;
  case X86::SHL32ri:
    assert(MI->getNumOperands() == 3 && MI->getOperand(2).isImmediate() &&
           "Unknown shl instruction!");
    unsigned ShAmt = MI->getOperand(2).getImmedValue();
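    // Shift amounts of 1, 2, and 3 map onto LEA scale factors of 2, 4, and 8.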
    if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
      X86AddressMode AM;
      AM.Scale = 1 << ShAmt;
      AM.IndexReg = Src;
      unsigned Opc = MI->getOpcode() == X86::SHL32ri ? X86::LEA32r :X86::LEA16r;
      return addFullAddress(BuildMI(Opc, 5, Dest), AM);
    }
    break;
  }

  return 0;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  // FIXME: Can commute cmoves by changing the condition!
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8:{// A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImmedValue();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    return BuildMI(Opc, 3, A).addReg(C).addReg(B).addImm(Size-Amt);
  }
  default:
    return TargetInstrInfo::commuteInstruction(MI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE: return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL: return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG: return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB: return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA: return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS: return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP: return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO: return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L: return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G: return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B: return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A: return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S: return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P: return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O: return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

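/// AnalyzeBranch - Analyze the terminators of MBB.  On success this returns
/// false and fills in TBB, FBB, and Cond; it returns true if the branch
/// structure of the block cannot be understood.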
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // TODO: If FP_REG_KILL is around, ignore it.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isTerminatorInstr((--I)->getOpcode()))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isTerminatorInstr((--I)->getOpcode())) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isTerminatorInstr((--I)->getOpcode()))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMachineBasicBlock();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

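/// RemoveBranch - Erase the branch instructions at the end of MBB.  A block
/// may end with a conditional branch followed by an unconditional branch, so
/// up to two terminators are removed, working backwards from the end.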
void X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return;

  // Remove the branch.
  I->eraseFromParent();
}

void X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

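  // Cond, when non-empty, holds a single immediate operand encoding the
  // X86::CondCode produced by AnalyzeBranch.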
  if (FBB == 0) { // One way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, X86::JMP, 1).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, Opc, 1).addMBB(TBB);
    }
    return;
  }

  // Two-way conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, Opc, 1).addMBB(TBB);
  BuildMI(&MBB, X86::JMP, 1).addMBB(FBB);
}

bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP32m:  // Indirect branch through mem.
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}