//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class, which just provides
// default implementations of various methods.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
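//
// For example, with the default operand layout a three-address instruction
// of the form "r0 = op r1, r2" is rewritten in place (or cloned, when NewMI
// is true) as "r0 = op r2, r1", swapping the source registers together with
// their kill flags.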
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  bool HasDef = TID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool ChangeReg0 = false;
  if (HasDef && MI->getOperand(0).getReg() == Reg1) {
    // Must be two address instruction!
    assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = HasDef
      ? (ChangeReg0 ? Reg2 : MI->getOperand(0).getReg()) : 0;
    bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
    MachineFunction &MF = *MI->getParent()->getParent();
    if (HasDef)
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
    else
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
  }

  if (ChangeReg0)
    MI->getOperand(0).setReg(Reg2);
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would be swapped. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = TID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}


bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

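  // Pred holds the predicate operand values in order; j walks Pred while i
  // walks every operand of MI, so only the operands the descriptor marks as
  // predicates are overwritten.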
  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (TID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo *TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MachineOperand &MO = MI->getOperand(0);
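  // Rewrite the def operand of the clone to target DestReg. A virtual
  // destination keeps the sub-register index on the operand; for a physical
  // destination the index is folded into the concrete sub-register.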
  if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
    MO.setReg(DestReg);
    MO.setSubReg(SubIdx);
  } else if (SubIdx) {
    MO.setReg(TRI->getSubReg(DestReg, SubIdx));
  } else {
    MO.setReg(DestReg);
  }
  MBB.insert(I, MI);
}

bool TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                           const MachineInstr *MI1) const {
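  // By default, two instructions produce the same value when they are
  // identical apart from the virtual registers they define.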
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->getDesc().isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

unsigned
TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
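  // Sum the target-reported byte size of every instruction in the function.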
  unsigned FnSize = 0;
  for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I)
      FnSize += GetInstSizeInBytes(I);
  }
  return FnSize;
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
                                   MachineInstr* MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const {
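  // Record whether the folded operands are defs or uses so the resulting
  // memory operand can be flagged as a store, a load, or both.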
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
  if (!NewMI) return 0;

  assert((!(Flags & MachineMemOperand::MOStore) ||
          NewMI->getDesc().mayStore()) &&
         "Folded a def to a non-store!");
  assert((!(Flags & MachineMemOperand::MOLoad) ||
          NewMI->getDesc().mayLoad()) &&
         "Folded a use to a non-load!");
  const MachineFrameInfo &MFI = *MF.getFrameInfo();
  assert(MFI.getObjectOffset(FrameIndex) != -1);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIndex),
                            Flags, /*Offset=*/0,
                            MFI.getObjectSize(FrameIndex),
                            MFI.getObjectAlignment(FrameIndex));
  NewMI->addMemOperand(MF, MMO);

  return NewMI;
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
                                   MachineInstr* MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}

bool
TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
                                                            MI,
                                                          AliasAnalysis *
                                                            AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  const TargetInstrDesc &TID = MI->getDesc();

  // Avoid instructions obviously unsafe for remat.
  if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
      TID.mayStore())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (TID.mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.def_empty(Reg))
          return false;
        BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
        if (AllocatableRegs.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI.def_empty(AliasReg))
            return false;
          if (AllocatableRegs.test(AliasReg))
            return false;
        }
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def, and that in the first operand.
    if (MO.isDef() != (i == 0))
      return false;

    // For the def, it should be the only def of that register.
    if (MO.isDef() && (llvm::next(MRI.def_begin(Reg)) != MRI.def_end() ||
                       MRI.isLiveIn(Reg)))
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}