//===-- R600LowerConstCopy.cpp - Propagate ConstCopy / lower them to MOV--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass handles the CONST_COPY pseudo MachineInstrs that remain after
/// instruction selection. ISel folds each constant buffer read into scalar ALU
/// instructions, but it cannot fold them into vector instructions such as DOT4
/// or Cube; for those it emits a CONST_COPY instead. This pass (executed after
/// ExpandSpecialInstrs) folds the copies into their users when possible and
/// lowers them to a MOV otherwise.
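///
/// As a rough sketch (pseudo notation, not verbatim MIR):
///
///   R1 = CONST_COPY 42           ; read entry 42 of the constant buffer
///
/// is first rewritten to
///
///   R1 = MOV ALU_CONST           ; with the SRC0_SEL immediate set to 42
///
/// and, where a reader of R1 still has a free constant slot in its bundle, the
/// reader's SRCn/SRCn_SEL operands are pointed at the constant directly so the
/// MOV can be erased once R1 is redefined or the block ends without successors.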
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/GlobalValue.h"
#include <vector>

namespace llvm {

class R600LowerConstCopy : public MachineFunctionPass {
private:
  static char ID;
  const R600InstrInfo *TII;

  struct ConstPairs {
    unsigned XYPair;
    unsigned ZWPair;
  };

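  /// Model of the constant read restriction inside an instruction group:
  /// reads on channels X/Y must all target the same 4-aligned constant index
  /// (UsedConst.XYPair) and reads on channels Z/W the same 4-aligned index
  /// (UsedConst.ZWPair). Returns true when \p ReadConst fits into the pairs
  /// already claimed by the current bundle, recording it if the slot was free.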
  bool canFoldInBundle(ConstPairs &UsedConst, unsigned ReadConst) const;
public:
  R600LowerConstCopy(TargetMachine &tm);
  virtual bool runOnMachineFunction(MachineFunction &MF);

  const char *getPassName() const { return "R600 Lower Const Copy"; }
};

char R600LowerConstCopy::ID = 0;

R600LowerConstCopy::R600LowerConstCopy(TargetMachine &tm) :
    MachineFunctionPass(ID),
    TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())) {
}

bool R600LowerConstCopy::canFoldInBundle(ConstPairs &UsedConst,
                                         unsigned ReadConst) const {
  // A constant index addresses one channel of a 4-aligned group of constants.
  unsigned ReadConstChan = ReadConst & 3;
  unsigned ReadConstIndex = ReadConst & (~3);
  if (ReadConstChan < 2) {
    // X/Y read: must share the group already claimed for the XY port.
    if (!UsedConst.XYPair) {
      UsedConst.XYPair = ReadConstIndex;
    }
    return UsedConst.XYPair == ReadConstIndex;
  } else {
    // Z/W read: must share the group already claimed for the ZW port.
    if (!UsedConst.ZWPair) {
      UsedConst.ZWPair = ReadConstIndex;
    }
    return UsedConst.ZWPair == ReadConstIndex;
  }
}

static bool isControlFlow(const MachineInstr &MI) {
  return (MI.getOpcode() == AMDGPU::IF_PREDICATE_SET) ||
         (MI.getOpcode() == AMDGPU::ENDIF) ||
         (MI.getOpcode() == AMDGPU::ELSE) ||
         (MI.getOpcode() == AMDGPU::WHILELOOP) ||
         (MI.getOpcode() == AMDGPU::BREAK);
}

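// Per basic block, RegToConstIndex remembers, for each register, the constant
// MOV that currently defines it. CONST_COPYs are first lowered to such MOVs;
// for each ALU bundle the pass then tries to rewrite constant-reading source
// operands to ALU_CONST/SRCn_SEL so the constant is read directly. A MOV whose
// register is redefined while still tracked has no remaining users and is
// erased; a MOV with a use that could not be folded is dropped from the map
// and kept.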
bool R600LowerConstCopy::runOnMachineFunction(MachineFunction &MF) {
  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
       BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
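    // Map from a register to the MOV-from-constant that currently defines it;
    // these are the candidates for folding into later readers or for erasure.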
    DenseMap<unsigned, MachineInstr *> RegToConstIndex;
    for (MachineBasicBlock::instr_iterator I = MBB.instr_begin(),
         E = MBB.instr_end(); I != E;) {

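      // Lower a CONST_COPY into a MOV from ALU_CONST with the constant index
      // carried in SRC0_SEL, and remember it so later readers can fold it.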
      if (I->getOpcode() == AMDGPU::CONST_COPY) {
        MachineInstr &MI = *I;
        I = llvm::next(I);
        unsigned DstReg = MI.getOperand(0).getReg();
        DenseMap<unsigned, MachineInstr *>::iterator SrcMI =
            RegToConstIndex.find(DstReg);
        if (SrcMI != RegToConstIndex.end()) {
          SrcMI->second->eraseFromParent();
          RegToConstIndex.erase(SrcMI);
        }
        MachineInstr *NewMI =
            TII->buildDefaultInstruction(MBB, &MI, AMDGPU::MOV,
                MI.getOperand(0).getReg(), AMDGPU::ALU_CONST);
        TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
            MI.getOperand(1).getImm());
        RegToConstIndex[DstReg] = NewMI;
        MI.eraseFromParent();
        continue;
      }

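      // Registers (re)defined by this instruction or bundle; any constant MOV
      // still tracked for one of them has no remaining users and is erased
      // below.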
      std::vector<unsigned> Defs;
      // We treat every instruction as a bundle: the algorithm that handles the
      // constant read port limitations inside an instruction group is still
      // valid for single instructions.
      std::vector<MachineInstr *> Bundle;

      if (I->isBundle()) {
        unsigned BundleSize = I->getBundleSize();
        for (unsigned i = 0; i < BundleSize; i++) {
          I = llvm::next(I);
          Bundle.push_back(I);
        }
      } else if (TII->isALUInstr(I->getOpcode())) {
        Bundle.push_back(I);
      } else if (isControlFlow(*I)) {
        // Constant MOVs must not be folded or erased across control flow.
        RegToConstIndex.clear();
        I = llvm::next(I);
        continue;
      } else {
        MachineInstr &MI = *I;
        for (MachineInstr::mop_iterator MOp = MI.operands_begin(),
             MOpE = MI.operands_end(); MOp != MOpE; ++MOp) {
          MachineOperand &MO = *MOp;
          if (!MO.isReg())
            continue;
          if (MO.isDef()) {
            Defs.push_back(MO.getReg());
          } else {
            // Either a TEX or an Export instruction; stop tracking the used
            // register (and its sub-registers) so its defining MOV is not
            // erased.
            RegToConstIndex.erase(MO.getReg());
            for (MCSubRegIterator SR(MO.getReg(), &TII->getRegisterInfo());
                 SR.isValid(); ++SR) {
              RegToConstIndex.erase(*SR);
            }
          }
        }
      }

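      // For each of the three possible sources, the register operand and the
      // SEL immediate used to address a folded constant.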
      R600Operands::Ops OpTable[3][2] = {
        {R600Operands::SRC0, R600Operands::SRC0_SEL},
        {R600Operands::SRC1, R600Operands::SRC1_SEL},
        {R600Operands::SRC2, R600Operands::SRC2_SEL},
      };

      for (std::vector<MachineInstr *>::iterator It = Bundle.begin(),
           ItE = Bundle.end(); It != ItE; ++It) {
        MachineInstr *MI = *It;
        if (TII->isPredicated(MI)) {
          // A predicated write may not execute, so the previous constant MOV
          // to this register may still be live: stop tracking it rather than
          // letting it be erased.
          RegToConstIndex.erase(MI->getOperand(0).getReg());
        } else {
          int WriteIDX =
              TII->getOperandIdx(MI->getOpcode(), R600Operands::WRITE);
          if (WriteIDX < 0 || MI->getOperand(WriteIDX).getImm())
            Defs.push_back(MI->getOperand(0).getReg());
        }
      }

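      // Rewrite constant-reading sources in place, tracking which constant
      // pairs the bundle already reads so the read port limit is respected.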
      ConstPairs CP = {0, 0};
      for (unsigned SrcOp = 0; SrcOp < 3; SrcOp++) {
        for (std::vector<MachineInstr *>::iterator It = Bundle.begin(),
             ItE = Bundle.end(); It != ItE; ++It) {
          MachineInstr *MI = *It;
          int SrcIdx = TII->getOperandIdx(MI->getOpcode(), OpTable[SrcOp][0]);
          if (SrcIdx < 0)
            continue;
          MachineOperand &MO = MI->getOperand(SrcIdx);
          DenseMap<unsigned, MachineInstr *>::iterator SrcMI =
              RegToConstIndex.find(MO.getReg());
          if (SrcMI != RegToConstIndex.end()) {
            MachineInstr *CstMov = SrcMI->second;
            int ConstMovSel =
                TII->getOperandIdx(CstMov->getOpcode(), R600Operands::SRC0_SEL);
            unsigned ConstIndex = CstMov->getOperand(ConstMovSel).getImm();
            if (MI->isInsideBundle() && canFoldInBundle(CP, ConstIndex)) {
              TII->setImmOperand(MI, OpTable[SrcOp][1], ConstIndex);
              MI->getOperand(SrcIdx).setReg(AMDGPU::ALU_CONST);
            } else {
              // This use cannot be folded; keep the MOV but stop tracking it.
              RegToConstIndex.erase(SrcMI);
            }
          }
        }
      }

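      // A constant MOV whose register is redefined here has had every prior
      // use folded (otherwise it would have been dropped from the map), so it
      // is dead and can be erased.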
      for (std::vector<unsigned>::iterator It = Defs.begin(), ItE = Defs.end();
           It != ItE; ++It) {
        DenseMap<unsigned, MachineInstr *>::iterator SrcMI =
            RegToConstIndex.find(*It);
        if (SrcMI != RegToConstIndex.end()) {
          SrcMI->second->eraseFromParent();
          RegToConstIndex.erase(SrcMI);
        }
      }
      I = llvm::next(I);
    }

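    // A block with no successors cannot have out-of-block readers, so any
    // constant MOV still tracked at this point is dead and can be erased.
    // Blocks with successors conservatively keep theirs.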
    if (MBB.succ_empty()) {
      for (DenseMap<unsigned, MachineInstr *>::iterator
           DI = RegToConstIndex.begin(), DE = RegToConstIndex.end();
           DI != DE; ++DI) {
        DI->second->eraseFromParent();
      }
    }
  }
  return false;
}

FunctionPass *createR600LowerConstCopy(TargetMachine &tm) {
  return new R600LowerConstCopy(tm);
}

} // End namespace llvm