//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
    : AMDGPUInstrInfo(ST), RI(), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//
static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
/// operand \p OpName, or if neither node has this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
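  // For example, on the MachineInstr for a single-address DS load, the named
  // operand "addr" might be at index 1 (the result is operand 0), while on
  // the corresponding MachineSDNode the same address is operand 0. (The
  // concrete operand layouts are defined in the .td files; this only
  // illustrates the off-by-one.)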
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable despite their implicit read of exec. We really want all
  // of the generic logic here except for the exec check.
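  // For example, V_MOV_B32 of an immediate writes only the lanes enabled in
  // exec, so it carries an implicit exec read that makes the generic check
  // reject it, even though re-executing the move elsewhere is safe.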
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {
    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // the st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

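// The ST64 DS variants address elements 64 units apart, so callers scale the
// element size by 64 for these opcodes when converting offsets to bytes.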
static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
                                        int64_t &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
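    // For example, a ds_read2_b64 with offset0:4 and offset1:5 reads two
    // consecutive 8-byte elements, so it is treated as one access at byte
    // offset 4 * 8 = 32. (Illustrative numbers; EltSize is computed below.)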
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element-sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
      return false;

    const MachineOperand *AddrReg =
        getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg =
        getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isFLAT(LdSt)) {
    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    BaseReg = AddrReg->getReg();
    Offset = 0;
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                      MachineInstr &SecondLdSt,
                                      unsigned NumLoads) const {
  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  }

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.
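  // For example, with the 16-byte threshold below, four 4-byte loads may
  // still be clustered (4 * 4 == 16), but two 16-byte loads will not be
  // (2 * 16 > 16).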
  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

  return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
    AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
    AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
  };

  static const int16_t Sub0_7_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
  };

  static const int16_t Sub0_3_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1,
  };

  unsigned Opcode;
  ArrayRef<int16_t> SubIndices;

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B64;
    SubIndices = Sub0_3_64;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B64;
    SubIndices = Sub0_7_64;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B64;
    SubIndices = Sub0_15_64;

  } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

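  // Copy subregisters in an order that cannot clobber a source subregister
  // that still needs to be read; this matters when DestReg and SrcReg are
  // overlapping register tuples.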
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == SubIndices.size() - 1)
      Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    Builder.addReg(SrcReg, RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
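    // For example, spilling a 4-byte SGPR selects SI_SPILL_S32_SAVE; the
    // pseudo is expected to be lowered later (e.g. to writes of VGPR lanes)
    // rather than expanded here into multiple instructions.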
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO);

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg())  // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO);

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)              // vaddr
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addImm(0)                              // offset
    .addMemOperand(MMO);
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {
      unsigned TIDIGXReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
        TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
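  // Each S_NOP covers Arg + 1 wait states, with Arg in [0, 7], so this emits
  // ceil(Count / 8) NOPs; e.g. Count == 10 becomes s_nop 7 followed by
  // s_nop 1.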
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI.getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

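    // The 64-bit move is split into two 32-bit moves of the sub0/sub1 halves;
    // the implicit defs of Dst below keep the liveness of the full 64-bit
    // register visible to later passes.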
    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI.getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
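    // The low half uses s_add_u32 and the high half consumes the carry via
    // s_addc_u32, together forming a full 64-bit add of the offset to the PC.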
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                   .addReg(RegLo)
                   .addOperand(MI.getOperand(1)));
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                   .addReg(RegHi)
                   .addImm(0));

    llvm::finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}

Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 909 | bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, |
| 910 | MachineOperand &Src0, |
| 911 | unsigned Src0OpName, |
| 912 | MachineOperand &Src1, |
| 913 | unsigned Src1OpName) const { |
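| | // Illustrative effect, a sketch assuming the usual neg/abs source |
| | // modifier encoding: when commuting "v_add_f32 d, -v0, |v1|", the |
| | // modifier immediates must travel with their operands, so the result |
| | // reads "v_add_f32 d, |v1|, -v0" rather than "-v1" and "|v0|". |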
| 914 | MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); |
| 915 | if (!Src0Mods) |
| 916 | return false; |
| 917 | |
| 918 | MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); |
| 919 | assert(Src1Mods && |
| 920 | "All commutable instructions have both src0 and src1 modifiers"); |
| 921 | |
| 922 | int Src0ModsVal = Src0Mods->getImm(); |
| 923 | int Src1ModsVal = Src1Mods->getImm(); |
| 924 | |
| 925 | Src1Mods->setImm(Src0ModsVal); |
| 926 | Src0Mods->setImm(Src1ModsVal); |
| 927 | return true; |
| 928 | } |
| 929 | |
| 930 | static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, |
| 931 | MachineOperand &RegOp, |
| 932 | MachineOperand &ImmOp) { |
| 933 | // TODO: Handle other immediate-like types. |
| 934 | if (!ImmOp.isImm()) |
| 935 | return nullptr; |
| 936 | |
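| | // Sketch: given operands (v0, 4.0), ImmOp is rewritten in place into |
| | // the register operand and RegOp into the immediate, yielding |
| | // (4.0, v0); the caller is responsible for installing the commuted |
| | // opcode afterwards. |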
| 937 | int64_t ImmVal = ImmOp.getImm(); |
| 938 | ImmOp.ChangeToRegister(RegOp.getReg(), false, false, |
| 939 | RegOp.isKill(), RegOp.isDead(), RegOp.isUndef(), |
| 940 | RegOp.isDebug()); |
| 941 | ImmOp.setSubReg(RegOp.getSubReg()); |
| 942 | RegOp.ChangeToImmediate(ImmVal); |
| 943 | return &MI; |
| 944 | } |
| 945 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 946 | MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 947 | unsigned Src0Idx, |
| 948 | unsigned Src1Idx) const { |
| 949 | assert(!NewMI && "this should never be used"); |
| 950 | |
| 951 | unsigned Opc = MI.getOpcode(); |
| 952 | int CommutedOpcode = commuteOpcode(Opc); |
Marek Olsak | cfbdba2 | 2015-06-26 20:29:10 +0000 | [diff] [blame] | 953 | if (CommutedOpcode == -1) |
| 954 | return nullptr; |
| 955 | |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 956 | assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == |
| 957 | static_cast<int>(Src0Idx) && |
| 958 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == |
| 959 | static_cast<int>(Src1Idx) && |
| 960 | "inconsistency with findCommutedOpIndices"); |
| 961 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 962 | MachineOperand &Src0 = MI.getOperand(Src0Idx); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 963 | MachineOperand &Src1 = MI.getOperand(Src1Idx); |
Matt Arsenault | aa5ccfb | 2014-10-17 18:00:37 +0000 | [diff] [blame] | 964 | |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 965 | MachineInstr *CommutedMI = nullptr; |
| 966 | if (Src0.isReg() && Src1.isReg()) { |
| 967 | if (isOperandLegal(MI, Src1Idx, &Src0)) { |
| 968 | // Be sure to copy the source modifiers to the right place. |
| 969 | CommutedMI |
| 970 | = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); |
Matt Arsenault | d282ada | 2014-10-17 18:00:48 +0000 | [diff] [blame] | 971 | } |
| 972 | |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 973 | } else if (Src0.isReg() && !Src1.isReg()) { |
| 974 | // src0 should always be able to support any operand type, so no need to |
| 975 | // check operand legality. |
| 976 | CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); |
| 977 | } else if (!Src0.isReg() && Src1.isReg()) { |
| 978 | if (isOperandLegal(MI, Src1Idx, &Src0)) |
| 979 | CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 980 | } else { |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 981 | // FIXME: Found two non-register operands to commute. This does happen. |
| 982 | return nullptr; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 983 | } |
Christian Konig | 3c14580 | 2013-03-27 09:12:59 +0000 | [diff] [blame] | 984 | |
| 986 | if (CommutedMI) { |
| 987 | swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, |
| 988 | Src1, AMDGPU::OpName::src1_modifiers); |
| 989 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 990 | CommutedMI->setDesc(get(CommutedOpcode)); |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 991 | } |
Christian Konig | 3c14580 | 2013-03-27 09:12:59 +0000 | [diff] [blame] | 992 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 993 | return CommutedMI; |
Christian Konig | 76edd4f | 2013-02-26 17:52:29 +0000 | [diff] [blame] | 994 | } |
| 995 | |
Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 996 | // This override is needed because source modifier operands may be |
| 997 | // inserted between the true commutable operands, and the base |
| 998 | // TargetInstrInfo::commuteInstruction relies on these indices. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 999 | bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0, |
Andrew Kaylor | 16c4da0 | 2015-09-28 20:33:22 +0000 | [diff] [blame] | 1000 | unsigned &SrcOpIdx1) const { |
Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1001 | if (!MI.isCommutable()) |
Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1002 | return false; |
| 1003 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1004 | unsigned Opc = MI.getOpcode(); |
Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1005 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); |
| 1006 | if (Src0Idx == -1) |
| 1007 | return false; |
| 1008 | |
Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1009 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); |
| 1010 | if (Src1Idx == -1) |
| 1011 | return false; |
| 1012 | |
Andrew Kaylor | 16c4da0 | 2015-09-28 20:33:22 +0000 | [diff] [blame] | 1013 | return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); |
Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1014 | } |
| 1015 | |
Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1016 | unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { |
| 1017 | switch (Cond) { |
| 1018 | case SIInstrInfo::SCC_TRUE: |
| 1019 | return AMDGPU::S_CBRANCH_SCC1; |
| 1020 | case SIInstrInfo::SCC_FALSE: |
| 1021 | return AMDGPU::S_CBRANCH_SCC0; |
Matt Arsenault | 4945905 | 2016-05-21 00:29:40 +0000 | [diff] [blame] | 1022 | case SIInstrInfo::VCCNZ: |
| 1023 | return AMDGPU::S_CBRANCH_VCCNZ; |
| 1024 | case SIInstrInfo::VCCZ: |
| 1025 | return AMDGPU::S_CBRANCH_VCCZ; |
| 1026 | case SIInstrInfo::EXECNZ: |
| 1027 | return AMDGPU::S_CBRANCH_EXECNZ; |
| 1028 | case SIInstrInfo::EXECZ: |
| 1029 | return AMDGPU::S_CBRANCH_EXECZ; |
Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1030 | default: |
| 1031 | llvm_unreachable("invalid branch predicate"); |
| 1032 | } |
| 1033 | } |
| 1034 | |
| 1035 | SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { |
| 1036 | switch (Opcode) { |
| 1037 | case AMDGPU::S_CBRANCH_SCC0: |
| 1038 | return SCC_FALSE; |
| 1039 | case AMDGPU::S_CBRANCH_SCC1: |
| 1040 | return SCC_TRUE; |
Matt Arsenault | 4945905 | 2016-05-21 00:29:40 +0000 | [diff] [blame] | 1041 | case AMDGPU::S_CBRANCH_VCCNZ: |
| 1042 | return VCCNZ; |
| 1043 | case AMDGPU::S_CBRANCH_VCCZ: |
| 1044 | return VCCZ; |
| 1045 | case AMDGPU::S_CBRANCH_EXECNZ: |
| 1046 | return EXECNZ; |
| 1047 | case AMDGPU::S_CBRANCH_EXECZ: |
| 1048 | return EXECZ; |
Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1049 | default: |
| 1050 | return INVALID_BR; |
| 1051 | } |
| 1052 | } |
| 1053 | |
Jacques Pienaar | 71c30a1 | 2016-07-15 14:41:04 +0000 | [diff] [blame] | 1054 | bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, |
Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1055 | MachineBasicBlock *&FBB, |
| 1056 | SmallVectorImpl<MachineOperand> &Cond, |
| 1057 | bool AllowModify) const { |
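| | // Terminator shapes recognized below (a sketch): |
| | //   s_branch BB                      -> TBB = BB |
| | //   s_cbranch_* BB (fall-through)    -> TBB = BB, Cond = predicate |
| | //   s_cbranch_* BB0; s_branch BB1    -> TBB = BB0, FBB = BB1 |
| | // Anything else returns true, meaning the branch cannot be analyzed. |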
| 1058 | MachineBasicBlock::iterator I = MBB.getFirstTerminator(); |
| 1059 | |
| 1060 | if (I == MBB.end()) |
| 1061 | return false; |
| 1062 | |
| 1063 | if (I->getOpcode() == AMDGPU::S_BRANCH) { |
| 1064 | // Unconditional branch. |
| 1065 | TBB = I->getOperand(0).getMBB(); |
| 1066 | return false; |
| 1067 | } |
| 1068 | |
| 1069 | BranchPredicate Pred = getBranchPredicate(I->getOpcode()); |
| 1070 | if (Pred == INVALID_BR) |
| 1071 | return true; |
| 1072 | |
| 1073 | MachineBasicBlock *CondBB = I->getOperand(0).getMBB(); |
| 1074 | Cond.push_back(MachineOperand::CreateImm(Pred)); |
| 1075 | |
| 1076 | ++I; |
| 1077 | |
| 1078 | if (I == MBB.end()) { |
| 1079 | // Conditional branch followed by fall-through. |
| 1080 | TBB = CondBB; |
| 1081 | return false; |
| 1082 | } |
| 1083 | |
| 1084 | if (I->getOpcode() == AMDGPU::S_BRANCH) { |
| 1085 | TBB = CondBB; |
| 1086 | FBB = I->getOperand(0).getMBB(); |
| 1087 | return false; |
| 1088 | } |
| 1089 | |
| 1090 | return true; |
| 1091 | } |
| 1092 | |
| 1093 | unsigned SIInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { |
| 1094 | MachineBasicBlock::iterator I = MBB.getFirstTerminator(); |
| 1095 | |
| 1096 | unsigned Count = 0; |
| 1097 | while (I != MBB.end()) { |
| 1098 | MachineBasicBlock::iterator Next = std::next(I); |
| 1099 | I->eraseFromParent(); |
| 1100 | ++Count; |
| 1101 | I = Next; |
| 1102 | } |
| 1103 | |
| 1104 | return Count; |
| 1105 | } |
| 1106 | |
| 1107 | unsigned SIInstrInfo::InsertBranch(MachineBasicBlock &MBB, |
| 1108 | MachineBasicBlock *TBB, |
| 1109 | MachineBasicBlock *FBB, |
| 1110 | ArrayRef<MachineOperand> Cond, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 1111 | const DebugLoc &DL) const { |
| 1113 | if (!FBB && Cond.empty()) { |
| 1114 | BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) |
| 1115 | .addMBB(TBB); |
| 1116 | return 1; |
| 1117 | } |
| 1118 | |
| 1119 | assert(TBB && Cond[0].isImm()); |
| 1120 | |
| 1121 | unsigned Opcode |
| 1122 | = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); |
| 1123 | |
| 1124 | if (!FBB) { |
| 1125 | BuildMI(&MBB, DL, get(Opcode)) |
| 1126 | .addMBB(TBB); |
| 1127 | return 1; |
| 1128 | } |
| 1129 | |
| 1130 | assert(TBB && FBB); |
| 1131 | |
| 1132 | BuildMI(&MBB, DL, get(Opcode)) |
| 1133 | .addMBB(TBB); |
| 1134 | BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) |
| 1135 | .addMBB(FBB); |
| 1136 | |
| 1137 | return 2; |
| 1138 | } |
| 1139 | |
Matt Arsenault | 72fcd5f | 2016-05-21 00:29:34 +0000 | [diff] [blame] | 1140 | bool SIInstrInfo::ReverseBranchCondition( |
| 1141 | SmallVectorImpl<MachineOperand> &Cond) const { |
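| | // Assumes the BranchPredicate enumerators are defined as negated |
| | // pairs (e.g. SCC_TRUE == -SCC_FALSE), so flipping the sign of the |
| | // immediate reverses the predicate. |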
| 1142 | assert(Cond.size() == 1); |
| 1143 | Cond[0].setImm(-Cond[0].getImm()); |
| 1144 | return false; |
| 1145 | } |
| 1146 | |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1147 | static void removeModOperands(MachineInstr &MI) { |
| 1148 | unsigned Opc = MI.getOpcode(); |
| 1149 | int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, |
| 1150 | AMDGPU::OpName::src0_modifiers); |
| 1151 | int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, |
| 1152 | AMDGPU::OpName::src1_modifiers); |
| 1153 | int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, |
| 1154 | AMDGPU::OpName::src2_modifiers); |
| 1155 | |
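| | // Remove in descending operand-index order so the lower indices |
| | // remain valid across the removals. |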
| 1156 | MI.RemoveOperand(Src2ModIdx); |
| 1157 | MI.RemoveOperand(Src1ModIdx); |
| 1158 | MI.RemoveOperand(Src0ModIdx); |
| 1159 | } |
| 1160 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1161 | bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1162 | unsigned Reg, MachineRegisterInfo *MRI) const { |
| 1163 | if (!MRI->hasOneNonDBGUse(Reg)) |
| 1164 | return false; |
| 1165 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1166 | unsigned Opc = UseMI.getOpcode(); |
Tom Stellard | 2add8a1 | 2016-09-06 20:00:26 +0000 | [diff] [blame] | 1167 | if (Opc == AMDGPU::COPY) { |
| 1168 | bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg()); |
| 1169 | switch (DefMI.getOpcode()) { |
| 1170 | default: |
| 1171 | return false; |
| 1172 | case AMDGPU::S_MOV_B64: |
| 1173 | // TODO: We could fold 64-bit immediates, but this gets complicated |
| 1174 | // when there are sub-registers. |
| 1175 | return false; |
| 1176 | |
| 1177 | case AMDGPU::V_MOV_B32_e32: |
| 1178 | case AMDGPU::S_MOV_B32: |
| 1179 | break; |
| 1180 | } |
| 1181 | unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; |
| 1182 | const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); |
| 1183 | assert(ImmOp); |
| 1184 | // FIXME: We could handle FrameIndex values here. |
| 1185 | if (!ImmOp->isImm()) { |
| 1186 | return false; |
| 1187 | } |
| 1188 | UseMI.setDesc(get(NewOpc)); |
| 1189 | UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm()); |
| 1190 | UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); |
| 1191 | return true; |
| 1192 | } |
| 1193 | |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1194 | if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) { |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1195 | // Don't fold if we are using source modifiers. The new VOP2 instructions |
| 1196 | // don't have them. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1197 | if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) || |
| 1198 | hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) || |
| 1199 | hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) { |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1200 | return false; |
| 1201 | } |
| 1202 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1203 | const MachineOperand &ImmOp = DefMI.getOperand(1); |
Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 1204 | |
| 1205 | // If this is a free constant, there's no reason to do this. |
| 1206 | // TODO: We could fold this here instead of letting SIFoldOperands do it |
| 1207 | // later. |
| 1208 | if (isInlineConstant(ImmOp, 4)) |
| 1209 | return false; |
| 1210 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1211 | MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); |
| 1212 | MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); |
| 1213 | MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1214 | |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1215 | // The multiplied part is the constant: use v_madmk_f32. |
| 1216 | // We should only expect these to be on src0 due to canonicalizations. |
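| | // A sketch of the rewrite, assuming DefMI is "v_mov_b32 vK, imm" and |
| | // UseMI is "v_mad_f32 d, vK, v1, v2": UseMI becomes |
| | // "v_madmk_f32 d, v1, imm, v2", i.e. the immediate is the multiplier. |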
| 1217 | if (Src0->isReg() && Src0->getReg() == Reg) { |
Matt Arsenault | a266bd8 | 2016-03-02 04:05:14 +0000 | [diff] [blame] | 1218 | if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1219 | return false; |
| 1220 | |
Matt Arsenault | a266bd8 | 2016-03-02 04:05:14 +0000 | [diff] [blame] | 1221 | if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1222 | return false; |
| 1223 | |
Nikolay Haustov | 6560781 | 2016-03-11 09:27:25 +0000 | [diff] [blame] | 1224 | // We need to swap operands 0 and 1 since the madmk constant is at operand 1. |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1225 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1226 | const int64_t Imm = DefMI.getOperand(1).getImm(); |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1227 | |
| 1228 | // FIXME: This would be a lot easier if we could return a new instruction |
| 1229 | // instead of having to modify in place. |
| 1230 | |
| 1231 | // Remove these first since they are at the end. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1232 | UseMI.RemoveOperand( |
| 1233 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); |
| 1234 | UseMI.RemoveOperand( |
| 1235 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1236 | |
| 1237 | unsigned Src1Reg = Src1->getReg(); |
| 1238 | unsigned Src1SubReg = Src1->getSubReg(); |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1239 | Src0->setReg(Src1Reg); |
| 1240 | Src0->setSubReg(Src1SubReg); |
Matt Arsenault | 5e10016 | 2015-04-24 01:57:58 +0000 | [diff] [blame] | 1241 | Src0->setIsKill(Src1->isKill()); |
| 1242 | |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1243 | if (Opc == AMDGPU::V_MAC_F32_e64) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1244 | UseMI.untieRegOperand( |
| 1245 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1246 | } |
| 1247 | |
Nikolay Haustov | 6560781 | 2016-03-11 09:27:25 +0000 | [diff] [blame] | 1248 | Src1->ChangeToImmediate(Imm); |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1249 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1250 | removeModOperands(UseMI); |
| 1251 | UseMI.setDesc(get(AMDGPU::V_MADMK_F32)); |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1252 | |
| 1253 | bool DeleteDef = MRI->hasOneNonDBGUse(Reg); |
| 1254 | if (DeleteDef) |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1255 | DefMI.eraseFromParent(); |
Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 1256 | |
| 1257 | return true; |
| 1258 | } |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1259 | |
| 1260 | // The added part is the constant: use v_madak_f32. |
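| | // A sketch of the rewrite, assuming DefMI is "v_mov_b32 vK, imm" and |
| | // UseMI is "v_mad_f32 d, v0, v1, vK": UseMI becomes |
| | // "v_madak_f32 d, v0, v1, imm", i.e. the immediate is the addend. |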
| 1261 | if (Src2->isReg() && Src2->getReg() == Reg) { |
| 1262 | // Not allowed to use the constant bus for another operand; |
| 1263 | // we can, however, allow an inline immediate as src0. |
| 1264 | if (!Src0->isImm() && |
| 1265 | (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))) |
| 1266 | return false; |
| 1267 | |
Matt Arsenault | a266bd8 | 2016-03-02 04:05:14 +0000 | [diff] [blame] | 1268 | if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1269 | return false; |
| 1270 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1271 | const int64_t Imm = DefMI.getOperand(1).getImm(); |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1272 | |
| 1273 | // FIXME: This would be a lot easier if we could return a new instruction |
| 1274 | // instead of having to modify in place. |
| 1275 | |
| 1276 | // Remove these first since they are at the end. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1277 | UseMI.RemoveOperand( |
| 1278 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); |
| 1279 | UseMI.RemoveOperand( |
| 1280 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1281 | |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1282 | if (Opc == AMDGPU::V_MAC_F32_e64) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1283 | UseMI.untieRegOperand( |
| 1284 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1285 | } |
| 1286 | |
| 1287 | // ChangeToImmediate adds Src2 back to the instruction. |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1288 | Src2->ChangeToImmediate(Imm); |
| 1289 | |
| 1290 | // These come before src2. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1291 | removeModOperands(UseMI); |
| 1292 | UseMI.setDesc(get(AMDGPU::V_MADAK_F32)); |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1293 | |
| 1294 | bool DeleteDef = MRI->hasOneNonDBGUse(Reg); |
| 1295 | if (DeleteDef) |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1296 | DefMI.eraseFromParent(); |
Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 1297 | |
| 1298 | return true; |
| 1299 | } |
| 1300 | } |
| 1301 | |
| 1302 | return false; |
| 1303 | } |
| 1304 | |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1305 | static bool offsetsDoNotOverlap(int WidthA, int OffsetA, |
| 1306 | int WidthB, int OffsetB) { |
| 1307 | int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; |
| 1308 | int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; |
| 1309 | int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; |
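| | // E.g. widths 4 and 4 at offsets 0 and 4 do not overlap (0 + 4 <= 4), |
| | // whereas an offset of 3 for the second access would. |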
| 1310 | return LowOffset + LowWidth <= HighOffset; |
| 1311 | } |
| 1312 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1313 | bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa, |
| 1314 | MachineInstr &MIb) const { |
Chad Rosier | c27a18f | 2016-03-09 16:00:35 +0000 | [diff] [blame] | 1315 | unsigned BaseReg0, BaseReg1; |
| 1316 | int64_t Offset0, Offset1; |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1317 | |
Sanjoy Das | b666ea3 | 2015-06-15 18:44:14 +0000 | [diff] [blame] | 1318 | if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) && |
| 1319 | getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) { |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 1320 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1321 | if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 1322 | // FIXME: Handle ds_read2 / ds_write2. |
| 1323 | return false; |
| 1324 | } |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1325 | unsigned Width0 = (*MIa.memoperands_begin())->getSize(); |
| 1326 | unsigned Width1 = (*MIb.memoperands_begin())->getSize(); |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1327 | if (BaseReg0 == BaseReg1 && |
| 1328 | offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) { |
| 1329 | return true; |
| 1330 | } |
| 1331 | } |
| 1332 | |
| 1333 | return false; |
| 1334 | } |
| 1335 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1336 | bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa, |
| 1337 | MachineInstr &MIb, |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1338 | AliasAnalysis *AA) const { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1339 | assert((MIa.mayLoad() || MIa.mayStore()) && |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1340 | "MIa must load from or modify a memory location"); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1341 | assert((MIb.mayLoad() || MIb.mayStore()) && |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1342 | "MIb must load from or modify a memory location"); |
| 1343 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1344 | if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1345 | return false; |
| 1346 | |
| 1347 | // XXX - Can we relax this between address spaces? |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1348 | if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1349 | return false; |
| 1350 | |
Tom Stellard | 662f330 | 2016-08-29 12:05:32 +0000 | [diff] [blame] | 1351 | if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) { |
| 1352 | const MachineMemOperand *MMOa = *MIa.memoperands_begin(); |
| 1353 | const MachineMemOperand *MMOb = *MIb.memoperands_begin(); |
| 1354 | if (MMOa->getValue() && MMOb->getValue()) { |
| 1355 | MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo()); |
| 1356 | MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo()); |
| 1357 | if (!AA->alias(LocA, LocB)) |
| 1358 | return true; |
| 1359 | } |
| 1360 | } |
| 1361 | |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1362 | // TODO: Should we check the address space from the MachineMemOperand? That |
| 1363 | // would allow us to distinguish objects we know don't alias based on the |
Benjamin Kramer | df005cb | 2015-08-08 18:27:36 +0000 | [diff] [blame] | 1364 | // underlying address space, even if it was lowered to a different one, |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1365 | // e.g. private accesses lowered to use MUBUF instructions on a scratch |
| 1366 | // buffer. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1367 | if (isDS(MIa)) { |
| 1368 | if (isDS(MIb)) |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1369 | return checkInstOffsetsDoNotOverlap(MIa, MIb); |
| 1370 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1371 | return !isFLAT(MIb); |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1372 | } |
| 1373 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1374 | if (isMUBUF(MIa) || isMTBUF(MIa)) { |
| 1375 | if (isMUBUF(MIb) || isMTBUF(MIb)) |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1376 | return checkInstOffsetsDoNotOverlap(MIa, MIb); |
| 1377 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1378 | return !isFLAT(MIb) && !isSMRD(MIb); |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1379 | } |
| 1380 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1381 | if (isSMRD(MIa)) { |
| 1382 | if (isSMRD(MIb)) |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1383 | return checkInstOffsetsDoNotOverlap(MIa, MIb); |
| 1384 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1385 | return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1386 | } |
| 1387 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1388 | if (isFLAT(MIa)) { |
| 1389 | if (isFLAT(MIb)) |
Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 1390 | return checkInstOffsetsDoNotOverlap(MIa, MIb); |
| 1391 | |
| 1392 | return false; |
| 1393 | } |
| 1394 | |
| 1395 | return false; |
| 1396 | } |
| 1397 | |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1398 | MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1399 | MachineInstr &MI, |
| 1400 | LiveVariables *LV) const { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1402 | switch (MI.getOpcode()) { |
| 1403 | default: |
| 1404 | return nullptr; |
| 1405 | case AMDGPU::V_MAC_F32_e64: |
| 1406 | break; |
| 1407 | case AMDGPU::V_MAC_F32_e32: { |
| 1408 | const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); |
| 1409 | if (Src0->isImm() && !isInlineConstant(*Src0, 4)) |
| 1410 | return nullptr; |
| 1411 | break; |
| 1412 | } |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1413 | } |
| 1414 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1415 | const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); |
| 1416 | const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); |
| 1417 | const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); |
| 1418 | const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1419 | |
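| | // A sketch of the conversion: "v_mac_f32 d, s0, s1" (src2 tied to d) |
| | // becomes the three-address "v_mad_f32 d, s0, s1, s2" with zeroed |
| | // modifier, clamp and omod operands, so the destination no longer has |
| | // to match the addend. |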
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1420 | return BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::V_MAD_F32)) |
| 1421 | .addOperand(*Dst) |
| 1422 | .addImm(0) // Src0 mods |
| 1423 | .addOperand(*Src0) |
| 1424 | .addImm(0) // Src1 mods |
| 1425 | .addOperand(*Src1) |
| 1426 | .addImm(0) // Src2 mods |
| 1427 | .addOperand(*Src2) |
| 1428 | .addImm(0) // clamp |
| 1429 | .addImm(0); // omod |
Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 1430 | } |
| 1431 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1432 | bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, |
Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 1433 | const MachineBasicBlock *MBB, |
| 1434 | const MachineFunction &MF) const { |
Matt Arsenault | 95c7897 | 2016-07-09 01:13:51 +0000 | [diff] [blame] | 1435 | // XXX - Do we want the SP check in the base implementation? |
| 1436 | |
Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 1437 | // Target-independent instructions do not have an implicit-use of EXEC, even |
| 1438 | // when they operate on VGPRs. Treating EXEC modifications as scheduling |
| 1439 | // boundaries prevents incorrect movements of such instructions. |
Matt Arsenault | 95c7897 | 2016-07-09 01:13:51 +0000 | [diff] [blame] | 1440 | return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || |
| 1441 | MI.modifiesRegister(AMDGPU::EXEC, &RI); |
Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 1442 | } |
| 1443 | |
Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 1444 | bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { |
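| | // A worked sketch of what qualifies: the integers -16..64 inclusive |
| | // (so 64 passes, 65 does not), plus the specific fp bit patterns |
| | // below (0x3f800000, i.e. 1.0f, passes; an arbitrary pattern such as |
| | // 0x3f000001 does not). |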
Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 1445 | int64_t SVal = Imm.getSExtValue(); |
| 1446 | if (SVal >= -16 && SVal <= 64) |
Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 1447 | return true; |
Tom Stellard | d008446 | 2014-03-17 17:03:52 +0000 | [diff] [blame] | 1448 | |
Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 1449 | if (Imm.getBitWidth() == 64) { |
| 1450 | uint64_t Val = Imm.getZExtValue(); |
| 1451 | return (DoubleToBits(0.0) == Val) || |
| 1452 | (DoubleToBits(1.0) == Val) || |
| 1453 | (DoubleToBits(-1.0) == Val) || |
| 1454 | (DoubleToBits(0.5) == Val) || |
| 1455 | (DoubleToBits(-0.5) == Val) || |
| 1456 | (DoubleToBits(2.0) == Val) || |
| 1457 | (DoubleToBits(-2.0) == Val) || |
| 1458 | (DoubleToBits(4.0) == Val) || |
| 1459 | (DoubleToBits(-4.0) == Val); |
| 1460 | } |
| 1461 | |
Tom Stellard | d008446 | 2014-03-17 17:03:52 +0000 | [diff] [blame] | 1462 | // The actual type of the operand does not seem to matter as long |
| 1463 | // as the bits match one of the inline immediate values. For example: |
| 1464 | // |
| 1465 | // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal, |
| 1466 | // so it is a legal inline immediate. |
| 1467 | // |
| 1468 | // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in |
| 1469 | // floating-point, so it is a legal inline immediate. |
Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 1470 | uint32_t Val = Imm.getZExtValue(); |
Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 1471 | |
Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 1472 | return (FloatToBits(0.0f) == Val) || |
| 1473 | (FloatToBits(1.0f) == Val) || |
| 1474 | (FloatToBits(-1.0f) == Val) || |
| 1475 | (FloatToBits(0.5f) == Val) || |
| 1476 | (FloatToBits(-0.5f) == Val) || |
| 1477 | (FloatToBits(2.0f) == Val) || |
| 1478 | (FloatToBits(-2.0f) == Val) || |
| 1479 | (FloatToBits(4.0f) == Val) || |
| 1480 | (FloatToBits(-4.0f) == Val); |
Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 1481 | } |
| 1482 | |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1483 | bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, |
| 1484 | unsigned OpSize) const { |
| 1485 | if (MO.isImm()) { |
| 1486 | // MachineOperand provides no way to tell the true operand size, since it |
| 1487 | // only records a 64-bit value. We need to know the size to determine if a |
| 1488 | // 32-bit floating point immediate bit pattern is legal for an integer |
| 1489 | // immediate. It would be for any 32-bit integer operand, but would not be |
| 1490 | // for a 64-bit one. |
| 1491 | |
| 1492 | unsigned BitSize = 8 * OpSize; |
| 1493 | return isInlineConstant(APInt(BitSize, MO.getImm(), true)); |
| 1494 | } |
Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 1495 | |
Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 1496 | return false; |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1497 | } |
| 1498 | |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1499 | bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO, |
| 1500 | unsigned OpSize) const { |
| 1501 | return MO.isImm() && !isInlineConstant(MO, OpSize); |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1502 | } |
| 1503 | |
Matt Arsenault | c1ebd82 | 2016-08-13 01:43:54 +0000 | [diff] [blame] | 1504 | bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, |
| 1505 | unsigned OpSize) const { |
| 1506 | switch (MO.getType()) { |
| 1507 | case MachineOperand::MO_Register: |
| 1508 | return false; |
| 1509 | case MachineOperand::MO_Immediate: |
| 1510 | return !isInlineConstant(MO, OpSize); |
| 1511 | case MachineOperand::MO_FrameIndex: |
| 1512 | case MachineOperand::MO_MachineBasicBlock: |
| 1513 | case MachineOperand::MO_ExternalSymbol: |
| 1514 | case MachineOperand::MO_GlobalAddress: |
| 1515 | case MachineOperand::MO_MCSymbol: |
| 1516 | return true; |
| 1517 | default: |
| 1518 | llvm_unreachable("unexpected operand type"); |
| 1519 | } |
| 1520 | } |
| 1521 | |
Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 1522 | static bool compareMachineOp(const MachineOperand &Op0, |
| 1523 | const MachineOperand &Op1) { |
| 1524 | if (Op0.getType() != Op1.getType()) |
| 1525 | return false; |
| 1526 | |
| 1527 | switch (Op0.getType()) { |
| 1528 | case MachineOperand::MO_Register: |
| 1529 | return Op0.getReg() == Op1.getReg(); |
| 1530 | case MachineOperand::MO_Immediate: |
| 1531 | return Op0.getImm() == Op1.getImm(); |
Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 1532 | default: |
| 1533 | llvm_unreachable("Didn't expect to be comparing these operand types"); |
| 1534 | } |
| 1535 | } |
| 1536 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1537 | bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, |
| 1538 | const MachineOperand &MO) const { |
| 1539 | const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo]; |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1540 | |
Tom Stellard | fb77f00 | 2015-01-13 22:59:41 +0000 | [diff] [blame] | 1541 | assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1542 | |
| 1543 | if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) |
| 1544 | return true; |
| 1545 | |
| 1546 | if (OpInfo.RegClass < 0) |
| 1547 | return false; |
| 1548 | |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1549 | unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize(); |
| 1550 | if (isLiteralConstant(MO, OpSize)) |
Tom Stellard | b655052 | 2015-01-12 19:33:18 +0000 | [diff] [blame] | 1551 | return RI.opCanUseLiteralConstant(OpInfo.OperandType); |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1552 | |
Tom Stellard | b655052 | 2015-01-12 19:33:18 +0000 | [diff] [blame] | 1553 | return RI.opCanUseInlineConstant(OpInfo.OperandType); |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1554 | } |
| 1555 | |
Tom Stellard | 86d12eb | 2014-08-01 00:32:28 +0000 | [diff] [blame] | 1556 | bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { |
Marek Olsak | a93603d | 2015-01-15 18:42:51 +0000 | [diff] [blame] | 1557 | int Op32 = AMDGPU::getVOPe32(Opcode); |
| 1558 | if (Op32 == -1) |
| 1559 | return false; |
| 1560 | |
| 1561 | return pseudoToMCOpcode(Op32) != -1; |
Tom Stellard | 86d12eb | 2014-08-01 00:32:28 +0000 | [diff] [blame] | 1562 | } |
| 1563 | |
Tom Stellard | b4a313a | 2014-08-01 00:32:39 +0000 | [diff] [blame] | 1564 | bool SIInstrInfo::hasModifiers(unsigned Opcode) const { |
| 1565 | // The src0_modifier operand is present on all instructions |
| 1566 | // that have modifiers. |
| 1567 | |
| 1568 | return AMDGPU::getNamedOperandIdx(Opcode, |
| 1569 | AMDGPU::OpName::src0_modifiers) != -1; |
| 1570 | } |
| 1571 | |
Matt Arsenault | ace5b76 | 2014-10-17 18:00:43 +0000 | [diff] [blame] | 1572 | bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, |
| 1573 | unsigned OpName) const { |
| 1574 | const MachineOperand *Mods = getNamedOperand(MI, OpName); |
| 1575 | return Mods && Mods->getImm(); |
| 1576 | } |
| 1577 | |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1578 | bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1579 | const MachineOperand &MO, |
| 1580 | unsigned OpSize) const { |
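| | // Summary of the checks below: literal constants, SGPR uses (virtual |
| | // SGPR-class registers, or explicit physical SGPRs), VCC and M0 (even |
| | // as implicit uses), and explicit reads of EXEC or FLAT_SCR all occupy |
| | // the single constant bus slot; VGPRs do not. |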
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1581 | // Literal constants use the constant bus. |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1582 | if (isLiteralConstant(MO, OpSize)) |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1583 | return true; |
| 1584 | |
| 1585 | if (!MO.isReg() || !MO.isUse()) |
| 1586 | return false; |
| 1587 | |
| 1588 | if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) |
| 1589 | return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); |
| 1590 | |
| 1591 | // FLAT_SCR is just an SGPR pair. |
| 1592 | if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) |
| 1593 | return true; |
| 1594 | |
| 1595 | // EXEC register uses the constant bus. |
| 1596 | if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC) |
| 1597 | return true; |
| 1598 | |
| 1599 | // SGPRs use the constant bus. |
Matt Arsenault | 8226fc4 | 2016-03-02 23:00:21 +0000 | [diff] [blame] | 1600 | return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 || |
| 1601 | (!MO.isImplicit() && |
| 1602 | (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) || |
| 1603 | AMDGPU::SGPR_64RegClass.contains(MO.getReg())))); |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1604 | } |
| 1605 | |
Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 1606 | static unsigned findImplicitSGPRRead(const MachineInstr &MI) { |
| 1607 | for (const MachineOperand &MO : MI.implicit_operands()) { |
| 1608 | // We only care about reads. |
| 1609 | if (MO.isDef()) |
| 1610 | continue; |
| 1611 | |
| 1612 | switch (MO.getReg()) { |
| 1613 | case AMDGPU::VCC: |
| 1614 | case AMDGPU::M0: |
| 1615 | case AMDGPU::FLAT_SCR: |
| 1616 | return MO.getReg(); |
| 1617 | |
| 1618 | default: |
| 1619 | break; |
| 1620 | } |
| 1621 | } |
| 1622 | |
| 1623 | return AMDGPU::NoRegister; |
| 1624 | } |
| 1625 | |
Matt Arsenault | 529cf25 | 2016-06-23 01:26:16 +0000 | [diff] [blame] | 1626 | static bool shouldReadExec(const MachineInstr &MI) { |
| 1627 | if (SIInstrInfo::isVALU(MI)) { |
| 1628 | switch (MI.getOpcode()) { |
| 1629 | case AMDGPU::V_READLANE_B32: |
| 1630 | case AMDGPU::V_READLANE_B32_si: |
| 1631 | case AMDGPU::V_READLANE_B32_vi: |
| 1632 | case AMDGPU::V_WRITELANE_B32: |
| 1633 | case AMDGPU::V_WRITELANE_B32_si: |
| 1634 | case AMDGPU::V_WRITELANE_B32_vi: |
| 1635 | return false; |
| 1636 | } |
| 1637 | |
| 1638 | return true; |
| 1639 | } |
| 1640 | |
| 1641 | if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) || |
| 1642 | SIInstrInfo::isSALU(MI) || |
| 1643 | SIInstrInfo::isSMRD(MI)) |
| 1644 | return false; |
| 1645 | |
| 1646 | return true; |
| 1647 | } |
| 1648 | |
Matt Arsenault | cb540bc | 2016-07-19 00:35:03 +0000 | [diff] [blame] | 1649 | static bool isSubRegOf(const SIRegisterInfo &TRI, |
| 1650 | const MachineOperand &SuperVec, |
| 1651 | const MachineOperand &SubReg) { |
| 1652 | if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg())) |
| 1653 | return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); |
| 1654 | |
| 1655 | return SubReg.getSubReg() != AMDGPU::NoSubRegister && |
| 1656 | SubReg.getReg() == SuperVec.getReg(); |
| 1657 | } |
| 1658 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1659 | bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1660 | StringRef &ErrInfo) const { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1661 | uint16_t Opcode = MI.getOpcode(); |
| 1662 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1663 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); |
| 1664 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); |
| 1665 | int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); |
| 1666 | |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1667 | // Make sure the number of operands is correct. |
| 1668 | const MCInstrDesc &Desc = get(Opcode); |
| 1669 | if (!Desc.isVariadic() && |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1670 | Desc.getNumOperands() != MI.getNumExplicitOperands()) { |
| 1671 | ErrInfo = "Instruction has wrong number of operands."; |
| 1672 | return false; |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1673 | } |
| 1674 | |
Changpeng Fang | c996393 | 2015-12-18 20:04:28 +0000 | [diff] [blame] | 1675 | // Make sure the register classes are correct. |
Tom Stellard | b4a313a | 2014-08-01 00:32:39 +0000 | [diff] [blame] | 1676 | for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1677 | if (MI.getOperand(i).isFPImm()) { |
Tom Stellard | fb77f00 | 2015-01-13 22:59:41 +0000 | [diff] [blame] | 1678 | ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast " |
| 1679 | "all fp values to integers."; |
| 1680 | return false; |
| 1681 | } |
| 1682 | |
Marek Olsak | 8eeebcc | 2015-02-18 22:12:41 +0000 | [diff] [blame] | 1683 | int RegClass = Desc.OpInfo[i].RegClass; |
| 1684 | |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1685 | switch (Desc.OpInfo[i].OperandType) { |
Tom Stellard | 1106b1c | 2015-01-20 17:49:41 +0000 | [diff] [blame] | 1686 | case MCOI::OPERAND_REGISTER: |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1687 | if (MI.getOperand(i).isImm()) { |
Tom Stellard | 1106b1c | 2015-01-20 17:49:41 +0000 | [diff] [blame] | 1688 | ErrInfo = "Illegal immediate value for operand."; |
| 1689 | return false; |
| 1690 | } |
| 1691 | break; |
Sam Kolton | 1eeb11b | 2016-09-09 14:44:04 +0000 | [diff] [blame] | 1692 | case AMDGPU::OPERAND_REG_IMM32_INT: |
| 1693 | case AMDGPU::OPERAND_REG_IMM32_FP: |
Tom Stellard | 1106b1c | 2015-01-20 17:49:41 +0000 | [diff] [blame] | 1694 | break; |
Sam Kolton | 1eeb11b | 2016-09-09 14:44:04 +0000 | [diff] [blame] | 1695 | case AMDGPU::OPERAND_REG_INLINE_C_INT: |
| 1696 | case AMDGPU::OPERAND_REG_INLINE_C_FP: |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1697 | if (isLiteralConstant(MI.getOperand(i), |
Marek Olsak | 8eeebcc | 2015-02-18 22:12:41 +0000 | [diff] [blame] | 1698 | RI.getRegClass(RegClass)->getSize())) { |
| 1699 | ErrInfo = "Illegal immediate value for operand."; |
| 1700 | return false; |
Tom Stellard | a305f93 | 2014-07-02 20:53:44 +0000 | [diff] [blame] | 1701 | } |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1702 | break; |
| 1703 | case MCOI::OPERAND_IMMEDIATE: |
Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 1704 | case AMDGPU::OPERAND_KIMM32: |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1705 | // Check if this operand is an immediate. |
| 1706 | // FrameIndex operands will be replaced by immediates, so they are |
| 1707 | // allowed. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1708 | if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1709 | ErrInfo = "Expected immediate, but got non-immediate"; |
| 1710 | return false; |
| 1711 | } |
Justin Bogner | b03fd12 | 2016-08-17 05:10:15 +0000 | [diff] [blame] | 1712 | LLVM_FALLTHROUGH; |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1713 | default: |
| 1714 | continue; |
| 1715 | } |
| 1716 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1717 | if (!MI.getOperand(i).isReg()) |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1718 | continue; |
| 1719 | |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1720 | if (RegClass != -1) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1721 | unsigned Reg = MI.getOperand(i).getReg(); |
Matt Arsenault | 1322b6f | 2016-07-09 01:13:56 +0000 | [diff] [blame] | 1722 | if (Reg == AMDGPU::NoRegister || |
| 1723 | TargetRegisterInfo::isVirtualRegister(Reg)) |
Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 1724 | continue; |
| 1725 | |
| 1726 | const TargetRegisterClass *RC = RI.getRegClass(RegClass); |
| 1727 | if (!RC->contains(Reg)) { |
| 1728 | ErrInfo = "Operand has incorrect register class."; |
| 1729 | return false; |
| 1730 | } |
| 1731 | } |
| 1732 | } |
| 1733 | |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1734 | // Verify VOP* |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1735 | if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) { |
Matt Arsenault | e368cb3 | 2014-12-11 23:37:32 +0000 | [diff] [blame] | 1736 | // Only look at the true operands. Only a real operand can use the constant |
| 1737 | // bus, and we don't want to check pseudo-operands like the source modifier |
| 1738 | // flags. |
| 1739 | const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; |
| 1740 | |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1741 | unsigned ConstantBusCount = 0; |
Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 1742 | |
| 1743 | if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) |
| 1744 | ++ConstantBusCount; |
| 1745 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1746 | unsigned SGPRUsed = findImplicitSGPRRead(MI); |
Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 1747 | if (SGPRUsed != AMDGPU::NoRegister) |
| 1748 | ++ConstantBusCount; |
| 1749 | |
Matt Arsenault | e368cb3 | 2014-12-11 23:37:32 +0000 | [diff] [blame] | 1750 | for (int OpIdx : OpIndices) { |
| 1751 | if (OpIdx == -1) |
| 1752 | break; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1753 | const MachineOperand &MO = MI.getOperand(OpIdx); |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1754 | if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) { |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1755 | if (MO.isReg()) { |
| 1756 | if (MO.getReg() != SGPRUsed) |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1757 | ++ConstantBusCount; |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 1758 | SGPRUsed = MO.getReg(); |
| 1759 | } else { |
| 1760 | ++ConstantBusCount; |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1761 | } |
| 1762 | } |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1763 | } |
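| | // For example (a sketch): "V_ADD_F32_e32 v0, s0, s1" reads two |
| | // different SGPRs through the constant bus and is rejected below. |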
| 1764 | if (ConstantBusCount > 1) { |
| 1765 | ErrInfo = "VOP* instruction uses the constant bus more than once"; |
| 1766 | return false; |
| 1767 | } |
| 1768 | } |
| 1769 | |
Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 1770 | // Verify misc. restrictions on specific instructions. |
| 1771 | if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || |
| 1772 | Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1773 | const MachineOperand &Src0 = MI.getOperand(Src0Idx); |
| 1774 | const MachineOperand &Src1 = MI.getOperand(Src1Idx); |
| 1775 | const MachineOperand &Src2 = MI.getOperand(Src2Idx); |
Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 1776 | if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { |
| 1777 | if (!compareMachineOp(Src0, Src1) && |
| 1778 | !compareMachineOp(Src0, Src2)) { |
| 1779 | ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; |
| 1780 | return false; |
| 1781 | } |
| 1782 | } |
| 1783 | } |
| 1784 | |
Matt Arsenault | cb540bc | 2016-07-19 00:35:03 +0000 | [diff] [blame] | 1785 | if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || |
| 1786 | Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || |
| 1787 | Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || |
| 1788 | Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { |
| 1789 | const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || |
| 1790 | Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; |
| 1791 | |
| 1792 | const unsigned StaticNumOps = Desc.getNumOperands() + |
| 1793 | Desc.getNumImplicitUses(); |
| 1794 | const unsigned NumImplicitOps = IsDst ? 2 : 1; |
| 1795 | |
| 1796 | if (MI.getNumOperands() != StaticNumOps + NumImplicitOps) { |
| 1797 | ErrInfo = "missing implicit register operands"; |
| 1798 | return false; |
| 1799 | } |
| 1800 | |
| 1801 | const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); |
| 1802 | if (IsDst) { |
| 1803 | if (!Dst->isUse()) { |
| 1804 | ErrInfo = "v_movreld_b32 vdst should be a use operand"; |
| 1805 | return false; |
| 1806 | } |
| 1807 | |
| 1808 | unsigned UseOpIdx; |
| 1809 | if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || |
| 1810 | UseOpIdx != StaticNumOps + 1) { |
| 1811 | ErrInfo = "movrel implicit operands should be tied"; |
| 1812 | return false; |
| 1813 | } |
| 1814 | } |
| 1815 | |
| 1816 | const MachineOperand &Src0 = MI.getOperand(Src0Idx); |
| 1817 | const MachineOperand &ImpUse |
| 1818 | = MI.getOperand(StaticNumOps + NumImplicitOps - 1); |
| 1819 | if (!ImpUse.isReg() || !ImpUse.isUse() || |
| 1820 | !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { |
| 1821 | ErrInfo = "src0 should be subreg of implicit vector use"; |
| 1822 | return false; |
| 1823 | } |
| 1824 | } |
| 1825 | |
Matt Arsenault | d092a06 | 2015-10-02 18:58:37 +0000 | [diff] [blame] | 1826 | // Make sure we aren't losing exec uses in the td files. This mostly requires |
| 1827 | // being careful when using 'let Uses' to add other use registers. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1828 | if (shouldReadExec(MI)) { |
| 1829 | if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { |
Matt Arsenault | d092a06 | 2015-10-02 18:58:37 +0000 | [diff] [blame] | 1830 | ErrInfo = "VALU instruction does not implicitly read exec mask"; |
| 1831 | return false; |
| 1832 | } |
| 1833 | } |
| 1834 | |
Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 1835 | return true; |
| 1836 | } |
| 1837 | |
Matt Arsenault | f14032a | 2013-11-15 22:02:28 +0000 | [diff] [blame] | 1838 | unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) { |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1839 | switch (MI.getOpcode()) { |
| 1840 | default: return AMDGPU::INSTRUCTION_LIST_END; |
| 1841 | case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; |
| 1842 | case AMDGPU::COPY: return AMDGPU::COPY; |
| 1843 | case AMDGPU::PHI: return AMDGPU::PHI; |
Tom Stellard | 204e61b | 2014-04-07 19:45:45 +0000 | [diff] [blame] | 1844 | case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; |
Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 1845 | case AMDGPU::S_MOV_B32: |
| 1846 | return MI.getOperand(1).isReg() ? |
Tom Stellard | 8c12fd9 | 2014-03-24 16:12:34 +0000 | [diff] [blame] | 1847 | AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; |
Tom Stellard | 80942a1 | 2014-09-05 14:07:59 +0000 | [diff] [blame] | 1848 | case AMDGPU::S_ADD_I32: |
| 1849 | case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32; |
Matt Arsenault | 43b8e4e | 2013-11-18 20:09:29 +0000 | [diff] [blame] | 1850 | case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32; |
Tom Stellard | 80942a1 | 2014-09-05 14:07:59 +0000 | [diff] [blame] | 1851 | case AMDGPU::S_SUB_I32: |
| 1852 | case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32; |
Matt Arsenault | 43b8e4e | 2013-11-18 20:09:29 +0000 | [diff] [blame] | 1853 | case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; |
Matt Arsenault | 869cd07 | 2014-09-03 23:24:35 +0000 | [diff] [blame] | 1854 | case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32; |
Matt Arsenault | 124384f | 2016-09-09 23:32:53 +0000 | [diff] [blame] | 1855 | case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; |
| 1856 | case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; |
| 1857 | case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; |
| 1858 | case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; |
| 1859 | case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; |
| 1860 | case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; |
| 1861 | case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1862 | case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; |
| 1863 | case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; |
| 1864 | case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; |
| 1865 | case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; |
| 1866 | case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; |
| 1867 | case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; |
Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 1868 | case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; |
| 1869 | case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; |
Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 1870 | case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; |
| 1871 | case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; |
Marek Olsak | 63a7b08 | 2015-03-24 13:40:21 +0000 | [diff] [blame] | 1872 | case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; |
Matt Arsenault | 43160e7 | 2014-06-18 17:13:57 +0000 | [diff] [blame] | 1873 | case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; |
Matt Arsenault | 2c33562 | 2014-04-09 07:16:16 +0000 | [diff] [blame] | 1874 | case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 1875 | case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; |
Matt Arsenault | 0cb92e1 | 2014-04-11 19:25:18 +0000 | [diff] [blame] | 1876 | case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; |
| 1877 | case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; |
| 1878 | case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; |
| 1879 | case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; |
| 1880 | case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; |
| 1881 | case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 1882 | case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; |
| 1883 | case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; |
| 1884 | case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; |
| 1885 | case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; |
| 1886 | case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; |
| 1887 | case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; |
Marek Olsak | c536850 | 2015-01-15 18:43:01 +0000 | [diff] [blame] | 1888 | case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; |
Matt Arsenault | 295b86e | 2014-06-17 17:36:27 +0000 | [diff] [blame] | 1889 | case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; |
Matt Arsenault | 8579601 | 2014-06-17 17:36:24 +0000 | [diff] [blame] | 1890 | case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; |
Marek Olsak | d2af89d | 2015-03-04 17:33:45 +0000 | [diff] [blame] | 1891 | case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 1892 | case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; |
| 1893 | case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1894 | } |
| 1895 | } |
| 1896 | |
| 1897 | bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const { |
| 1898 | return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END; |
| 1899 | } |
| 1900 | |
| 1901 | const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, |
| 1902 | unsigned OpNo) const { |
| 1903 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
| 1904 | const MCInstrDesc &Desc = get(MI.getOpcode()); |
| 1905 | if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || |
Matt Arsenault | 102a704 | 2014-12-11 23:37:34 +0000 | [diff] [blame] | 1906 | Desc.OpInfo[OpNo].RegClass == -1) { |
| 1907 | unsigned Reg = MI.getOperand(OpNo).getReg(); |
| 1908 | |
| 1909 | if (TargetRegisterInfo::isVirtualRegister(Reg)) |
| 1910 | return MRI.getRegClass(Reg); |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 1911 | return RI.getPhysRegClass(Reg); |
Matt Arsenault | 102a704 | 2014-12-11 23:37:34 +0000 | [diff] [blame] | 1912 | } |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1913 | |
| 1914 | unsigned RCID = Desc.OpInfo[OpNo].RegClass; |
| 1915 | return RI.getRegClass(RCID); |
| 1916 | } |
| 1917 | |
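 | | // Returns true if operand \p OpNo of \p MI may hold a VGPR. For copy-like 
 | | // pseudos (COPY, PHI, REG_SEQUENCE, INSERT_SUBREG) this depends on the 
 | | // destination register class rather than on the operand's own class. 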
| 1918 | bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const { |
| 1919 | switch (MI.getOpcode()) { |
| 1920 | case AMDGPU::COPY: |
| 1921 | case AMDGPU::REG_SEQUENCE: |
Tom Stellard | 4f3b04d | 2014-04-17 21:00:07 +0000 | [diff] [blame] | 1922 | case AMDGPU::PHI: |
Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 1923 | case AMDGPU::INSERT_SUBREG: |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1924 | return RI.hasVGPRs(getOpRegClass(MI, 0)); |
| 1925 | default: |
| 1926 | return RI.hasVGPRs(getOpRegClass(MI, OpNo)); |
| 1927 | } |
| 1928 | } |
| 1929 | |
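 | | // Legalize operand \p OpIdx by moving its current value into a freshly 
 | | // created virtual register (via COPY, V_MOV_B32, or S_MOV_B32 as 
 | | // appropriate) and rewriting the operand to use the new register. 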
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1930 | void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1931 | MachineBasicBlock::iterator I = MI; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1932 | MachineBasicBlock *MBB = MI.getParent(); |
| 1933 | MachineOperand &MO = MI.getOperand(OpIdx); |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1934 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1935 | unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1936 | const TargetRegisterClass *RC = RI.getRegClass(RCID); |
| 1937 | unsigned Opcode = AMDGPU::V_MOV_B32_e32; |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1938 | if (MO.isReg()) |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1939 | Opcode = AMDGPU::COPY; |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1940 | else if (RI.isSGPRClass(RC)) |
Matt Arsenault | 671a005 | 2013-11-14 10:08:50 +0000 | [diff] [blame] | 1941 | Opcode = AMDGPU::S_MOV_B32; |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1942 | |
Matt Arsenault | 3a4d86a | 2013-11-18 20:09:55 +0000 | [diff] [blame] | 1943 | const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1944 | if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) |
Tom Stellard | 0c93c9e | 2014-09-05 14:08:01 +0000 | [diff] [blame] | 1945 | VRC = &AMDGPU::VReg_64RegClass; |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1946 | else |
Tom Stellard | 45c0b3a | 2015-01-07 20:59:25 +0000 | [diff] [blame] | 1947 | VRC = &AMDGPU::VGPR_32RegClass; |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1948 | |
Matt Arsenault | 3a4d86a | 2013-11-18 20:09:55 +0000 | [diff] [blame] | 1949 | unsigned Reg = MRI.createVirtualRegister(VRC); |
Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 1950 | DebugLoc DL = MBB->findDebugLoc(I); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1951 | BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1952 | MO.ChangeToRegister(Reg, false); |
| 1953 | } |
| 1954 | |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 1955 | unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, |
| 1956 | MachineRegisterInfo &MRI, |
| 1957 | MachineOperand &SuperReg, |
| 1958 | const TargetRegisterClass *SuperRC, |
| 1959 | unsigned SubIdx, |
| 1960 | const TargetRegisterClass *SubRC) |
| 1961 | const { |
Matt Arsenault | c8e2ce4 | 2015-09-24 07:16:37 +0000 | [diff] [blame] | 1962 | MachineBasicBlock *MBB = MI->getParent(); |
| 1963 | DebugLoc DL = MI->getDebugLoc(); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 1964 | unsigned SubReg = MRI.createVirtualRegister(SubRC); |
| 1965 | |
Matt Arsenault | c8e2ce4 | 2015-09-24 07:16:37 +0000 | [diff] [blame] | 1966 | if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { |
| 1967 | BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) |
| 1968 | .addReg(SuperReg.getReg(), 0, SubIdx); |
| 1969 | return SubReg; |
| 1970 | } |
| 1971 | |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 1972 | // Just in case the super register is itself a sub-register, copy it to a new |
Matt Arsenault | 08d8494 | 2014-06-03 23:06:13 +0000 | [diff] [blame] | 1973 | // value so we don't need to worry about merging its subreg index with the |
| 1974 | // SubIdx passed to this function. The register coalescer should be able to |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 1975 | // eliminate this extra copy. |
Matt Arsenault | c8e2ce4 | 2015-09-24 07:16:37 +0000 | [diff] [blame] | 1976 | unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 1977 | |
Matt Arsenault | 7480a0e | 2014-11-17 21:11:37 +0000 | [diff] [blame] | 1978 | BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) |
| 1979 | .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); |
| 1980 | |
| 1981 | BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) |
| 1982 | .addReg(NewSuperReg, 0, SubIdx); |
| 1983 | |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 1984 | return SubReg; |
| 1985 | } |
| 1986 | |
Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 1987 | MachineOperand SIInstrInfo::buildExtractSubRegOrImm( |
| 1988 | MachineBasicBlock::iterator MII, |
| 1989 | MachineRegisterInfo &MRI, |
| 1990 | MachineOperand &Op, |
| 1991 | const TargetRegisterClass *SuperRC, |
| 1992 | unsigned SubIdx, |
| 1993 | const TargetRegisterClass *SubRC) const { |
| 1994 | if (Op.isImm()) { |
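 | | // A 64-bit immediate is split into its 32-bit halves: sub0 selects 
 | | // bits [31:0] and sub1 bits [63:32]. For example (illustrative value), 
 | | // 0x1122334455667788 yields 0x55667788 for sub0 and 0x11223344 for sub1. 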
Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 1995 | if (SubIdx == AMDGPU::sub0) |
Matt Arsenault | d745c28 | 2016-09-08 17:44:36 +0000 | [diff] [blame] | 1996 | return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); |
Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 1997 | if (SubIdx == AMDGPU::sub1) |
Matt Arsenault | d745c28 | 2016-09-08 17:44:36 +0000 | [diff] [blame] | 1998 | return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); |
Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 1999 | |
| 2000 | llvm_unreachable("Unhandled register index for immediate"); |
| 2001 | } |
| 2002 | |
| 2003 | unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, |
| 2004 | SubIdx, SubRC); |
| 2005 | return MachineOperand::CreateReg(SubReg, false); |
| 2006 | } |
| 2007 | |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2008 | // Change the order of operands from (0, 1, 2) to (0, 2, 1) |
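 | | // (moveToVALU uses this when rewriting shifts to the *REV VALU forms, 
 | | // which take the shift amount as their first source operand.) 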
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2009 | void SIInstrInfo::swapOperands(MachineInstr &Inst) const { |
| 2010 | assert(Inst.getNumExplicitOperands() == 3); |
| 2011 | MachineOperand Op1 = Inst.getOperand(1); |
| 2012 | Inst.RemoveOperand(1); |
| 2013 | Inst.addOperand(Op1); |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2014 | } |
| 2015 | |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2016 | bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, |
| 2017 | const MCOperandInfo &OpInfo, |
| 2018 | const MachineOperand &MO) const { |
| 2019 | if (!MO.isReg()) |
| 2020 | return false; |
| 2021 | |
| 2022 | unsigned Reg = MO.getReg(); |
| 2023 | const TargetRegisterClass *RC = |
| 2024 | TargetRegisterInfo::isVirtualRegister(Reg) ? |
| 2025 | MRI.getRegClass(Reg) : |
| 2026 | RI.getPhysRegClass(Reg); |
| 2027 | |
Nicolai Haehnle | 82fc962 | 2016-01-07 17:10:29 +0000 | [diff] [blame] | 2028 | const SIRegisterInfo *TRI = |
| 2029 | static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); |
| 2030 | RC = TRI->getSubRegClass(RC, MO.getSubReg()); |
| 2031 | |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2032 | // In order to be legal, the common sub-class must be equal to the |
| 2033 | // class of the current operand. For example: |
| 2034 | // |
Sam Kolton | 1eeb11b | 2016-09-09 14:44:04 +0000 | [diff] [blame] | 2035 | // v_mov_b32 s0 ; Operand defined as vsrc_b32 |
| 2036 | // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2037 | // |
| 2038 | // s_sendmsg 0, s0 ; Operand defined as m0reg |
| 2039 | // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL |
| 2040 | |
| 2041 | return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC; |
| 2042 | } |
| 2043 | |
| 2044 | bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, |
| 2045 | const MCOperandInfo &OpInfo, |
| 2046 | const MachineOperand &MO) const { |
| 2047 | if (MO.isReg()) |
| 2048 | return isLegalRegOperand(MRI, OpInfo, MO); |
| 2049 | |
| 2050 | // Handle non-register types that are treated like immediates. |
| 2051 | assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); |
| 2052 | return true; |
| 2053 | } |
| 2054 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2055 | bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2056 | const MachineOperand *MO) const { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2057 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
| 2058 | const MCInstrDesc &InstDesc = MI.getDesc(); |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2059 | const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; |
| 2060 | const TargetRegisterClass *DefinedRC = |
| 2061 | OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; |
| 2062 | if (!MO) |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2063 | MO = &MI.getOperand(OpIdx); |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2064 | |
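 | | // VALU instructions may use the constant bus (an SGPR or a literal) at 
 | | // most once, so if this operand would occupy it, check below that no 
 | | // other operand already does. 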
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2065 | if (isVALU(MI) && usesConstantBus(MRI, *MO, DefinedRC->getSize())) { |
Matt Arsenault | fcb345f | 2016-02-11 06:15:39 +0000 | [diff] [blame] | 2066 | |
| 2067 | RegSubRegPair SGPRUsed; |
| 2068 | if (MO->isReg()) |
| 2069 | SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg()); |
| 2070 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2071 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2072 | if (i == OpIdx) |
| 2073 | continue; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2074 | const MachineOperand &Op = MI.getOperand(i); |
Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 2075 | if (Op.isReg()) { |
| 2076 | if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) && |
| 2077 | usesConstantBus(MRI, Op, getOpSize(MI, i))) { |
| 2078 | return false; |
| 2079 | } |
| 2080 | } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2081 | return false; |
| 2082 | } |
| 2083 | } |
| 2084 | } |
| 2085 | |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2086 | if (MO->isReg()) { |
| 2087 | assert(DefinedRC); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2088 | return isLegalRegOperand(MRI, OpInfo, *MO); |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2089 | } |
| 2090 | |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2091 | // Handle non-register types that are treated like immediates. |
Tom Stellard | fb77f00 | 2015-01-13 22:59:41 +0000 | [diff] [blame] | 2092 | assert(MO->isImm() || MO->isTargetIndex() || MO->isFI()); |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2093 | |
Matt Arsenault | 4364fef | 2014-09-23 18:30:57 +0000 | [diff] [blame] | 2094 | if (!DefinedRC) { |
| 2095 | // This operand expects an immediate. |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2096 | return true; |
Matt Arsenault | 4364fef | 2014-09-23 18:30:57 +0000 | [diff] [blame] | 2097 | } |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2098 | |
Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2099 | return isImmOperandLegal(MI, OpIdx, *MO); |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2100 | } |
| 2101 | |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2102 | void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2103 | MachineInstr &MI) const { |
| 2104 | unsigned Opc = MI.getOpcode(); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2105 | const MCInstrDesc &InstrDesc = get(Opc); |
| 2106 | |
| 2107 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2108 | MachineOperand &Src1 = MI.getOperand(Src1Idx); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2109 | |
 | 2110 | // If there is an implicit SGPR use, such as the VCC use for 
 | 2111 | // v_addc_u32/v_subb_u32, it already takes the single allowed constant bus use. 
| 2112 | // |
| 2113 | // Note we do not need to worry about literal constants here. They are |
| 2114 | // disabled for the operand type for instructions because they will always |
| 2115 | // violate the one constant bus use rule. |
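 | | // For example (illustrative MIR), in 
 | | //   %vgpr2 = V_ADDC_U32_e32 %vgpr0, %sgpr4, implicit-def %vcc, implicit %vcc 
 | | // the implicit %vcc read takes the constant bus slot, so %sgpr4 must first 
 | | // be moved into a VGPR. 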
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2116 | bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2117 | if (HasImplicitSGPR) { |
| 2118 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2119 | MachineOperand &Src0 = MI.getOperand(Src0Idx); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2120 | |
| 2121 | if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) |
| 2122 | legalizeOpWithMove(MI, Src0Idx); |
| 2123 | } |
| 2124 | |
 | 2125 | // VOP2 src0 supports all operand types, so we don't need to check its 
 | 2126 | // legality. If src1 is already legal, we don't need to do anything. 
| 2127 | if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) |
| 2128 | return; |
| 2129 | |
 | 2130 | // We do not use commuteInstruction here because it is too aggressive and will 
 | 2131 | // commute whenever it is possible. We only want to commute here if it improves 
 | 2132 | // legality. This can be called a fairly large number of times, so don't waste 
 | 2133 | // compile time pointlessly swapping and checking legality again. 
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2134 | if (HasImplicitSGPR || !MI.isCommutable()) { |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2135 | legalizeOpWithMove(MI, Src1Idx); |
| 2136 | return; |
| 2137 | } |
| 2138 | |
| 2139 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2140 | MachineOperand &Src0 = MI.getOperand(Src0Idx); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2141 | |
| 2142 | // If src0 can be used as src1, commuting will make the operands legal. |
| 2143 | // Otherwise we have to give up and insert a move. |
| 2144 | // |
| 2145 | // TODO: Other immediate-like operand kinds could be commuted if there was a |
| 2146 | // MachineOperand::ChangeTo* for them. |
| 2147 | if ((!Src1.isImm() && !Src1.isReg()) || |
| 2148 | !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { |
| 2149 | legalizeOpWithMove(MI, Src1Idx); |
| 2150 | return; |
| 2151 | } |
| 2152 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2153 | int CommutedOpc = commuteOpcode(MI); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2154 | if (CommutedOpc == -1) { |
| 2155 | legalizeOpWithMove(MI, Src1Idx); |
| 2156 | return; |
| 2157 | } |
| 2158 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2159 | MI.setDesc(get(CommutedOpc)); |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2160 | |
| 2161 | unsigned Src0Reg = Src0.getReg(); |
| 2162 | unsigned Src0SubReg = Src0.getSubReg(); |
| 2163 | bool Src0Kill = Src0.isKill(); |
| 2164 | |
| 2165 | if (Src1.isImm()) |
| 2166 | Src0.ChangeToImmediate(Src1.getImm()); |
| 2167 | else if (Src1.isReg()) { |
| 2168 | Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); |
| 2169 | Src0.setSubReg(Src1.getSubReg()); |
| 2170 | } else |
| 2171 | llvm_unreachable("Should only have register or immediate operands"); |
| 2172 | |
| 2173 | Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); |
| 2174 | Src1.setSubReg(Src0SubReg); |
| 2175 | } |
| 2176 | |
Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 2177 | // Legalize VOP3 operands. Because all operand types are supported for any |
| 2178 | // operand, and since literal constants are not allowed and should never be |
| 2179 | // seen, we only need to worry about inserting copies if we use multiple SGPR |
| 2180 | // operands. |
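 | | // For example (illustrative), a v_mad_f32 with two SGPR sources may keep 
 | | // only one of them; the other is copied to a VGPR via legalizeOpWithMove. 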
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2181 | void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, |
| 2182 | MachineInstr &MI) const { |
| 2183 | unsigned Opc = MI.getOpcode(); |
Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 2184 | |
| 2185 | int VOP3Idx[3] = { |
| 2186 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), |
| 2187 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), |
| 2188 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) |
| 2189 | }; |
| 2190 | |
| 2191 | // Find the one SGPR operand we are allowed to use. |
| 2192 | unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); |
| 2193 | |
| 2194 | for (unsigned i = 0; i < 3; ++i) { |
| 2195 | int Idx = VOP3Idx[i]; |
| 2196 | if (Idx == -1) |
| 2197 | break; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2198 | MachineOperand &MO = MI.getOperand(Idx); |
Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 2199 | |
| 2200 | // We should never see a VOP3 instruction with an illegal immediate operand. |
| 2201 | if (!MO.isReg()) |
| 2202 | continue; |
| 2203 | |
| 2204 | if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) |
| 2205 | continue; // VGPRs are legal |
| 2206 | |
| 2207 | if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { |
| 2208 | SGPRReg = MO.getReg(); |
| 2209 | // We can use one SGPR in each VOP3 instruction. |
| 2210 | continue; |
| 2211 | } |
| 2212 | |
| 2213 | // If we make it this far, then the operand is not legal and we must |
| 2214 | // legalize it. |
| 2215 | legalizeOpWithMove(MI, Idx); |
| 2216 | } |
| 2217 | } |
| 2218 | |
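 | | // Copy the value of the (assumed uniform) VGPR \p SrcReg into a fresh SGPR 
 | | // of the equivalent class: one V_READFIRSTLANE_B32 per 32-bit component, 
 | | // reassembled with a REG_SEQUENCE. 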
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2219 | unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, |
| 2220 | MachineRegisterInfo &MRI) const { |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2221 | const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); |
| 2222 | const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); |
| 2223 | unsigned DstReg = MRI.createVirtualRegister(SRC); |
| 2224 | unsigned SubRegs = VRC->getSize() / 4; |
| 2225 | |
| 2226 | SmallVector<unsigned, 8> SRegs; |
| 2227 | for (unsigned i = 0; i < SubRegs; ++i) { |
| 2228 | unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2229 | BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2230 | get(AMDGPU::V_READFIRSTLANE_B32), SGPR) |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2231 | .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2232 | SRegs.push_back(SGPR); |
| 2233 | } |
| 2234 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2235 | MachineInstrBuilder MIB = |
| 2236 | BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), |
| 2237 | get(AMDGPU::REG_SEQUENCE), DstReg); |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2238 | for (unsigned i = 0; i < SubRegs; ++i) { |
| 2239 | MIB.addReg(SRegs[i]); |
| 2240 | MIB.addImm(RI.getSubRegFromChannel(i)); |
| 2241 | } |
| 2242 | return DstReg; |
| 2243 | } |
| 2244 | |
Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 2245 | void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2246 | MachineInstr &MI) const { |
Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 2247 | |
 | 2248 | // If the pointer is stored in VGPRs, then we need to move it to 
 | 2249 | // SGPRs using v_readfirstlane. This is safe because we only select 
 | 2250 | // loads with uniform pointers to SMRD instructions, so we know the 
| 2251 | // pointer value is uniform. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2252 | MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); |
Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 2253 | if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { |
| 2254 | unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); |
| 2255 | SBase->setReg(SGPR); |
| 2256 | } |
| 2257 | } |
| 2258 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2259 | void SIInstrInfo::legalizeOperands(MachineInstr &MI) const { |
| 2260 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2261 | |
| 2262 | // Legalize VOP2 |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2263 | if (isVOP2(MI) || isVOPC(MI)) { |
Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 2264 | legalizeOperandsVOP2(MRI, MI); |
Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 2265 | return; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2266 | } |
| 2267 | |
| 2268 | // Legalize VOP3 |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2269 | if (isVOP3(MI)) { |
Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 2270 | legalizeOperandsVOP3(MRI, MI); |
Matt Arsenault | e068f9a | 2015-09-24 07:51:28 +0000 | [diff] [blame] | 2271 | return; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2272 | } |
| 2273 | |
Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 2274 | // Legalize SMRD |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2275 | if (isSMRD(MI)) { |
Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 2276 | legalizeOperandsSMRD(MRI, MI); |
| 2277 | return; |
| 2278 | } |
| 2279 | |
Tom Stellard | 4f3b04d | 2014-04-17 21:00:07 +0000 | [diff] [blame] | 2280 | // Legalize REG_SEQUENCE and PHI |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2281 | // The register class of the operands must match the register 
 | 2282 | // class of the output. 
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2283 | if (MI.getOpcode() == AMDGPU::PHI) { |
Craig Topper | 062a2ba | 2014-04-25 05:30:21 +0000 | [diff] [blame] | 2284 | const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2285 | for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { |
| 2286 | if (!MI.getOperand(i).isReg() || |
| 2287 | !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg())) |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2288 | continue; |
| 2289 | const TargetRegisterClass *OpRC = |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2290 | MRI.getRegClass(MI.getOperand(i).getReg()); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2291 | if (RI.hasVGPRs(OpRC)) { |
| 2292 | VRC = OpRC; |
| 2293 | } else { |
| 2294 | SRC = OpRC; |
| 2295 | } |
| 2296 | } |
| 2297 | |
 | 2298 | // If any of the operands are VGPR registers, then they must all be VGPRs; 
 | 2299 | // otherwise we will create illegal VGPR->SGPR copies when legalizing 
 | 2300 | // them. 
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2301 | if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2302 | if (!VRC) { |
| 2303 | assert(SRC); |
| 2304 | VRC = RI.getEquivalentVGPRClass(SRC); |
| 2305 | } |
| 2306 | RC = VRC; |
| 2307 | } else { |
| 2308 | RC = SRC; |
| 2309 | } |
| 2310 | |
| 2311 | // Update all the operands so they have the same type. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2312 | for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { |
| 2313 | MachineOperand &Op = MI.getOperand(I); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2314 | if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2315 | continue; |
| 2316 | unsigned DstReg = MRI.createVirtualRegister(RC); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2317 | |
| 2318 | // MI is a PHI instruction. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2319 | MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2320 | MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); |
| 2321 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2322 | BuildMI(*InsertBB, Insert, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg) |
| 2323 | .addOperand(Op); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2324 | Op.setReg(DstReg); |
| 2325 | } |
| 2326 | } |
| 2327 | |
| 2328 | // REG_SEQUENCE doesn't really require operand legalization, but if one has a |
| 2329 | // VGPR dest type and SGPR sources, insert copies so all operands are |
| 2330 | // VGPRs. This seems to help operand folding / the register coalescer. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2331 | if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { |
| 2332 | MachineBasicBlock *MBB = MI.getParent(); |
| 2333 | const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2334 | if (RI.hasVGPRs(DstRC)) { |
| 2335 | // Update all the operands so they are VGPR register classes. These may |
| 2336 | // not be the same register class because REG_SEQUENCE supports mixing |
| 2337 | // subregister index types e.g. sub0_sub1 + sub2 + sub3 |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2338 | for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { |
| 2339 | MachineOperand &Op = MI.getOperand(I); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2340 | if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) |
| 2341 | continue; |
| 2342 | |
| 2343 | const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); |
| 2344 | const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); |
| 2345 | if (VRC == OpRC) |
| 2346 | continue; |
| 2347 | |
| 2348 | unsigned DstReg = MRI.createVirtualRegister(VRC); |
| 2349 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2350 | BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg) |
| 2351 | .addOperand(Op); |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2352 | |
| 2353 | Op.setReg(DstReg); |
| 2354 | Op.setIsKill(); |
Tom Stellard | 4f3b04d | 2014-04-17 21:00:07 +0000 | [diff] [blame] | 2355 | } |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2356 | } |
Matt Arsenault | e068f9a | 2015-09-24 07:51:28 +0000 | [diff] [blame] | 2357 | |
| 2358 | return; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2359 | } |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2360 | |
Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 2361 | // Legalize INSERT_SUBREG |
| 2362 | // src0 must have the same register class as dst |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2363 | if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { |
| 2364 | unsigned Dst = MI.getOperand(0).getReg(); |
| 2365 | unsigned Src0 = MI.getOperand(1).getReg(); |
Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 2366 | const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); |
| 2367 | const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); |
| 2368 | if (DstRC != Src0RC) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2369 | MachineBasicBlock &MBB = *MI.getParent(); |
Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 2370 | unsigned NewSrc0 = MRI.createVirtualRegister(DstRC); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2371 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), NewSrc0) |
| 2372 | .addReg(Src0); |
| 2373 | MI.getOperand(1).setReg(NewSrc0); |
Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 2374 | } |
| 2375 | return; |
| 2376 | } |
| 2377 | |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2378 | // Legalize MIMG |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2379 | if (isMIMG(MI)) { |
| 2380 | MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2381 | if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { |
| 2382 | unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); |
| 2383 | SRsrc->setReg(SGPR); |
| 2384 | } |
| 2385 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2386 | MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); |
Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 2387 | if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { |
| 2388 | unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); |
| 2389 | SSamp->setReg(SGPR); |
| 2390 | } |
| 2391 | return; |
| 2392 | } |
| 2393 | |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2394 | // Legalize MUBUF* instructions |
| 2395 | // FIXME: If we start using the non-addr64 instructions for compute, we |
| 2396 | // may need to legalize them here. |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2397 | int SRsrcIdx = |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2398 | AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2399 | if (SRsrcIdx != -1) { |
| 2400 | // We have an MUBUF instruction |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2401 | MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx); |
| 2402 | unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass; |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2403 | if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()), |
| 2404 | RI.getRegClass(SRsrcRC))) { |
| 2405 | // The operands are legal. |
 | 2406 | // FIXME: We may need to legalize operands besides srsrc. 
| 2407 | return; |
| 2408 | } |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2409 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2410 | MachineBasicBlock &MBB = *MI.getParent(); |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2411 | |
Eric Christopher | 572e03a | 2015-06-19 01:53:21 +0000 | [diff] [blame] | 2412 | // Extract the ptr from the resource descriptor. |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2413 | unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc, |
| 2414 | &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2415 | |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2416 | // Create an empty resource descriptor |
| 2417 | unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
| 2418 | unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
| 2419 | unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
| 2420 | unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); |
Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 2421 | uint64_t RsrcDataFormat = getDefaultRsrcDataFormat(); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2422 | |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2423 | // Zero64 = 0 |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2424 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64) |
| 2425 | .addImm(0); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2426 | |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2427 | // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2428 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo) |
| 2429 | .addImm(RsrcDataFormat & 0xFFFFFFFF); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2430 | |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2431 | // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2432 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi) |
| 2433 | .addImm(RsrcDataFormat >> 32); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2434 | |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2435 | // NewSRsrc = {Zero64, SRsrcFormat} |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2436 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc) |
| 2437 | .addReg(Zero64) |
| 2438 | .addImm(AMDGPU::sub0_sub1) |
| 2439 | .addReg(SRsrcFormatLo) |
| 2440 | .addImm(AMDGPU::sub2) |
| 2441 | .addReg(SRsrcFormatHi) |
| 2442 | .addImm(AMDGPU::sub3); |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2443 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2444 | MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr); |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2445 | unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2446 | if (VAddr) { |
| 2447 | // This is already an ADDR64 instruction so we need to add the pointer |
| 2448 | // extracted from the resource descriptor to the current value of VAddr. |
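 | | // The 64-bit add is expanded as a 32-bit add plus an add-with-carry: 
 | | // V_ADD_I32 defines vcc implicitly and V_ADDC_U32 consumes it for the 
 | | // high half. 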
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2449 | unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2450 | unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2451 | |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2452 | // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0 |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2453 | DebugLoc DL = MI.getDebugLoc(); |
Matt Arsenault | 51d2d0f | 2015-09-01 02:02:21 +0000 | [diff] [blame] | 2454 | BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2455 | .addReg(SRsrcPtr, 0, AMDGPU::sub0) |
Matt Arsenault | 51d2d0f | 2015-09-01 02:02:21 +0000 | [diff] [blame] | 2456 | .addReg(VAddr->getReg(), 0, AMDGPU::sub0); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2457 | |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2458 | // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1 |
Matt Arsenault | 51d2d0f | 2015-09-01 02:02:21 +0000 | [diff] [blame] | 2459 | BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2460 | .addReg(SRsrcPtr, 0, AMDGPU::sub1) |
Matt Arsenault | 51d2d0f | 2015-09-01 02:02:21 +0000 | [diff] [blame] | 2461 | .addReg(VAddr->getReg(), 0, AMDGPU::sub1); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2462 | |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2463 | // NewVaddr = {NewVaddrLo, NewVaddrHi} 
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2464 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) |
| 2465 | .addReg(NewVAddrLo) |
| 2466 | .addImm(AMDGPU::sub0) |
| 2467 | .addReg(NewVAddrHi) |
| 2468 | .addImm(AMDGPU::sub1); |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2469 | } else { |
 | 2470 | // This instruction is the _OFFSET variant, so we need to convert it to 
| 2471 | // ADDR64. |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2472 | assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration() |
| 2473 | < SISubtarget::VOLCANIC_ISLANDS && |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2474 | "FIXME: Need to emit flat atomics here"); |
| 2475 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2476 | MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); |
| 2477 | MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); |
| 2478 | MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); |
| 2479 | unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2480 | |
| 2481 | // Atomics rith return have have an additional tied operand and are |
| 2482 | // missing some of the special bits. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2483 | MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2484 | MachineInstr *Addr64; |
| 2485 | |
| 2486 | if (!VDataIn) { |
| 2487 | // Regular buffer load / store. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2488 | MachineInstrBuilder MIB = |
| 2489 | BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) |
| 2490 | .addOperand(*VData) |
| 2491 | .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. |
| 2492 | // This will be replaced later |
| 2493 | // with the new value of vaddr. |
| 2494 | .addOperand(*SRsrc) |
| 2495 | .addOperand(*SOffset) |
| 2496 | .addOperand(*Offset); |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2497 | |
| 2498 | // Atomics do not have this operand. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2499 | if (const MachineOperand *GLC = |
| 2500 | getNamedOperand(MI, AMDGPU::OpName::glc)) { |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2501 | MIB.addImm(GLC->getImm()); |
| 2502 | } |
| 2503 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2504 | MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2505 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2506 | if (const MachineOperand *TFE = |
| 2507 | getNamedOperand(MI, AMDGPU::OpName::tfe)) { |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2508 | MIB.addImm(TFE->getImm()); |
| 2509 | } |
| 2510 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2511 | MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2512 | Addr64 = MIB; |
| 2513 | } else { |
| 2514 | // Atomics with return. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2515 | Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) |
| 2516 | .addOperand(*VData) |
| 2517 | .addOperand(*VDataIn) |
| 2518 | .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. |
| 2519 | // This will be replaced later |
| 2520 | // with the new value of vaddr. |
| 2521 | .addOperand(*SRsrc) |
| 2522 | .addOperand(*SOffset) |
| 2523 | .addOperand(*Offset) |
| 2524 | .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) |
| 2525 | .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); |
Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 2526 | } |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2527 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2528 | MI.removeFromParent(); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2529 | |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2530 | // NewVaddr = {SRsrcPtr:sub0, SRsrcPtr:sub1} 
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2531 | BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), |
| 2532 | NewVAddr) |
| 2533 | .addReg(SRsrcPtr, 0, AMDGPU::sub0) |
| 2534 | .addImm(AMDGPU::sub0) |
| 2535 | .addReg(SRsrcPtr, 0, AMDGPU::sub1) |
| 2536 | .addImm(AMDGPU::sub1); |
Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 2537 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2538 | VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr); |
| 2539 | SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2540 | } |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2541 | |
Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 2542 | // Update the instruction to use NewVaddr |
| 2543 | VAddr->setReg(NewVAddr); |
| 2544 | // Update the instruction to use NewSRsrc |
| 2545 | SRsrc->setReg(NewSRsrc); |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2546 | } |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2547 | } |
| 2548 | |
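 | | // Replace a SALU instruction with equivalent VALU instructions, then 
 | | // transitively process the SALU users of its result via a worklist. 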
| 2549 | void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const { |
| 2550 | SmallVector<MachineInstr *, 128> Worklist; |
| 2551 | Worklist.push_back(&TopInst); |
| 2552 | |
| 2553 | while (!Worklist.empty()) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2554 | MachineInstr &Inst = *Worklist.pop_back_val(); |
| 2555 | MachineBasicBlock *MBB = Inst.getParent(); |
Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 2556 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 2557 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2558 | unsigned Opcode = Inst.getOpcode(); |
| 2559 | unsigned NewOpcode = getVALUOp(Inst); |
Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 2560 | |
Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 2561 | // Handle some special cases |
Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 2562 | switch (Opcode) { |
Tom Stellard | 0c354f2 | 2014-04-30 15:31:29 +0000 | [diff] [blame] | 2563 | default: |
Tom Stellard | 0c354f2 | 2014-04-30 15:31:29 +0000 | [diff] [blame] | 2564 | break; |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2565 | case AMDGPU::S_AND_B64: |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2566 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2567 | Inst.eraseFromParent(); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2568 | continue; |
| 2569 | |
| 2570 | case AMDGPU::S_OR_B64: |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2571 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2572 | Inst.eraseFromParent(); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2573 | continue; |
| 2574 | |
| 2575 | case AMDGPU::S_XOR_B64: |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2576 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2577 | Inst.eraseFromParent(); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2578 | continue; |
| 2579 | |
| 2580 | case AMDGPU::S_NOT_B64: |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2581 | splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2582 | Inst.eraseFromParent(); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2583 | continue; |
| 2584 | |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2585 | case AMDGPU::S_BCNT1_I32_B64: |
| 2586 | splitScalar64BitBCNT(Worklist, Inst); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2587 | Inst.eraseFromParent(); |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2588 | continue; |
| 2589 | |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2590 | case AMDGPU::S_BFE_I64: { |
| 2591 | splitScalar64BitBFE(Worklist, Inst); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2592 | Inst.eraseFromParent(); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2593 | continue; |
| 2594 | } |
| 2595 | |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2596 | case AMDGPU::S_LSHL_B32: |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2597 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2598 | NewOpcode = AMDGPU::V_LSHLREV_B32_e64; |
| 2599 | swapOperands(Inst); |
| 2600 | } |
| 2601 | break; |
| 2602 | case AMDGPU::S_ASHR_I32: |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2603 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2604 | NewOpcode = AMDGPU::V_ASHRREV_I32_e64; |
| 2605 | swapOperands(Inst); |
| 2606 | } |
| 2607 | break; |
| 2608 | case AMDGPU::S_LSHR_B32: |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2609 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2610 | NewOpcode = AMDGPU::V_LSHRREV_B32_e64; |
| 2611 | swapOperands(Inst); |
| 2612 | } |
| 2613 | break; |
Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 2614 | case AMDGPU::S_LSHL_B64: |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2615 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 2616 | NewOpcode = AMDGPU::V_LSHLREV_B64; |
| 2617 | swapOperands(Inst); |
| 2618 | } |
| 2619 | break; |
| 2620 | case AMDGPU::S_ASHR_I64: |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2621 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 2622 | NewOpcode = AMDGPU::V_ASHRREV_I64; |
| 2623 | swapOperands(Inst); |
| 2624 | } |
| 2625 | break; |
| 2626 | case AMDGPU::S_LSHR_B64: |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 2627 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 2628 | NewOpcode = AMDGPU::V_LSHRREV_B64; |
| 2629 | swapOperands(Inst); |
| 2630 | } |
| 2631 | break; |
Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 2632 | |
Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 2633 | case AMDGPU::S_ABS_I32: |
| 2634 | lowerScalarAbs(Worklist, Inst); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2635 | Inst.eraseFromParent(); |
Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 2636 | continue; |
| 2637 | |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2638 | case AMDGPU::S_CBRANCH_SCC0: |
| 2639 | case AMDGPU::S_CBRANCH_SCC1: |
 | 2640 | // Clear unused bits of vcc (vcc &= exec) 
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2641 | BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), |
| 2642 | AMDGPU::VCC) |
| 2643 | .addReg(AMDGPU::EXEC) |
| 2644 | .addReg(AMDGPU::VCC); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2645 | break; |
| 2646 | |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2647 | case AMDGPU::S_BFE_U64: |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2648 | case AMDGPU::S_BFM_B64: |
| 2649 | llvm_unreachable("Moving this op to VALU not implemented"); |
Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 2650 | } |
| 2651 | |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2652 | if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { |
| 2653 | // We cannot move this instruction to the VALU, so we should try to |
| 2654 | // legalize its operands instead. |
| 2655 | legalizeOperands(Inst); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2656 | continue; |
Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 2657 | } |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2658 | |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2659 | // Use the new VALU Opcode. |
| 2660 | const MCInstrDesc &NewDesc = get(NewOpcode); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2661 | Inst.setDesc(NewDesc); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2662 | |
Matt Arsenault | f0b1e3a | 2013-11-18 20:09:21 +0000 | [diff] [blame] | 2663 | // Remove any references to SCC. Vector instructions can't read from it, and |
 | 2664 | // we're just about to add the implicit use / defs of VCC, and we don't want 
| 2665 | // both. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2666 | for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { |
| 2667 | MachineOperand &Op = Inst.getOperand(i); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2668 | if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2669 | Inst.RemoveOperand(i); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2670 | addSCCDefUsersToVALUWorklist(Inst, Worklist); |
| 2671 | } |
Matt Arsenault | f0b1e3a | 2013-11-18 20:09:21 +0000 | [diff] [blame] | 2672 | } |
| 2673 | |
Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 2674 | if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { |
| 2675 | // We are converting these to a BFE, so we need to add the missing |
| 2676 | // operands for the size and offset. |
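 | | // For example, S_SEXT_I32_I16 %dst, %src (registers illustrative) becomes 
 | | //   V_BFE_I32 %dst, %src, 0 /*offset*/, 16 /*width*/. 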
| 2677 | unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2678 | Inst.addOperand(MachineOperand::CreateImm(0)); |
| 2679 | Inst.addOperand(MachineOperand::CreateImm(Size)); |
Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 2680 | |
Matt Arsenault | b5b5110 | 2014-06-10 19:18:21 +0000 | [diff] [blame] | 2681 | } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { |
| 2682 | // The VALU version adds the second operand to the result, so insert an |
| 2683 | // extra 0 operand. |
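 | | // For example, S_BCNT1_I32_B32 %dst, %src (registers illustrative) becomes 
 | | //   V_BCNT_U32_B32_e64 %dst, %src, 0. 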
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2684 | Inst.addOperand(MachineOperand::CreateImm(0)); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2685 | } |
| 2686 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2687 | Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2688 | |
Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 2689 | if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2690 | const MachineOperand &OffsetWidthOp = Inst.getOperand(2); |
Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 2691 | // If we need to move this to VGPRs, we need to unpack the second operand
| 2692 | // back into the two separate operands for bit offset and width.
| 2693 | assert(OffsetWidthOp.isImm() && |
| 2694 | "Scalar BFE is only implemented for constant width and offset"); |
| 2695 | uint32_t Imm = OffsetWidthOp.getImm(); |
| 2696 | |
| 2697 | uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. |
| 2698 | uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. |
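// Worked example: an immediate of 0x00100003 decodes under the masks above
// to Offset = 3 and BitWidth = 16.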
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2699 | Inst.RemoveOperand(2); // Remove old immediate. |
| 2700 | Inst.addOperand(MachineOperand::CreateImm(Offset)); |
| 2701 | Inst.addOperand(MachineOperand::CreateImm(BitWidth)); |
Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 2702 | } |
| 2703 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2704 | bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2705 | unsigned NewDstReg = AMDGPU::NoRegister; |
| 2706 | if (HasDst) { |
| 2707 | // Update the destination register class. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2708 | const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2709 | if (!NewDstRC) |
| 2710 | continue; |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2711 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2712 | unsigned DstReg = Inst.getOperand(0).getReg(); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2713 | NewDstReg = MRI.createVirtualRegister(NewDstRC); |
| 2714 | MRI.replaceRegWith(DstReg, NewDstReg); |
| 2715 | } |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2716 | |
Tom Stellard | e1a2445 | 2014-04-17 21:00:01 +0000 | [diff] [blame] | 2717 | // Legalize the operands |
| 2718 | legalizeOperands(Inst); |
| 2719 | |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2720 | if (HasDst) |
| 2721 | addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); |
Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 2722 | } |
| 2723 | } |
| 2724 | |
Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 2725 | void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist, |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2726 | MachineInstr &Inst) const { |
| 2727 | MachineBasicBlock &MBB = *Inst.getParent(); |
Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 2728 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| 2729 | MachineBasicBlock::iterator MII = Inst; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2730 | DebugLoc DL = Inst.getDebugLoc(); |
Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 2731 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2732 | MachineOperand &Dest = Inst.getOperand(0); |
| 2733 | MachineOperand &Src = Inst.getOperand(1); |
Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 2734 | unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2735 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2736 | |
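// Lower |x| as max(x, 0 - x): TmpReg receives the negation and ResultReg the
// signed maximum of the original value and its negation.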
| 2737 | BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg) |
| 2738 | .addImm(0) |
| 2739 | .addReg(Src.getReg()); |
| 2740 | |
| 2741 | BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) |
| 2742 | .addReg(Src.getReg()) |
| 2743 | .addReg(TmpReg); |
| 2744 | |
| 2745 | MRI.replaceRegWith(Dest.getReg(), ResultReg); |
| 2746 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); |
| 2747 | } |
| 2748 | |
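// A sketch of the split performed below, assuming an S_NOT_B64 input that
// maps to V_NOT_B32 (register names here are hypothetical):
//
//   %lo  = V_NOT_B32 %src.sub0
//   %hi  = V_NOT_B32 %src.sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1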
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2749 | void SIInstrInfo::splitScalar64BitUnaryOp( |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2750 | SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst, |
| 2751 | unsigned Opcode) const { |
| 2752 | MachineBasicBlock &MBB = *Inst.getParent(); |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2753 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| 2754 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2755 | MachineOperand &Dest = Inst.getOperand(0); |
| 2756 | MachineOperand &Src0 = Inst.getOperand(1); |
| 2757 | DebugLoc DL = Inst.getDebugLoc(); |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2758 | |
| 2759 | MachineBasicBlock::iterator MII = Inst; |
| 2760 | |
| 2761 | const MCInstrDesc &InstDesc = get(Opcode); |
| 2762 | const TargetRegisterClass *Src0RC = Src0.isReg() ? |
| 2763 | MRI.getRegClass(Src0.getReg()) : |
| 2764 | &AMDGPU::SGPR_32RegClass; |
| 2765 | |
| 2766 | const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); |
| 2767 | |
| 2768 | MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, |
| 2769 | AMDGPU::sub0, Src0SubRC); |
| 2770 | |
| 2771 | const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2772 | const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); |
| 2773 | const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2774 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2775 | unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); |
| 2776 | BuildMI(MBB, MII, DL, InstDesc, DestSub0) |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2777 | .addOperand(SrcReg0Sub0); |
| 2778 | |
| 2779 | MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, |
| 2780 | AMDGPU::sub1, Src0SubRC); |
| 2781 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2782 | unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); |
| 2783 | BuildMI(MBB, MII, DL, InstDesc, DestSub1) |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2784 | .addOperand(SrcReg0Sub1); |
| 2785 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2786 | unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2787 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) |
| 2788 | .addReg(DestSub0) |
| 2789 | .addImm(AMDGPU::sub0) |
| 2790 | .addReg(DestSub1) |
| 2791 | .addImm(AMDGPU::sub1); |
| 2792 | |
| 2793 | MRI.replaceRegWith(Dest.getReg(), FullDestReg); |
| 2794 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2795 | // We don't need to legalizeOperands here because with a single operand, src0
| 2796 | // will support any kind of input.
| 2797 | |
| 2798 | // Move all users of this moved value. |
| 2799 | addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); |
Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 2800 | } |
| 2801 | |
| 2802 | void SIInstrInfo::splitScalar64BitBinaryOp( |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2803 | SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst, |
| 2804 | unsigned Opcode) const { |
| 2805 | MachineBasicBlock &MBB = *Inst.getParent(); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2806 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| 2807 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2808 | MachineOperand &Dest = Inst.getOperand(0); |
| 2809 | MachineOperand &Src0 = Inst.getOperand(1); |
| 2810 | MachineOperand &Src1 = Inst.getOperand(2); |
| 2811 | DebugLoc DL = Inst.getDebugLoc(); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2812 | |
| 2813 | MachineBasicBlock::iterator MII = Inst; |
| 2814 | |
| 2815 | const MCInstrDesc &InstDesc = get(Opcode); |
Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 2816 | const TargetRegisterClass *Src0RC = Src0.isReg() ? |
| 2817 | MRI.getRegClass(Src0.getReg()) : |
| 2818 | &AMDGPU::SGPR_32RegClass; |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2819 | |
Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 2820 | const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); |
| 2821 | const TargetRegisterClass *Src1RC = Src1.isReg() ? |
| 2822 | MRI.getRegClass(Src1.getReg()) : |
| 2823 | &AMDGPU::SGPR_32RegClass; |
| 2824 | |
| 2825 | const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); |
| 2826 | |
| 2827 | MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, |
| 2828 | AMDGPU::sub0, Src0SubRC); |
| 2829 | MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, |
| 2830 | AMDGPU::sub0, Src1SubRC); |
| 2831 | |
| 2832 | const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2833 | const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); |
| 2834 | const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); |
Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 2835 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2836 | unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2837 | MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) |
| 2838 | .addOperand(SrcReg0Sub0) |
| 2839 | .addOperand(SrcReg1Sub0); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2840 | |
Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 2841 | MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, |
| 2842 | AMDGPU::sub1, Src0SubRC); |
| 2843 | MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, |
| 2844 | AMDGPU::sub1, Src1SubRC); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2845 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2846 | unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2847 | MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) |
| 2848 | .addOperand(SrcReg0Sub1) |
| 2849 | .addOperand(SrcReg1Sub1); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2850 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2851 | unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2852 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) |
| 2853 | .addReg(DestSub0) |
| 2854 | .addImm(AMDGPU::sub0) |
| 2855 | .addReg(DestSub1) |
| 2856 | .addImm(AMDGPU::sub1); |
| 2857 | |
| 2858 | MRI.replaceRegWith(Dest.getReg(), FullDestReg); |
| 2859 | |
| 2860 | // Try to legalize the operands in case we need to swap the order to keep it |
| 2861 | // valid. |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2862 | legalizeOperands(LoHalf); |
| 2863 | legalizeOperands(HiHalf); |
| 2864 | |
| 2865 | // Move all users of this moved value.
| 2866 | addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); |
Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 2867 | } |
| 2868 | |
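// A sketch of the BCNT split below (hypothetical register names): the second
// V_BCNT operand acts as an accumulator, so the two half-counts sum into the
// final result:
//
//   %mid = V_BCNT_U32_B32_e64 %src.sub0, 0
//   %res = V_BCNT_U32_B32_e64 %src.sub1, %mid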
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2869 | void SIInstrInfo::splitScalar64BitBCNT( |
| 2870 | SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const { |
| 2871 | MachineBasicBlock &MBB = *Inst.getParent(); |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2872 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| 2873 | |
| 2874 | MachineBasicBlock::iterator MII = Inst; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2875 | DebugLoc DL = Inst.getDebugLoc(); |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2876 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2877 | MachineOperand &Dest = Inst.getOperand(0); |
| 2878 | MachineOperand &Src = Inst.getOperand(1); |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2879 | |
Marek Olsak | c536850 | 2015-01-15 18:43:01 +0000 | [diff] [blame] | 2880 | const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2881 | const TargetRegisterClass *SrcRC = Src.isReg() ? |
| 2882 | MRI.getRegClass(Src.getReg()) : |
| 2883 | &AMDGPU::SGPR_32RegClass; |
| 2884 | |
| 2885 | unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2886 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2887 | |
| 2888 | const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); |
| 2889 | |
| 2890 | MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, |
| 2891 | AMDGPU::sub0, SrcSubRC); |
| 2892 | MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, |
| 2893 | AMDGPU::sub1, SrcSubRC); |
| 2894 | |
Matt Arsenault | 5e7f95e | 2015-08-26 20:48:04 +0000 | [diff] [blame] | 2895 | BuildMI(MBB, MII, DL, InstDesc, MidReg) |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2896 | .addOperand(SrcRegSub0) |
| 2897 | .addImm(0); |
| 2898 | |
Matt Arsenault | 5e7f95e | 2015-08-26 20:48:04 +0000 | [diff] [blame] | 2899 | BuildMI(MBB, MII, DL, InstDesc, ResultReg) |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2900 | .addOperand(SrcRegSub1) |
| 2901 | .addReg(MidReg); |
| 2902 | |
| 2903 | MRI.replaceRegWith(Dest.getReg(), ResultReg); |
| 2904 | |
Matt Arsenault | 5e7f95e | 2015-08-26 20:48:04 +0000 | [diff] [blame] | 2905 | // We don't need to legalize operands here. src0 for either instruction can be
| 2906 | // an SGPR, and the second input is unused or determined here. |
| 2907 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); |
Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 2908 | } |
| 2909 | |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2910 | void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist, |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2911 | MachineInstr &Inst) const { |
| 2912 | MachineBasicBlock &MBB = *Inst.getParent(); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2913 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| 2914 | MachineBasicBlock::iterator MII = Inst; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2915 | DebugLoc DL = Inst.getDebugLoc(); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2916 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2917 | MachineOperand &Dest = Inst.getOperand(0); |
| 2918 | uint32_t Imm = Inst.getOperand(2).getImm(); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2919 | uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. |
| 2920 | uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. |
| 2921 | |
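// Offset is referenced only by the assert below; the void cast avoids an
// unused-variable warning in builds where assertions compile away.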
Matt Arsenault | 6ad3426 | 2014-11-14 18:40:49 +0000 | [diff] [blame] | 2922 | (void) Offset; |
| 2923 | |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2924 | // Only sext_inreg cases handled. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2925 | assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && |
| 2926 | Offset == 0 && "Not implemented"); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2927 | |
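// For widths below 32, extract the field from the low half with a 32-bit BFE,
// then derive the high half by arithmetically shifting the result right by 31
// to replicate the sign bit.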
| 2928 | if (BitWidth < 32) { |
| 2929 | unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2930 | unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2931 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); |
| 2932 | |
| 2933 | BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2934 | .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) |
| 2935 | .addImm(0) |
| 2936 | .addImm(BitWidth); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2937 | |
| 2938 | BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) |
| 2939 | .addImm(31) |
| 2940 | .addReg(MidRegLo); |
| 2941 | |
| 2942 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) |
| 2943 | .addReg(MidRegLo) |
| 2944 | .addImm(AMDGPU::sub0) |
| 2945 | .addReg(MidRegHi) |
| 2946 | .addImm(AMDGPU::sub1); |
| 2947 | |
| 2948 | MRI.replaceRegWith(Dest.getReg(), ResultReg); |
Matt Arsenault | 445833c | 2015-08-26 20:47:58 +0000 | [diff] [blame] | 2949 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2950 | return; |
| 2951 | } |
| 2952 | |
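// For BitWidth == 32 the low half is already the extracted value, so only the
// sign word (sub0 arithmetically shifted right by 31) needs to be built.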
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2953 | MachineOperand &Src = Inst.getOperand(1); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2954 | unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| 2955 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); |
| 2956 | |
| 2957 | BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) |
| 2958 | .addImm(31) |
| 2959 | .addReg(Src.getReg(), 0, AMDGPU::sub0); |
| 2960 | |
| 2961 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) |
| 2962 | .addReg(Src.getReg(), 0, AMDGPU::sub0) |
| 2963 | .addImm(AMDGPU::sub0) |
| 2964 | .addReg(TmpReg) |
| 2965 | .addImm(AMDGPU::sub1); |
| 2966 | |
| 2967 | MRI.replaceRegWith(Dest.getReg(), ResultReg); |
Matt Arsenault | 445833c | 2015-08-26 20:47:58 +0000 | [diff] [blame] | 2968 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); |
Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 2969 | } |
| 2970 | |
Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 2971 | void SIInstrInfo::addUsersToMoveToVALUWorklist( |
| 2972 | unsigned DstReg, |
| 2973 | MachineRegisterInfo &MRI, |
| 2974 | SmallVectorImpl<MachineInstr *> &Worklist) const { |
| 2975 | for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), |
| 2976 | E = MRI.use_end(); I != E; ++I) { |
| 2977 | MachineInstr &UseMI = *I->getParent(); |
| 2978 | if (!canReadVGPR(UseMI, I.getOperandNo())) { |
| 2979 | Worklist.push_back(&UseMI); |
| 2980 | } |
| 2981 | } |
| 2982 | } |
| 2983 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2984 | void SIInstrInfo::addSCCDefUsersToVALUWorklist( |
| 2985 | MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const { |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2986 | // This assumes that all the users of SCC are in the same block |
| 2987 | // as the SCC def. |
Duncan P. N. Exon Smith | 4d29511 | 2016-07-08 19:16:05 +0000 | [diff] [blame] | 2988 | for (MachineInstr &MI : |
| 2989 | llvm::make_range(MachineBasicBlock::iterator(SCCDefInst), |
| 2990 | SCCDefInst.getParent()->end())) { |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2991 | // Exit if we find another SCC def. |
Duncan P. N. Exon Smith | 4d29511 | 2016-07-08 19:16:05 +0000 | [diff] [blame] | 2992 | if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1) |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2993 | return; |
| 2994 | |
Duncan P. N. Exon Smith | 4d29511 | 2016-07-08 19:16:05 +0000 | [diff] [blame] | 2995 | if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1) |
| 2996 | Worklist.push_back(&MI); |
Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 2997 | } |
| 2998 | } |
| 2999 | |
Matt Arsenault | ba6aae7 | 2015-09-28 20:54:57 +0000 | [diff] [blame] | 3000 | const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( |
| 3001 | const MachineInstr &Inst) const { |
| 3002 | const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); |
| 3003 | |
| 3004 | switch (Inst.getOpcode()) { |
| 3005 | // For target instructions, getOpRegClass just returns the virtual register |
| 3006 | // class associated with the operand, so we need to find an equivalent VGPR |
| 3007 | // register class in order to move the instruction to the VALU. |
| 3008 | case AMDGPU::COPY: |
| 3009 | case AMDGPU::PHI: |
| 3010 | case AMDGPU::REG_SEQUENCE: |
| 3011 | case AMDGPU::INSERT_SUBREG: |
| 3012 | if (RI.hasVGPRs(NewDstRC)) |
| 3013 | return nullptr; |
| 3014 | |
| 3015 | NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); |
| 3016 | if (!NewDstRC) |
| 3017 | return nullptr; |
| 3018 | return NewDstRC; |
| 3019 | default: |
| 3020 | return NewDstRC; |
| 3021 | } |
| 3022 | } |
| 3023 | |
Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 3024 | // Find the one SGPR operand we are allowed to use. |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3025 | unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI, |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3026 | int OpIndices[3]) const { |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3027 | const MCInstrDesc &Desc = MI.getDesc(); |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3028 | |
| 3029 | // Find the one SGPR operand we are allowed to use. |
Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 3030 | // |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3031 | // First we need to consider the instruction's operand requirements before |
| 3032 | // legalizing. Some operands are required to be SGPRs, such as implicit uses |
| 3033 | // of VCC, but we are still bound by the constant bus requirement to only use |
| 3034 | // one. |
| 3035 | // |
| 3036 | // If the operand's class is an SGPR, we can never move it. |
| 3037 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3038 | unsigned SGPRReg = findImplicitSGPRRead(MI); |
Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 3039 | if (SGPRReg != AMDGPU::NoRegister) |
| 3040 | return SGPRReg; |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3041 | |
| 3042 | unsigned UsedSGPRs[3] = { AMDGPU::NoRegister }; |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3043 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3044 | |
| 3045 | for (unsigned i = 0; i < 3; ++i) { |
| 3046 | int Idx = OpIndices[i]; |
| 3047 | if (Idx == -1) |
| 3048 | break; |
| 3049 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3050 | const MachineOperand &MO = MI.getOperand(Idx); |
Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 3051 | if (!MO.isReg()) |
| 3052 | continue; |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3053 | |
Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 3054 | // Is this operand statically required to be an SGPR based on the operand |
| 3055 | // constraints? |
| 3056 | const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); |
| 3057 | bool IsRequiredSGPR = RI.isSGPRClass(OpRC); |
| 3058 | if (IsRequiredSGPR) |
| 3059 | return MO.getReg(); |
| 3060 | |
| 3061 | // If this could be a VGPR or an SGPR, check the dynamic register class.
| 3062 | unsigned Reg = MO.getReg(); |
| 3063 | const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); |
| 3064 | if (RI.isSGPRClass(RegRC)) |
| 3065 | UsedSGPRs[i] = Reg; |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3066 | } |
| 3067 | |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3068 | // We don't have a required SGPR operand, so we have a bit more freedom in |
| 3069 | // selecting operands to move. |
| 3070 | |
| 3071 | // Try to select the most used SGPR. If an SGPR is equal to one of the |
| 3072 | // others, we choose that. |
| 3073 | // |
| 3074 | // e.g. |
| 3075 | // V_FMA_F32 v0, s0, s0, s0 -> No moves |
| 3076 | // V_FMA_F32 v0, s0, s1, s0 -> Move s1 |
| 3077 | |
Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 3078 | // TODO: If some of the operands are 64-bit SGPRs and some 32, we should |
| 3079 | // prefer those. |
| 3080 | |
Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 3081 | if (UsedSGPRs[0] != AMDGPU::NoRegister) { |
| 3082 | if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) |
| 3083 | SGPRReg = UsedSGPRs[0]; |
| 3084 | } |
| 3085 | |
| 3086 | if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { |
| 3087 | if (UsedSGPRs[1] == UsedSGPRs[2]) |
| 3088 | SGPRReg = UsedSGPRs[1]; |
| 3089 | } |
| 3090 | |
| 3091 | return SGPRReg; |
| 3092 | } |
| 3093 | |
Tom Stellard | 6407e1e | 2014-08-01 00:32:33 +0000 | [diff] [blame] | 3094 | MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, |
Matt Arsenault | ace5b76 | 2014-10-17 18:00:43 +0000 | [diff] [blame] | 3095 | unsigned OperandName) const { |
Tom Stellard | 1aaad69 | 2014-07-21 16:55:33 +0000 | [diff] [blame] | 3096 | int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); |
| 3097 | if (Idx == -1) |
| 3098 | return nullptr; |
| 3099 | |
| 3100 | return &MI.getOperand(Idx); |
| 3101 | } |
Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 3102 | |
| 3103 | uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { |
| 3104 | uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; |
Tom Stellard | 4694ed0 | 2015-06-26 21:58:42 +0000 | [diff] [blame] | 3105 | if (ST.isAmdHsaOS()) { |
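// Presumably this sets the ATC bit so HSA buffers go through the GPU's
// address translation cache; treat the exact field name as an assumption.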
Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 3106 | RsrcDataFormat |= (1ULL << 56); |
| 3107 | |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 3108 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) |
Michel Danzer | beb79ce | 2016-03-16 09:10:35 +0000 | [diff] [blame] | 3109 | // Set MTYPE = 2 |
| 3110 | RsrcDataFormat |= (2ULL << 59); |
Tom Stellard | 4694ed0 | 2015-06-26 21:58:42 +0000 | [diff] [blame] | 3111 | } |
| 3112 | |
Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 3113 | return RsrcDataFormat; |
| 3114 | } |
Marek Olsak | d1a69a2 | 2015-09-29 23:37:32 +0000 | [diff] [blame] | 3115 | |
| 3116 | uint64_t SIInstrInfo::getScratchRsrcWords23() const { |
| 3117 | uint64_t Rsrc23 = getDefaultRsrcDataFormat() | |
| 3118 | AMDGPU::RSRC_TID_ENABLE | |
| 3119 | 0xffffffff; // Size
| 3120 | |
Matt Arsenault | 24ee078 | 2016-02-12 02:40:47 +0000 | [diff] [blame] | 3121 | uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; |
| 3122 | |
Marek Olsak | e93f6d6 | 2016-06-13 16:05:57 +0000 | [diff] [blame] | 3123 | Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) | |
| 3124 | // IndexStride = 64 |
| 3125 | (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT); |
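// Worked example: a maximum private element size of 4 bytes yields
// Log2_32(4) - 1 == 1 in the element-size field, and the constant 3 above is
// the encoding of the index stride of 64 noted in the comment.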
Matt Arsenault | 24ee078 | 2016-02-12 02:40:47 +0000 | [diff] [blame] | 3126 | |
Marek Olsak | d1a69a2 | 2015-09-29 23:37:32 +0000 | [diff] [blame] | 3127 | // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. |
| 3128 | // Clear them unless we want a huge stride. |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 3129 | if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) |
Marek Olsak | d1a69a2 | 2015-09-29 23:37:32 +0000 | [diff] [blame] | 3130 | Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; |
| 3131 | |
| 3132 | return Rsrc23; |
| 3133 | } |
Nicolai Haehnle | 02c3291 | 2016-01-13 16:10:10 +0000 | [diff] [blame] | 3134 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3135 | bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { |
| 3136 | unsigned Opc = MI.getOpcode(); |
Nicolai Haehnle | 02c3291 | 2016-01-13 16:10:10 +0000 | [diff] [blame] | 3137 | |
| 3138 | return isSMRD(Opc); |
| 3139 | } |
| 3140 | |
Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3141 | bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const { |
| 3142 | unsigned Opc = MI.getOpcode(); |
Nicolai Haehnle | 02c3291 | 2016-01-13 16:10:10 +0000 | [diff] [blame] | 3143 | |
| 3144 | return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc); |
| 3145 | } |
Tom Stellard | 2ff7262 | 2016-01-28 16:04:37 +0000 | [diff] [blame] | 3146 | |
Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame^] | 3147 | unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, |
| 3148 | int &FrameIndex) const { |
| 3149 | const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); |
| 3150 | if (!Addr || !Addr->isFI()) |
| 3151 | return AMDGPU::NoRegister; |
| 3152 | |
| 3153 | assert(!MI.memoperands_empty() && |
| 3154 | (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); |
| 3155 | |
| 3156 | FrameIndex = Addr->getIndex(); |
| 3157 | return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); |
| 3158 | } |
| 3159 | |
| 3160 | unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, |
| 3161 | int &FrameIndex) const { |
| 3162 | const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); |
| 3163 | assert(Addr && Addr->isFI()); |
| 3164 | FrameIndex = Addr->getIndex(); |
| 3165 | return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); |
| 3166 | } |
| 3167 | |
| 3168 | unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
| 3169 | int &FrameIndex) const { |
| 3170 | |
| 3171 | if (!MI.mayLoad()) |
| 3172 | return AMDGPU::NoRegister; |
| 3173 | |
| 3174 | if (isMUBUF(MI) || isVGPRSpill(MI)) |
| 3175 | return isStackAccess(MI, FrameIndex); |
| 3176 | |
| 3177 | if (isSGPRSpill(MI)) |
| 3178 | return isSGPRStackAccess(MI, FrameIndex); |
| 3179 | |
| 3180 | return AMDGPU::NoRegister; |
| 3181 | } |
| 3182 | |
| 3183 | unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
| 3184 | int &FrameIndex) const { |
| 3185 | if (!MI.mayStore()) |
| 3186 | return AMDGPU::NoRegister; |
| 3187 | |
| 3188 | if (isMUBUF(MI) || isVGPRSpill(MI)) |
| 3189 | return isStackAccess(MI, FrameIndex); |
| 3190 | |
| 3191 | if (isSGPRSpill(MI)) |
| 3192 | return isSGPRStackAccess(MI, FrameIndex); |
| 3193 | |
| 3194 | return AMDGPU::NoRegister; |
| 3195 | } |
| 3196 | |
Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 3197 | unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
| 3198 | unsigned Opc = MI.getOpcode(); |
| 3199 | const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); |
| 3200 | unsigned DescSize = Desc.getSize(); |
| 3201 | |
| 3202 | // If we have a definitive size, we can use it. Otherwise we need to inspect |
| 3203 | // the operands to know the size. |
Matt Arsenault | ac42ba8 | 2016-09-03 17:25:44 +0000 | [diff] [blame] | 3204 | if (DescSize != 0) |
Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 3205 | return DescSize; |
| 3206 | |
Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 3207 | // 4-byte instructions may have a 32-bit literal encoded after them. Check |
| 3208 | // operands that could ever be literals.
| 3209 | if (isVALU(MI) || isSALU(MI)) { |
| 3210 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); |
| 3211 | if (Src0Idx == -1) |
| 3212 | return 4; // No operands. |
| 3213 | |
Matt Arsenault | c1ebd82 | 2016-08-13 01:43:54 +0000 | [diff] [blame] | 3214 | if (isLiteralConstantLike(MI.getOperand(Src0Idx), getOpSize(MI, Src0Idx))) |
Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 3215 | return 8; |
| 3216 | |
| 3217 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); |
| 3218 | if (Src1Idx == -1) |
| 3219 | return 4; |
| 3220 | |
Matt Arsenault | c1ebd82 | 2016-08-13 01:43:54 +0000 | [diff] [blame] | 3221 | if (isLiteralConstantLike(MI.getOperand(Src1Idx), getOpSize(MI, Src1Idx))) |
Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 3222 | return 8; |
| 3223 | |
| 3224 | return 4; |
| 3225 | } |
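// Illustrative: a VALU instruction whose src0 is an inline constant stays at
// 4 bytes, while one carrying a full 32-bit literal encodes to 8 bytes, per
// the checks above.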
| 3226 | |
| 3227 | switch (Opc) { |
| 3228 | case TargetOpcode::IMPLICIT_DEF: |
| 3229 | case TargetOpcode::KILL: |
| 3230 | case TargetOpcode::DBG_VALUE: |
| 3231 | case TargetOpcode::BUNDLE: |
| 3232 | case TargetOpcode::EH_LABEL: |
| 3233 | return 0; |
| 3234 | case TargetOpcode::INLINEASM: { |
| 3235 | const MachineFunction *MF = MI.getParent()->getParent(); |
| 3236 | const char *AsmStr = MI.getOperand(0).getSymbolName(); |
| 3237 | return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); |
| 3238 | } |
| 3239 | default: |
| 3240 | llvm_unreachable("unable to find instruction size"); |
| 3241 | } |
| 3242 | } |
| 3243 | |
Tom Stellard | 2ff7262 | 2016-01-28 16:04:37 +0000 | [diff] [blame] | 3244 | ArrayRef<std::pair<int, const char *>> |
| 3245 | SIInstrInfo::getSerializableTargetIndices() const { |
| 3246 | static const std::pair<int, const char *> TargetIndices[] = { |
| 3247 | {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, |
| 3248 | {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, |
| 3249 | {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, |
| 3250 | {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, |
| 3251 | {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; |
| 3252 | return makeArrayRef(TargetIndices); |
| 3253 | } |
Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 3254 | |
| 3255 | /// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
| 3256 | /// post-RA version of misched uses CreateTargetMIHazardRecognizer. |
| 3257 | ScheduleHazardRecognizer * |
| 3258 | SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, |
| 3259 | const ScheduleDAG *DAG) const { |
| 3260 | return new GCNHazardRecognizer(DAG->MF); |
| 3261 | } |
| 3262 | |
| 3263 | /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer |
| 3264 | /// pass. |
| 3265 | ScheduleHazardRecognizer * |
| 3266 | SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { |
| 3267 | return new GCNHazardRecognizer(MF); |
| 3268 | } |