//===- SIInstrInfo.cpp - SI Instruction Information  ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}


// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests for
// long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs.  Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs.  Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
                                          const MachineOperand *&BaseOp,
                                          int64_t &Offset,
                                          const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      // TODO: ds_consume/ds_append use M0 for the base address. Is it safe to
      // report that here?
      if (!BaseOp)
        return false;

      Offset = OffsetImm->getImm();
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive. We
    // will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
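      // For a read2 the destination register covers both elements, so each
      // element is half that register's size in bytes (bits / 2 / 8 == bits /
      // 16). For a write2 the data0 operand is a single element, so its size
      // in bytes is bits / 8.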
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      Offset = EltSize * Offset0;
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = AddrReg;
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseOp = SBaseReg;
    Offset = OffsetImm->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isFLAT(LdSt)) {
    const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (VAddr) {
      // Can't analyze 2 offsets.
      if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
        return false;

      BaseOp = VAddr;
    } else {
      // scratch instructions have either vaddr or saddr.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    }

    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  const MachineOperand &BaseOp1,
                                  const MachineInstr &MI2,
                                  const MachineOperand &BaseOp2) {
  // Support only base operands with base registers.
  // Note: this could be extended to support FI operands.
  if (!BaseOp1.isReg() || !BaseOp2.isReg())
    return false;

  if (BaseOp1.isIdenticalTo(BaseOp2))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  const MachineFunction &MF = *MI1.getParent()->getParent();
  const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
  Base1 = GetUnderlyingObject(Base1, DL);
  Base2 = GetUnderlyingObject(Base2, DL);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
                                      const MachineOperand &BaseOp2,
                                      unsigned NumLoads) const {
  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
  const MachineInstr &SecondLdSt = *BaseOp2.getParent();

  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
    return false;

  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
      (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
    const unsigned MaxGlobalLoadCluster = 6;
    if (NumLoads > MaxGlobalLoadCluster)
      return false;

    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    if (!FirstDst)
      FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
    if (!SecondDst)
      SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions.  This is done to help reduce
  // register pressure.  The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();

  const unsigned Reg = FirstDst->getReg();

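  // The destination may still be a virtual register or may already have been
  // assigned a physical register, so look up its register class accordingly.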
  const TargetRegisterClass *DstRC = TargetRegisterInfo::isVirtualRegister(Reg)
                                         ? MRI.getRegClass(Reg)
                                         : RI.getPhysRegClass(Reg);

  return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 16 store batches.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
                                        "illegal SGPR to VGPR copy",
                                        DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

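  // Emit a SI_ILLEGAL_COPY pseudo in place of the copy so the instruction
  // stream stays well formed after the error has been reported.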
  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(-1)
          .addImm(0);
      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32.
    if (!(RI.getRegSizeInBits(*RC) % 64)) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
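  // If the source and destination register tuples overlap, copy the
  // sub-registers in an order that reads each source sub-register before it
  // is overwritten.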
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
    Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
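    // Only the first sub-register receives the immediate; the remaining
    // sub-registers are zeroed.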
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, Idx));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, unsigned DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     unsigned TrueReg,
                                     unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
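    // The select operand of V_CNDMASK_B32_e64 must be an SGPR pair, so copy
    // the condition into one first.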
    unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
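      // S_OR_SAVEEXEC with 0 leaves EXEC unchanged but sets SCC to whether
      // EXEC is non-zero; the saved copy in SReg2 is otherwise unused.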
|  | 774 | BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2) | 
|  | 775 | .addImm(0); | 
|  | 776 | BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg) | 
|  | 777 | .addImm(-1) | 
|  | 778 | .addImm(0); | 
|  | 779 | BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) | 
| Tim Renouf | 2e94f6e | 2019-03-18 19:25:39 +0000 | [diff] [blame] | 780 | .addImm(0) | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 781 | .addReg(FalseReg) | 
| Tim Renouf | 2e94f6e | 2019-03-18 19:25:39 +0000 | [diff] [blame] | 782 | .addImm(0) | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 783 | .addReg(TrueReg) | 
|  | 784 | .addReg(SReg); | 
|  | 785 | break; | 
|  | 786 | } | 
|  | 787 | case SIInstrInfo::EXECZ: { | 
| Nicolai Haehnle | ce4ddd0 | 2017-09-29 15:37:31 +0000 | [diff] [blame] | 788 | unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 789 | unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 790 | BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2) | 
|  | 791 | .addImm(0); | 
|  | 792 | BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg) | 
|  | 793 | .addImm(0) | 
|  | 794 | .addImm(-1); | 
|  | 795 | BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) | 
| Tim Renouf | 2e94f6e | 2019-03-18 19:25:39 +0000 | [diff] [blame] | 796 | .addImm(0) | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 797 | .addReg(FalseReg) | 
| Tim Renouf | 2e94f6e | 2019-03-18 19:25:39 +0000 | [diff] [blame] | 798 | .addImm(0) | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 799 | .addReg(TrueReg) | 
|  | 800 | .addReg(SReg); | 
|  | 801 | llvm_unreachable("Unhandled branch predicate EXECZ"); | 
|  | 802 | break; | 
|  | 803 | } | 
|  | 804 | default: | 
|  | 805 | llvm_unreachable("invalid branch predicate"); | 
|  | 806 | } | 
|  | 807 | } else { | 
|  | 808 | llvm_unreachable("Can only handle Cond size 1 or 2"); | 
|  | 809 | } | 
|  | 810 | } | 
|  | 811 |  | 
|  | 812 | unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB, | 
|  | 813 | MachineBasicBlock::iterator I, | 
|  | 814 | const DebugLoc &DL, | 
|  | 815 | unsigned SrcReg, int Value) const { | 
|  | 816 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | 
|  | 817 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 818 | BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) | 
|  | 819 | .addImm(Value) | 
|  | 820 | .addReg(SrcReg); | 
|  | 821 |  | 
|  | 822 | return Reg; | 
|  | 823 | } | 
|  | 824 |  | 
|  | 825 | unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB, | 
|  | 826 | MachineBasicBlock::iterator I, | 
|  | 827 | const DebugLoc &DL, | 
|  | 828 | unsigned SrcReg, int Value) const { | 
|  | 829 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | 
|  | 830 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 831 | BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) | 
|  | 832 | .addImm(Value) | 
|  | 833 | .addReg(SrcReg); | 
|  | 834 |  | 
|  | 835 | return Reg; | 
|  | 836 | } | 
|  | 837 |  | 
| Tom Stellard | ef3b864 | 2015-01-07 19:56:17 +0000 | [diff] [blame] | 838 | unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { | 
|  | 839 |  | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 840 | if (RI.getRegSizeInBits(*DstRC) == 32) { | 
| Tom Stellard | ef3b864 | 2015-01-07 19:56:17 +0000 | [diff] [blame] | 841 | return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 842 | } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { | 
| Tom Stellard | ef3b864 | 2015-01-07 19:56:17 +0000 | [diff] [blame] | 843 | return AMDGPU::S_MOV_B64; | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 844 | } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 845 | return  AMDGPU::V_MOV_B64_PSEUDO; | 
| Tom Stellard | ef3b864 | 2015-01-07 19:56:17 +0000 | [diff] [blame] | 846 | } | 
|  | 847 | return AMDGPU::COPY; | 
|  | 848 | } | 
|  | 849 |  | 
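|  |  | // Map a spill size in bytes to the matching SGPR spill-save pseudo opcode; |
|  |  | // getVGPRSpillSaveOpcode below does the same for vector registers. |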
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 850 | static unsigned getSGPRSpillSaveOpcode(unsigned Size) { | 
|  | 851 | switch (Size) { | 
|  | 852 | case 4: | 
|  | 853 | return AMDGPU::SI_SPILL_S32_SAVE; | 
|  | 854 | case 8: | 
|  | 855 | return AMDGPU::SI_SPILL_S64_SAVE; | 
| Tim Renouf | 361b5b2 | 2019-03-21 12:01:21 +0000 | [diff] [blame] | 856 | case 12: | 
|  | 857 | return AMDGPU::SI_SPILL_S96_SAVE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 858 | case 16: | 
|  | 859 | return AMDGPU::SI_SPILL_S128_SAVE; | 
| Tim Renouf | 033f99a | 2019-03-22 10:11:21 +0000 | [diff] [blame] | 860 | case 20: | 
|  | 861 | return AMDGPU::SI_SPILL_S160_SAVE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 862 | case 32: | 
|  | 863 | return AMDGPU::SI_SPILL_S256_SAVE; | 
|  | 864 | case 64: | 
|  | 865 | return AMDGPU::SI_SPILL_S512_SAVE; | 
|  | 866 | default: | 
|  | 867 | llvm_unreachable("unknown register size"); | 
|  | 868 | } | 
|  | 869 | } | 
|  | 870 |  | 
|  | 871 | static unsigned getVGPRSpillSaveOpcode(unsigned Size) { | 
|  | 872 | switch (Size) { | 
|  | 873 | case 4: | 
|  | 874 | return AMDGPU::SI_SPILL_V32_SAVE; | 
|  | 875 | case 8: | 
|  | 876 | return AMDGPU::SI_SPILL_V64_SAVE; | 
| Tom Stellard | 703b2ec | 2016-04-12 23:57:30 +0000 | [diff] [blame] | 877 | case 12: | 
|  | 878 | return AMDGPU::SI_SPILL_V96_SAVE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 879 | case 16: | 
|  | 880 | return AMDGPU::SI_SPILL_V128_SAVE; | 
| Tim Renouf | 033f99a | 2019-03-22 10:11:21 +0000 | [diff] [blame] | 881 | case 20: | 
|  | 882 | return AMDGPU::SI_SPILL_V160_SAVE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 883 | case 32: | 
|  | 884 | return AMDGPU::SI_SPILL_V256_SAVE; | 
|  | 885 | case 64: | 
|  | 886 | return AMDGPU::SI_SPILL_V512_SAVE; | 
|  | 887 | default: | 
|  | 888 | llvm_unreachable("unknown register size"); | 
|  | 889 | } | 
|  | 890 | } | 
|  | 891 |  | 
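|  |  | // Spill SrcReg to the stack slot at FrameIndex. SGPR classes use the |
|  |  | // SI_SPILL_S*_SAVE pseudos and tag the frame index with the SGPR_SPILL stack |
|  |  | // ID; VGPR classes use the SI_SPILL_V*_SAVE pseudos. |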
| Tom Stellard | c149dc0 | 2013-11-27 21:23:35 +0000 | [diff] [blame] | 892 | void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, | 
|  | 893 | MachineBasicBlock::iterator MI, | 
|  | 894 | unsigned SrcReg, bool isKill, | 
|  | 895 | int FrameIndex, | 
|  | 896 | const TargetRegisterClass *RC, | 
|  | 897 | const TargetRegisterInfo *TRI) const { | 
| Tom Stellard | 4e07b1d | 2014-06-10 21:20:41 +0000 | [diff] [blame] | 898 | MachineFunction *MF = MBB.getParent(); | 
| Tom Stellard | 42fb60e | 2015-01-14 15:42:31 +0000 | [diff] [blame] | 899 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); | 
| Matthias Braun | 941a705 | 2016-07-28 18:40:00 +0000 | [diff] [blame] | 900 | MachineFrameInfo &FrameInfo = MF->getFrameInfo(); | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 901 | const DebugLoc &DL = MBB.findDebugLoc(MI); | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 902 |  | 
| Matthias Braun | 941a705 | 2016-07-28 18:40:00 +0000 | [diff] [blame] | 903 | unsigned Size = FrameInfo.getObjectSize(FrameIndex); | 
|  | 904 | unsigned Align = FrameInfo.getObjectAlignment(FrameIndex); | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 905 | MachinePointerInfo PtrInfo | 
|  | 906 | = MachinePointerInfo::getFixedStack(*MF, FrameIndex); | 
|  | 907 | MachineMemOperand *MMO | 
|  | 908 | = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, | 
|  | 909 | Size, Align); | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 910 | unsigned SpillSize = TRI->getSpillSize(*RC); | 
| Tom Stellard | c149dc0 | 2013-11-27 21:23:35 +0000 | [diff] [blame] | 911 |  | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 912 | if (RI.isSGPRClass(RC)) { | 
| Matt Arsenault | 5b22dfa | 2015-11-05 05:27:10 +0000 | [diff] [blame] | 913 | MFI->setHasSpilledSGPRs(); | 
|  | 914 |  | 
| Matt Arsenault | 2510a31 | 2016-09-03 06:57:55 +0000 | [diff] [blame] | 915 | // We are only allowed to create one new instruction when spilling | 
|  | 916 | // registers, so we need to use a pseudo instruction for spilling SGPRs. | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 917 | const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize)); | 
| Matt Arsenault | 2510a31 | 2016-09-03 06:57:55 +0000 | [diff] [blame] | 918 |  | 
|  | 919 | // The SGPR spill/restore instructions only work on numbered SGPRs, so we need | 
|  | 920 | // to make sure we are using the correct register class. | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 921 | if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) { | 
| Matt Arsenault | b6e1cc2 | 2016-05-21 00:53:42 +0000 | [diff] [blame] | 922 | MachineRegisterInfo &MRI = MF->getRegInfo(); | 
|  | 923 | MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass); | 
|  | 924 | } | 
|  | 925 |  | 
| Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 926 | MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc) | 
| Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame] | 927 | .addReg(SrcReg, getKillRegState(isKill)) // data | 
|  | 928 | .addFrameIndex(FrameIndex)               // addr | 
| Matt Arsenault | 08906a3 | 2016-10-28 19:43:31 +0000 | [diff] [blame] | 929 | .addMemOperand(MMO) | 
|  | 930 | .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) | 
| Matt Arsenault | b812b7a | 2019-06-05 22:20:47 +0000 | [diff] [blame] | 931 | .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); | 
| Matt Arsenault | 08906a3 | 2016-10-28 19:43:31 +0000 | [diff] [blame] | 932 | // Add the scratch resource registers as implicit uses because we may end up | 
|  | 933 | // needing them, and need to ensure that the reserved registers are | 
|  | 934 | // correctly handled. | 
| Tom Stellard | 42fb60e | 2015-01-14 15:42:31 +0000 | [diff] [blame] | 935 |  | 
| Matt Arsenault | adc59d7 | 2018-04-23 15:51:26 +0000 | [diff] [blame] | 936 | FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL); | 
| Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 937 | if (ST.hasScalarStores()) { | 
|  | 938 | // m0 is used for offset to scalar stores if used to spill. | 
| Nicolai Haehnle | 43cc6c4 | 2017-06-27 08:04:13 +0000 | [diff] [blame] | 939 | Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead); | 
| Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 940 | } | 
|  | 941 |  | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 942 | return; | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 943 | } | 
| Tom Stellard | eba6107 | 2014-05-02 15:41:42 +0000 | [diff] [blame] | 944 |  | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 945 | assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected"); | 
|  | 946 |  | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 947 | unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize); | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 948 | MFI->setHasSpilledVGPRs(); | 
|  | 949 | BuildMI(MBB, MI, DL, get(Opcode)) | 
| Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame] | 950 | .addReg(SrcReg, getKillRegState(isKill)) // data | 
|  | 951 | .addFrameIndex(FrameIndex)               // addr | 
| Matt Arsenault | 2510a31 | 2016-09-03 06:57:55 +0000 | [diff] [blame] | 952 | .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc | 
| Matt Arsenault | b812b7a | 2019-06-05 22:20:47 +0000 | [diff] [blame] | 953 | .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset | 
| Matt Arsenault | 2510a31 | 2016-09-03 06:57:55 +0000 | [diff] [blame] | 954 | .addImm(0)                               // offset | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 955 | .addMemOperand(MMO); | 
|  | 956 | } | 
|  | 957 |  | 
|  | 958 | static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { | 
|  | 959 | switch (Size) { | 
|  | 960 | case 4: | 
|  | 961 | return AMDGPU::SI_SPILL_S32_RESTORE; | 
|  | 962 | case 8: | 
|  | 963 | return AMDGPU::SI_SPILL_S64_RESTORE; | 
| Tim Renouf | 361b5b2 | 2019-03-21 12:01:21 +0000 | [diff] [blame] | 964 | case 12: | 
|  | 965 | return AMDGPU::SI_SPILL_S96_RESTORE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 966 | case 16: | 
|  | 967 | return AMDGPU::SI_SPILL_S128_RESTORE; | 
| Tim Renouf | 033f99a | 2019-03-22 10:11:21 +0000 | [diff] [blame] | 968 | case 20: | 
|  | 969 | return AMDGPU::SI_SPILL_S160_RESTORE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 970 | case 32: | 
|  | 971 | return AMDGPU::SI_SPILL_S256_RESTORE; | 
|  | 972 | case 64: | 
|  | 973 | return AMDGPU::SI_SPILL_S512_RESTORE; | 
|  | 974 | default: | 
|  | 975 | llvm_unreachable("unknown register size"); | 
|  | 976 | } | 
|  | 977 | } | 
|  | 978 |  | 
|  | 979 | static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { | 
|  | 980 | switch (Size) { | 
|  | 981 | case 4: | 
|  | 982 | return AMDGPU::SI_SPILL_V32_RESTORE; | 
|  | 983 | case 8: | 
|  | 984 | return AMDGPU::SI_SPILL_V64_RESTORE; | 
| Tom Stellard | 703b2ec | 2016-04-12 23:57:30 +0000 | [diff] [blame] | 985 | case 12: | 
|  | 986 | return AMDGPU::SI_SPILL_V96_RESTORE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 987 | case 16: | 
|  | 988 | return AMDGPU::SI_SPILL_V128_RESTORE; | 
| Tim Renouf | 033f99a | 2019-03-22 10:11:21 +0000 | [diff] [blame] | 989 | case 20: | 
|  | 990 | return AMDGPU::SI_SPILL_V160_RESTORE; | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 991 | case 32: | 
|  | 992 | return AMDGPU::SI_SPILL_V256_RESTORE; | 
|  | 993 | case 64: | 
|  | 994 | return AMDGPU::SI_SPILL_V512_RESTORE; | 
|  | 995 | default: | 
|  | 996 | llvm_unreachable("unknown register size"); | 
| Tom Stellard | c149dc0 | 2013-11-27 21:23:35 +0000 | [diff] [blame] | 997 | } | 
|  | 998 | } | 
|  | 999 |  | 
|  | 1000 | void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, | 
|  | 1001 | MachineBasicBlock::iterator MI, | 
|  | 1002 | unsigned DestReg, int FrameIndex, | 
|  | 1003 | const TargetRegisterClass *RC, | 
|  | 1004 | const TargetRegisterInfo *TRI) const { | 
| Tom Stellard | 4e07b1d | 2014-06-10 21:20:41 +0000 | [diff] [blame] | 1005 | MachineFunction *MF = MBB.getParent(); | 
| Matt Arsenault | 88ce3dc | 2018-11-26 21:28:40 +0000 | [diff] [blame] | 1006 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); | 
| Matthias Braun | 941a705 | 2016-07-28 18:40:00 +0000 | [diff] [blame] | 1007 | MachineFrameInfo &FrameInfo = MF->getFrameInfo(); | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 1008 | const DebugLoc &DL = MBB.findDebugLoc(MI); | 
| Matthias Braun | 941a705 | 2016-07-28 18:40:00 +0000 | [diff] [blame] | 1009 | unsigned Align = FrameInfo.getObjectAlignment(FrameIndex); | 
|  | 1010 | unsigned Size = FrameInfo.getObjectSize(FrameIndex); | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1011 | unsigned SpillSize = TRI->getSpillSize(*RC); | 
| Tom Stellard | 4e07b1d | 2014-06-10 21:20:41 +0000 | [diff] [blame] | 1012 |  | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1013 | MachinePointerInfo PtrInfo | 
|  | 1014 | = MachinePointerInfo::getFixedStack(*MF, FrameIndex); | 
|  | 1015 |  | 
|  | 1016 | MachineMemOperand *MMO = MF->getMachineMemOperand( | 
|  | 1017 | PtrInfo, MachineMemOperand::MOLoad, Size, Align); | 
|  | 1018 |  | 
|  | 1019 | if (RI.isSGPRClass(RC)) { | 
| Matt Arsenault | 88ce3dc | 2018-11-26 21:28:40 +0000 | [diff] [blame] | 1020 | MFI->setHasSpilledSGPRs(); | 
|  | 1021 |  | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1022 | // FIXME: Maybe this should not include a memoperand because it will be | 
|  | 1023 | // lowered to non-memory instructions. | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1024 | const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize)); | 
|  | 1025 | if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) { | 
| Matt Arsenault | b6e1cc2 | 2016-05-21 00:53:42 +0000 | [diff] [blame] | 1026 | MachineRegisterInfo &MRI = MF->getRegInfo(); | 
|  | 1027 | MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass); | 
|  | 1028 | } | 
|  | 1029 |  | 
| Matt Arsenault | adc59d7 | 2018-04-23 15:51:26 +0000 | [diff] [blame] | 1030 | FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL); | 
| Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 1031 | MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg) | 
| Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame] | 1032 | .addFrameIndex(FrameIndex) // addr | 
| Matt Arsenault | 08906a3 | 2016-10-28 19:43:31 +0000 | [diff] [blame] | 1033 | .addMemOperand(MMO) | 
|  | 1034 | .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) | 
| Matt Arsenault | b812b7a | 2019-06-05 22:20:47 +0000 | [diff] [blame] | 1035 | .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1036 |  | 
| Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 1037 | if (ST.hasScalarStores()) { | 
|  | 1038 | // m0 is used for offset to scalar stores if used to spill. | 
| Nicolai Haehnle | 43cc6c4 | 2017-06-27 08:04:13 +0000 | [diff] [blame] | 1039 | Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead); | 
| Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 1040 | } | 
|  | 1041 |  | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1042 | return; | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1043 | } | 
| Tom Stellard | eba6107 | 2014-05-02 15:41:42 +0000 | [diff] [blame] | 1044 |  | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1045 | assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected"); | 
|  | 1046 |  | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1047 | unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize); | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1048 | BuildMI(MBB, MI, DL, get(Opcode), DestReg) | 
| Matt Arsenault | b812b7a | 2019-06-05 22:20:47 +0000 | [diff] [blame] | 1049 | .addFrameIndex(FrameIndex)           // vaddr | 
|  | 1050 | .addReg(MFI->getScratchRSrcReg())    // scratch_rsrc | 
|  | 1051 | .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset | 
|  | 1052 | .addImm(0)                           // offset | 
| Matt Arsenault | 08f14de | 2015-11-06 18:07:53 +0000 | [diff] [blame] | 1053 | .addMemOperand(MMO); | 
| Tom Stellard | c149dc0 | 2013-11-27 21:23:35 +0000 | [diff] [blame] | 1054 | } | 
|  | 1055 |  | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1056 | /// \param FrameOffset Offset in bytes of the FrameIndex being spilled. | 
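|  |  | /// \return \p TmpReg holding the per-lane LDS byte address for the spill, or |
|  |  | ///         AMDGPU::NoRegister if no VGPR could be found for the thread ID. |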
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1057 | unsigned SIInstrInfo::calculateLDSSpillAddress( | 
|  | 1058 | MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg, | 
|  | 1059 | unsigned FrameOffset, unsigned Size) const { | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1060 | MachineFunction *MF = MBB.getParent(); | 
|  | 1061 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 1062 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 1063 | const DebugLoc &DL = MBB.findDebugLoc(MI); | 
| Konstantin Zhuravlyov | 1d65026 | 2016-09-06 20:22:28 +0000 | [diff] [blame] | 1064 | unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize(); | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1065 | unsigned WavefrontSize = ST.getWavefrontSize(); | 
|  | 1066 |  | 
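|  |  | // The per-thread ID is computed once, pre-scaled to a byte offset (TID * 4), |
|  |  | // and cached in SIMachineFunctionInfo for later spills. |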
|  | 1067 | unsigned TIDReg = MFI->getTIDReg(); | 
|  | 1068 | if (!MFI->hasCalculatedTID()) { | 
|  | 1069 | MachineBasicBlock &Entry = MBB.getParent()->front(); | 
|  | 1070 | MachineBasicBlock::iterator Insert = Entry.front(); | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 1071 | const DebugLoc &DL = Insert->getDebugLoc(); | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1072 |  | 
| Tom Stellard | 19f4301 | 2016-07-28 14:30:43 +0000 | [diff] [blame] | 1073 | TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass, | 
|  | 1074 | *MF); | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1075 | if (TIDReg == AMDGPU::NoRegister) | 
|  | 1076 | return TIDReg; | 
|  | 1077 |  | 
| Matthias Braun | f1caa28 | 2017-12-15 22:22:58 +0000 | [diff] [blame] | 1078 | if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) && | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1079 | WorkGroupSize > WavefrontSize) { | 
| Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1080 | unsigned TIDIGXReg | 
| Matt Arsenault | 8623e8d | 2017-08-03 23:00:29 +0000 | [diff] [blame] | 1081 | = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X); | 
| Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1082 | unsigned TIDIGYReg | 
| Matt Arsenault | 8623e8d | 2017-08-03 23:00:29 +0000 | [diff] [blame] | 1083 | = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); | 
| Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1084 | unsigned TIDIGZReg | 
| Matt Arsenault | 8623e8d | 2017-08-03 23:00:29 +0000 | [diff] [blame] | 1085 | = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1086 | unsigned InputPtrReg = | 
| Matt Arsenault | 8623e8d | 2017-08-03 23:00:29 +0000 | [diff] [blame] | 1087 | MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); | 
| Benjamin Kramer | 7149aab | 2015-03-01 18:09:56 +0000 | [diff] [blame] | 1088 | for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) { | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1089 | if (!Entry.isLiveIn(Reg)) | 
|  | 1090 | Entry.addLiveIn(Reg); | 
|  | 1091 | } | 
|  | 1092 |  | 
| Matthias Braun | 7dc03f0 | 2016-04-06 02:47:09 +0000 | [diff] [blame] | 1093 | RS->enterBasicBlock(Entry); | 
| Matt Arsenault | 0c90e95 | 2015-11-06 18:17:45 +0000 | [diff] [blame] | 1094 | // FIXME: Can we scavenge an SReg_64 and access the subregs? | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1095 | unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0); | 
|  | 1096 | unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0); | 
|  | 1097 | BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0) | 
|  | 1098 | .addReg(InputPtrReg) | 
|  | 1099 | .addImm(SI::KernelInputOffsets::NGROUPS_Z); | 
|  | 1100 | BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1) | 
|  | 1101 | .addReg(InputPtrReg) | 
|  | 1102 | .addImm(SI::KernelInputOffsets::NGROUPS_Y); | 
|  | 1103 |  | 
|  | 1104 | // NGROUPS.X * NGROUPS.Y | 
|  | 1105 | BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1) | 
|  | 1106 | .addReg(STmp1) | 
|  | 1107 | .addReg(STmp0); | 
|  | 1108 | // (NGROUPS.X * NGROUPS.Y) * TIDIG.X | 
|  | 1109 | BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg) | 
|  | 1110 | .addReg(STmp1) | 
|  | 1111 | .addReg(TIDIGXReg); | 
|  | 1112 | // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X) | 
|  | 1113 | BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg) | 
|  | 1114 | .addReg(STmp0) | 
|  | 1115 | .addReg(TIDIGYReg) | 
|  | 1116 | .addReg(TIDReg); | 
|  | 1117 | // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 1118 | getAddNoCarry(Entry, Insert, DL, TIDReg) | 
|  | 1119 | .addReg(TIDReg) | 
| Tim Renouf | cfdfba9 | 2019-03-18 19:35:44 +0000 | [diff] [blame] | 1120 | .addReg(TIDIGZReg) | 
|  | 1121 | .addImm(0); // clamp bit | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1122 | } else { | 
|  | 1123 | // Get the wave id | 
|  | 1124 | BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64), | 
|  | 1125 | TIDReg) | 
|  | 1126 | .addImm(-1) | 
|  | 1127 | .addImm(0); | 
|  | 1128 |  | 
| Marek Olsak | c536850 | 2015-01-15 18:43:01 +0000 | [diff] [blame] | 1129 | BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64), | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1130 | TIDReg) | 
|  | 1131 | .addImm(-1) | 
|  | 1132 | .addReg(TIDReg); | 
|  | 1133 | } | 
|  | 1134 |  | 
|  | 1135 | BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32), | 
|  | 1136 | TIDReg) | 
|  | 1137 | .addImm(2) | 
|  | 1138 | .addReg(TIDReg); | 
|  | 1139 | MFI->setTIDReg(TIDReg); | 
|  | 1140 | } | 
|  | 1141 |  | 
|  | 1142 | // Add the frame offset to the LDS base: LDSSize + FrameOffset * WorkGroupSize + TID * 4 | 
| Matt Arsenault | 52ef401 | 2016-07-26 16:45:58 +0000 | [diff] [blame] | 1143 | unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize); | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 1144 | getAddNoCarry(MBB, MI, DL, TmpReg) | 
|  | 1145 | .addImm(LDSOffset) | 
| Tim Renouf | cfdfba9 | 2019-03-18 19:35:44 +0000 | [diff] [blame] | 1146 | .addReg(TIDReg) | 
|  | 1147 | .addImm(0); // clamp bit | 
| Tom Stellard | 9646890 | 2014-09-24 01:33:17 +0000 | [diff] [blame] | 1148 |  | 
|  | 1149 | return TmpReg; | 
|  | 1150 | } | 
|  | 1151 |  | 
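|  |  | /// Insert \p Count wait states by emitting S_NOP instructions; each S_NOP |
|  |  | /// encodes up to 8 wait states (immediate value = wait states - 1). |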
| Tom Stellard | d37630e | 2016-04-07 14:47:07 +0000 | [diff] [blame] | 1152 | void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB, | 
|  | 1153 | MachineBasicBlock::iterator MI, | 
| Nicolai Haehnle | 87323da | 2015-12-17 16:46:42 +0000 | [diff] [blame] | 1154 | int Count) const { | 
| Tom Stellard | 341e293 | 2016-05-02 18:02:24 +0000 | [diff] [blame] | 1155 | DebugLoc DL = MBB.findDebugLoc(MI); | 
| Tom Stellard | eba6107 | 2014-05-02 15:41:42 +0000 | [diff] [blame] | 1156 | while (Count > 0) { | 
|  | 1157 | int Arg; | 
|  | 1158 | if (Count >= 8) | 
|  | 1159 | Arg = 7; | 
|  | 1160 | else | 
|  | 1161 | Arg = Count - 1; | 
|  | 1162 | Count -= 8; | 
| Tom Stellard | 341e293 | 2016-05-02 18:02:24 +0000 | [diff] [blame] | 1163 | BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)) | 
| Tom Stellard | eba6107 | 2014-05-02 15:41:42 +0000 | [diff] [blame] | 1164 | .addImm(Arg); | 
|  | 1165 | } | 
|  | 1166 | } | 
|  | 1167 |  | 
| Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 1168 | void SIInstrInfo::insertNoop(MachineBasicBlock &MBB, | 
|  | 1169 | MachineBasicBlock::iterator MI) const { | 
|  | 1170 | insertWaitStates(MBB, MI, 1); | 
|  | 1171 | } | 
|  | 1172 |  | 
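|  |  | // Terminate the final block of an entry function that has no successors and |
|  |  | // no terminator yet: S_ENDPGM for void returns, SI_RETURN_TO_EPILOG otherwise. |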
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1173 | void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const { | 
|  | 1174 | auto MF = MBB.getParent(); | 
|  | 1175 | SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); | 
|  | 1176 |  | 
|  | 1177 | assert(Info->isEntryFunction()); | 
|  | 1178 |  | 
|  | 1179 | if (MBB.succ_empty()) { | 
|  | 1180 | bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end(); | 
| David Stuttard | 20ea21c | 2019-03-12 09:52:58 +0000 | [diff] [blame] | 1181 | if (HasNoTerminator) { | 
|  | 1182 | if (Info->returnsVoid()) { | 
|  | 1183 | BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0); | 
|  | 1184 | } else { | 
|  | 1185 | BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG)); | 
|  | 1186 | } | 
|  | 1187 | } | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1188 | } | 
|  | 1189 | } | 
|  | 1190 |  | 
| Stanislav Mekhanoshin | f92ed69 | 2019-01-21 19:11:26 +0000 | [diff] [blame] | 1191 | unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) { | 
| Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 1192 | switch (MI.getOpcode()) { | 
|  | 1193 | default: return 1; // FIXME: Do wait states equal cycles? | 
|  | 1194 |  | 
|  | 1195 | case AMDGPU::S_NOP: | 
|  | 1196 | return MI.getOperand(0).getImm() + 1; | 
|  | 1197 | } | 
|  | 1198 | } | 
|  | 1199 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1200 | bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { | 
|  | 1201 | MachineBasicBlock &MBB = *MI.getParent(); | 
| Tom Stellard | eba6107 | 2014-05-02 15:41:42 +0000 | [diff] [blame] | 1202 | DebugLoc DL = MBB.findDebugLoc(MI); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1203 | switch (MI.getOpcode()) { | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 1204 | default: return TargetInstrInfo::expandPostRAPseudo(MI); | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 1205 | case AMDGPU::S_MOV_B64_term: | 
| Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 1206 | // This is only a terminator to get the correct spill code placement during | 
|  | 1207 | // register allocation. | 
|  | 1208 | MI.setDesc(get(AMDGPU::S_MOV_B64)); | 
|  | 1209 | break; | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 1210 |  | 
|  | 1211 | case AMDGPU::S_XOR_B64_term: | 
| Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 1212 | // This is only a terminator to get the correct spill code placement during | 
|  | 1213 | // register allocation. | 
|  | 1214 | MI.setDesc(get(AMDGPU::S_XOR_B64)); | 
|  | 1215 | break; | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 1216 |  | 
|  | 1217 | case AMDGPU::S_ANDN2_B64_term: | 
| Matt Arsenault | e674075 | 2016-09-29 01:44:16 +0000 | [diff] [blame] | 1218 | // This is only a terminator to get the correct spill code placement during | 
|  | 1219 | // register allocation. | 
|  | 1220 | MI.setDesc(get(AMDGPU::S_ANDN2_B64)); | 
|  | 1221 | break; | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 1222 |  | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1223 | case AMDGPU::V_MOV_B64_PSEUDO: { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1224 | unsigned Dst = MI.getOperand(0).getReg(); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1225 | unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0); | 
|  | 1226 | unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1); | 
|  | 1227 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1228 | const MachineOperand &SrcOp = MI.getOperand(1); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1229 | // FIXME: Will this work for 64-bit floating point immediates? | 
|  | 1230 | assert(!SrcOp.isFPImm()); | 
|  | 1231 | if (SrcOp.isImm()) { | 
|  | 1232 | APInt Imm(64, SrcOp.getImm()); | 
|  | 1233 | BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) | 
| Matt Arsenault | 80bc355 | 2016-06-13 15:53:52 +0000 | [diff] [blame] | 1234 | .addImm(Imm.getLoBits(32).getZExtValue()) | 
|  | 1235 | .addReg(Dst, RegState::Implicit | RegState::Define); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1236 | BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) | 
| Matt Arsenault | 80bc355 | 2016-06-13 15:53:52 +0000 | [diff] [blame] | 1237 | .addImm(Imm.getHiBits(32).getZExtValue()) | 
|  | 1238 | .addReg(Dst, RegState::Implicit | RegState::Define); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1239 | } else { | 
|  | 1240 | assert(SrcOp.isReg()); | 
|  | 1241 | BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) | 
| Matt Arsenault | 80bc355 | 2016-06-13 15:53:52 +0000 | [diff] [blame] | 1242 | .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) | 
|  | 1243 | .addReg(Dst, RegState::Implicit | RegState::Define); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1244 | BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) | 
| Matt Arsenault | 80bc355 | 2016-06-13 15:53:52 +0000 | [diff] [blame] | 1245 | .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) | 
|  | 1246 | .addReg(Dst, RegState::Implicit | RegState::Define); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1247 | } | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1248 | MI.eraseFromParent(); | 
| Tom Stellard | 4842c05 | 2015-01-07 20:27:25 +0000 | [diff] [blame] | 1249 | break; | 
|  | 1250 | } | 
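|  |  | // V_SET_INACTIVE_B32/B64 write operand 2 into the currently inactive lanes: |
|  |  | // EXEC is inverted with S_NOT_B64, the move is performed, and EXEC is |
|  |  | // restored by a second S_NOT_B64. |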
| Connor Abbott | 66b9bd6 | 2017-08-04 18:36:54 +0000 | [diff] [blame] | 1251 | case AMDGPU::V_SET_INACTIVE_B32: { | 
|  | 1252 | BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) | 
|  | 1253 | .addReg(AMDGPU::EXEC); | 
|  | 1254 | BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) | 
|  | 1255 | .add(MI.getOperand(2)); | 
|  | 1256 | BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) | 
|  | 1257 | .addReg(AMDGPU::EXEC); | 
|  | 1258 | MI.eraseFromParent(); | 
|  | 1259 | break; | 
|  | 1260 | } | 
|  | 1261 | case AMDGPU::V_SET_INACTIVE_B64: { | 
|  | 1262 | BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) | 
|  | 1263 | .addReg(AMDGPU::EXEC); | 
|  | 1264 | MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), | 
|  | 1265 | MI.getOperand(0).getReg()) | 
|  | 1266 | .add(MI.getOperand(2)); | 
|  | 1267 | expandPostRAPseudo(*Copy); | 
|  | 1268 | BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) | 
|  | 1269 | .addReg(AMDGPU::EXEC); | 
|  | 1270 | MI.eraseFromParent(); | 
|  | 1271 | break; | 
|  | 1272 | } | 
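|  |  | // The V_MOVRELD_B32_V* pseudos expand to V_MOVRELD_B32_e32 writing a single |
|  |  | // subregister of the vector, with the full vector register tied as an |
|  |  | // implicit def and use. |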
| Nicolai Haehnle | a785209 | 2016-10-24 14:56:02 +0000 | [diff] [blame] | 1273 | case AMDGPU::V_MOVRELD_B32_V1: | 
|  | 1274 | case AMDGPU::V_MOVRELD_B32_V2: | 
|  | 1275 | case AMDGPU::V_MOVRELD_B32_V4: | 
|  | 1276 | case AMDGPU::V_MOVRELD_B32_V8: | 
|  | 1277 | case AMDGPU::V_MOVRELD_B32_V16: { | 
|  | 1278 | const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32); | 
|  | 1279 | unsigned VecReg = MI.getOperand(0).getReg(); | 
|  | 1280 | bool IsUndef = MI.getOperand(1).isUndef(); | 
|  | 1281 | unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm(); | 
|  | 1282 | assert(VecReg == MI.getOperand(1).getReg()); | 
|  | 1283 |  | 
|  | 1284 | MachineInstr *MovRel = | 
|  | 1285 | BuildMI(MBB, MI, DL, MovRelDesc) | 
|  | 1286 | .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 1287 | .add(MI.getOperand(2)) | 
| Nicolai Haehnle | a785209 | 2016-10-24 14:56:02 +0000 | [diff] [blame] | 1288 | .addReg(VecReg, RegState::ImplicitDefine) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 1289 | .addReg(VecReg, | 
|  | 1290 | RegState::Implicit | (IsUndef ? RegState::Undef : 0)); | 
| Nicolai Haehnle | a785209 | 2016-10-24 14:56:02 +0000 | [diff] [blame] | 1291 |  | 
|  | 1292 | const int ImpDefIdx = | 
|  | 1293 | MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses(); | 
|  | 1294 | const int ImpUseIdx = ImpDefIdx + 1; | 
|  | 1295 | MovRel->tieOperands(ImpDefIdx, ImpUseIdx); | 
|  | 1296 |  | 
|  | 1297 | MI.eraseFromParent(); | 
|  | 1298 | break; | 
|  | 1299 | } | 
| Tom Stellard | bf3e6e5 | 2016-06-14 20:29:59 +0000 | [diff] [blame] | 1300 | case AMDGPU::SI_PC_ADD_REL_OFFSET: { | 
| Tom Stellard | c93fc11 | 2015-12-10 02:13:01 +0000 | [diff] [blame] | 1301 | MachineFunction &MF = *MBB.getParent(); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1302 | unsigned Reg = MI.getOperand(0).getReg(); | 
| Matt Arsenault | 11587d9 | 2016-08-10 19:11:45 +0000 | [diff] [blame] | 1303 | unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0); | 
|  | 1304 | unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1); | 
| Tom Stellard | c93fc11 | 2015-12-10 02:13:01 +0000 | [diff] [blame] | 1305 |  | 
|  | 1306 | // Create a bundle so these instructions won't be re-ordered by the | 
|  | 1307 | // post-RA scheduler. | 
|  | 1308 | MIBundleBuilder Bundler(MBB, MI); | 
|  | 1309 | Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); | 
|  | 1310 |  | 
|  | 1311 | // Add 32-bit offset from this instruction to the start of the | 
|  | 1312 | // constant data. | 
|  | 1313 | Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1314 | .addReg(RegLo) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 1315 | .add(MI.getOperand(1))); | 
| Tom Stellard | c93fc11 | 2015-12-10 02:13:01 +0000 | [diff] [blame] | 1316 |  | 
| Konstantin Zhuravlyov | c96b5d7 | 2016-10-14 04:37:34 +0000 | [diff] [blame] | 1317 | MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) | 
|  | 1318 | .addReg(RegHi); | 
|  | 1319 | if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE) | 
|  | 1320 | MIB.addImm(0); | 
|  | 1321 | else | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 1322 | MIB.add(MI.getOperand(2)); | 
| Konstantin Zhuravlyov | c96b5d7 | 2016-10-14 04:37:34 +0000 | [diff] [blame] | 1323 |  | 
|  | 1324 | Bundler.append(MIB); | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 1325 | finalizeBundle(MBB, Bundler.begin()); | 
| Tom Stellard | c93fc11 | 2015-12-10 02:13:01 +0000 | [diff] [blame] | 1326 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1327 | MI.eraseFromParent(); | 
| Tom Stellard | c93fc11 | 2015-12-10 02:13:01 +0000 | [diff] [blame] | 1328 | break; | 
|  | 1329 | } | 
| Neil Henning | 0a30f33 | 2019-04-01 15:19:52 +0000 | [diff] [blame] | 1330 | case AMDGPU::ENTER_WWM: { | 
|  | 1331 | // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when | 
|  | 1332 | // WWM is entered. | 
|  | 1333 | MI.setDesc(get(AMDGPU::S_OR_SAVEEXEC_B64)); | 
|  | 1334 | break; | 
|  | 1335 | } | 
| Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 1336 | case AMDGPU::EXIT_WWM: { | 
| Neil Henning | 0a30f33 | 2019-04-01 15:19:52 +0000 | [diff] [blame] | 1337 | // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when | 
|  | 1338 | // WWM is exited. | 
| Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 1339 | MI.setDesc(get(AMDGPU::S_MOV_B64)); | 
|  | 1340 | break; | 
|  | 1341 | } | 
| Stanislav Mekhanoshin | 739174c | 2018-05-31 20:13:51 +0000 | [diff] [blame] | 1342 | case TargetOpcode::BUNDLE: { | 
|  | 1343 | if (!MI.mayLoad()) | 
|  | 1344 | return false; | 
|  | 1345 |  | 
|  | 1346 | // If it is a load, it must be a memory clause. | 
|  | 1347 | for (MachineBasicBlock::instr_iterator I = MI.getIterator(); | 
|  | 1348 | I->isBundledWithSucc(); ++I) { | 
|  | 1349 | I->unbundleFromSucc(); | 
|  | 1350 | for (MachineOperand &MO : I->operands()) | 
|  | 1351 | if (MO.isReg()) | 
|  | 1352 | MO.setIsInternalRead(false); | 
|  | 1353 | } | 
|  | 1354 |  | 
|  | 1355 | MI.eraseFromParent(); | 
|  | 1356 | break; | 
|  | 1357 | } | 
| Tom Stellard | eba6107 | 2014-05-02 15:41:42 +0000 | [diff] [blame] | 1358 | } | 
|  | 1359 | return true; | 
|  | 1360 | } | 
|  | 1361 |  | 
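|  |  | /// Exchange the src0/src1 source-modifier immediates of \p MI, named by |
|  |  | /// \p Src0OpName and \p Src1OpName; returns false if the instruction has no |
|  |  | /// source modifiers. |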
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1362 | bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, | 
|  | 1363 | MachineOperand &Src0, | 
|  | 1364 | unsigned Src0OpName, | 
|  | 1365 | MachineOperand &Src1, | 
|  | 1366 | unsigned Src1OpName) const { | 
|  | 1367 | MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); | 
|  | 1368 | if (!Src0Mods) | 
|  | 1369 | return false; | 
|  | 1370 |  | 
|  | 1371 | MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); | 
|  | 1372 | assert(Src1Mods && | 
|  | 1373 | "All commutable instructions have both src0 and src1 modifiers"); | 
|  | 1374 |  | 
|  | 1375 | int Src0ModsVal = Src0Mods->getImm(); | 
|  | 1376 | int Src1ModsVal = Src1Mods->getImm(); | 
|  | 1377 |  | 
|  | 1378 | Src1Mods->setImm(Src0ModsVal); | 
|  | 1379 | Src0Mods->setImm(Src1ModsVal); | 
|  | 1380 | return true; | 
|  | 1381 | } | 
|  | 1382 |  | 
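|  |  | // Exchange a register operand with an immediate or frame-index operand in |
|  |  | // place. Returns nullptr if NonRegOp is neither an immediate nor a frame |
|  |  | // index. |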
|  | 1383 | static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, | 
|  | 1384 | MachineOperand &RegOp, | 
| Matt Arsenault | 25dba30 | 2016-09-13 19:03:12 +0000 | [diff] [blame] | 1385 | MachineOperand &NonRegOp) { | 
|  | 1386 | unsigned Reg = RegOp.getReg(); | 
|  | 1387 | unsigned SubReg = RegOp.getSubReg(); | 
|  | 1388 | bool IsKill = RegOp.isKill(); | 
|  | 1389 | bool IsDead = RegOp.isDead(); | 
|  | 1390 | bool IsUndef = RegOp.isUndef(); | 
|  | 1391 | bool IsDebug = RegOp.isDebug(); | 
|  | 1392 |  | 
|  | 1393 | if (NonRegOp.isImm()) | 
|  | 1394 | RegOp.ChangeToImmediate(NonRegOp.getImm()); | 
|  | 1395 | else if (NonRegOp.isFI()) | 
|  | 1396 | RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); | 
|  | 1397 | else | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1398 | return nullptr; | 
|  | 1399 |  | 
| Matt Arsenault | 25dba30 | 2016-09-13 19:03:12 +0000 | [diff] [blame] | 1400 | NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); | 
|  | 1401 | NonRegOp.setSubReg(SubReg); | 
|  | 1402 |  | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1403 | return &MI; | 
|  | 1404 | } | 
|  | 1405 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1406 | MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1407 | unsigned Src0Idx, | 
|  | 1408 | unsigned Src1Idx) const { | 
|  | 1409 | assert(!NewMI && "this should never be used"); | 
|  | 1410 |  | 
|  | 1411 | unsigned Opc = MI.getOpcode(); | 
|  | 1412 | int CommutedOpcode = commuteOpcode(Opc); | 
| Marek Olsak | cfbdba2 | 2015-06-26 20:29:10 +0000 | [diff] [blame] | 1413 | if (CommutedOpcode == -1) | 
|  | 1414 | return nullptr; | 
|  | 1415 |  | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1416 | assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == | 
|  | 1417 | static_cast<int>(Src0Idx) && | 
|  | 1418 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == | 
|  | 1419 | static_cast<int>(Src1Idx) && | 
|  | 1420 | "inconsistency with findCommutedOpIndices"); | 
|  | 1421 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1422 | MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1423 | MachineOperand &Src1 = MI.getOperand(Src1Idx); | 
| Matt Arsenault | aa5ccfb | 2014-10-17 18:00:37 +0000 | [diff] [blame] | 1424 |  | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1425 | MachineInstr *CommutedMI = nullptr; | 
|  | 1426 | if (Src0.isReg() && Src1.isReg()) { | 
|  | 1427 | if (isOperandLegal(MI, Src1Idx, &Src0)) { | 
|  | 1428 | // Be sure to copy the source modifiers to the right place. | 
|  | 1429 | CommutedMI | 
|  | 1430 | = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); | 
| Matt Arsenault | d282ada | 2014-10-17 18:00:48 +0000 | [diff] [blame] | 1431 | } | 
|  | 1432 |  | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1433 | } else if (Src0.isReg() && !Src1.isReg()) { | 
|  | 1434 | // src0 should always be able to support any operand type, so no need to | 
|  | 1435 | // check operand legality. | 
|  | 1436 | CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); | 
|  | 1437 | } else if (!Src0.isReg() && Src1.isReg()) { | 
|  | 1438 | if (isOperandLegal(MI, Src1Idx, &Src0)) | 
|  | 1439 | CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1440 | } else { | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1441 | // FIXME: Found two non-register operands to commute. This does happen. | 
|  | 1442 | return nullptr; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 1443 | } | 
| Christian Konig | 3c14580 | 2013-03-27 09:12:59 +0000 | [diff] [blame] | 1444 |  | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1445 | if (CommutedMI) { | 
|  | 1446 | swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, | 
|  | 1447 | Src1, AMDGPU::OpName::src1_modifiers); | 
|  | 1448 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1449 | CommutedMI->setDesc(get(CommutedOpcode)); | 
| Matt Arsenault | bbb47da | 2016-09-08 17:19:29 +0000 | [diff] [blame] | 1450 | } | 
| Christian Konig | 3c14580 | 2013-03-27 09:12:59 +0000 | [diff] [blame] | 1451 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1452 | return CommutedMI; | 
| Christian Konig | 76edd4f | 2013-02-26 17:52:29 +0000 | [diff] [blame] | 1453 | } | 
|  | 1454 |  | 
| Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1455 | // This needs to be implemented because the source modifiers may be inserted | 
|  | 1456 | // between the true commutable operands, and the base | 
|  | 1457 | // TargetInstrInfo::commuteInstruction uses it. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 1458 | bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0, | 
| Andrew Kaylor | 16c4da0 | 2015-09-28 20:33:22 +0000 | [diff] [blame] | 1459 | unsigned &SrcOpIdx1) const { | 
| Alexander Timofeev | db7ee76 | 2018-09-11 11:56:50 +0000 | [diff] [blame] | 1460 | return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); | 
|  | 1461 | } | 
|  | 1462 |  | 
|  | 1463 | bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, | 
|  | 1464 | unsigned &SrcOpIdx1) const { | 
|  | 1465 | if (!Desc.isCommutable()) | 
| Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1466 | return false; | 
|  | 1467 |  | 
| Alexander Timofeev | db7ee76 | 2018-09-11 11:56:50 +0000 | [diff] [blame] | 1468 | unsigned Opc = Desc.getOpcode(); | 
| Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1469 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); | 
|  | 1470 | if (Src0Idx == -1) | 
|  | 1471 | return false; | 
|  | 1472 |  | 
| Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1473 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); | 
|  | 1474 | if (Src1Idx == -1) | 
|  | 1475 | return false; | 
|  | 1476 |  | 
| Andrew Kaylor | 16c4da0 | 2015-09-28 20:33:22 +0000 | [diff] [blame] | 1477 | return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); | 
| Matt Arsenault | 92befe7 | 2014-09-26 17:54:54 +0000 | [diff] [blame] | 1478 | } | 
|  | 1479 |  | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1480 | bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, | 
|  | 1481 | int64_t BrOffset) const { | 
|  | 1482 | // BranchRelaxation should never have to check s_setpc_b64 because its dest | 
|  | 1483 | // block is unanalyzable. | 
|  | 1484 | assert(BranchOp != AMDGPU::S_SETPC_B64); | 
|  | 1485 |  | 
|  | 1486 | // Convert to dwords. | 
|  | 1487 | BrOffset /= 4; | 
|  | 1488 |  | 
|  | 1489 | // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is | 
|  | 1490 | // from the next instruction. | 
|  | 1491 | BrOffset -= 1; | 
|  | 1492 |  | 
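|  |  | // A signed 16-bit dword offset gives these branches a reach of roughly |
|  |  | // +/-128 KiB. |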
|  | 1493 | return isIntN(BranchOffsetBits, BrOffset); | 
|  | 1494 | } | 
|  | 1495 |  | 
|  | 1496 | MachineBasicBlock *SIInstrInfo::getBranchDestBlock( | 
|  | 1497 | const MachineInstr &MI) const { | 
|  | 1498 | if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { | 
|  | 1499 | // This would be a difficult analysis to perform, but such a branch is always | 
|  | 1500 | // legal, so there's no need to analyze it. | 
|  | 1501 | return nullptr; | 
|  | 1502 | } | 
|  | 1503 |  | 
|  | 1504 | return MI.getOperand(0).getMBB(); | 
|  | 1505 | } | 
|  | 1506 |  | 
|  | 1507 | unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, | 
|  | 1508 | MachineBasicBlock &DestBB, | 
|  | 1509 | const DebugLoc &DL, | 
|  | 1510 | int64_t BrOffset, | 
|  | 1511 | RegScavenger *RS) const { | 
|  | 1512 | assert(RS && "RegScavenger required for long branching"); | 
|  | 1513 | assert(MBB.empty() && | 
|  | 1514 | "new block should be inserted for expanding unconditional branch"); | 
|  | 1515 | assert(MBB.pred_size() == 1); | 
|  | 1516 |  | 
|  | 1517 | MachineFunction *MF = MBB.getParent(); | 
|  | 1518 | MachineRegisterInfo &MRI = MF->getRegInfo(); | 
|  | 1519 |  | 
|  | 1520 | // FIXME: Virtual register workaround for RegScavenger not working with empty | 
|  | 1521 | // blocks. | 
|  | 1522 | unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 1523 |  | 
|  | 1524 | auto I = MBB.end(); | 
|  | 1525 |  | 
|  | 1526 | // We need to compute the offset relative to the instruction immediately after | 
|  | 1527 | // s_getpc_b64. Insert pc arithmetic code before last terminator. | 
|  | 1528 | MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); | 
|  | 1529 |  | 
|  | 1530 | // TODO: Handle > 32-bit block address. | 
|  | 1531 | if (BrOffset >= 0) { | 
|  | 1532 | BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) | 
|  | 1533 | .addReg(PCReg, RegState::Define, AMDGPU::sub0) | 
|  | 1534 | .addReg(PCReg, 0, AMDGPU::sub0) | 
| Matt Arsenault | 0f8a764 | 2019-06-05 20:32:25 +0000 | [diff] [blame] | 1535 | .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1536 | BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) | 
|  | 1537 | .addReg(PCReg, RegState::Define, AMDGPU::sub1) | 
|  | 1538 | .addReg(PCReg, 0, AMDGPU::sub1) | 
|  | 1539 | .addImm(0); | 
|  | 1540 | } else { | 
|  | 1541 | // Backwards branch. | 
|  | 1542 | BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32)) | 
|  | 1543 | .addReg(PCReg, RegState::Define, AMDGPU::sub0) | 
|  | 1544 | .addReg(PCReg, 0, AMDGPU::sub0) | 
| Matt Arsenault | 0f8a764 | 2019-06-05 20:32:25 +0000 | [diff] [blame] | 1545 | .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD); | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1546 | BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32)) | 
|  | 1547 | .addReg(PCReg, RegState::Define, AMDGPU::sub1) | 
|  | 1548 | .addReg(PCReg, 0, AMDGPU::sub1) | 
|  | 1549 | .addImm(0); | 
|  | 1550 | } | 
|  | 1551 |  | 
|  | 1552 | // Insert the indirect branch after the other terminator. | 
|  | 1553 | BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) | 
|  | 1554 | .addReg(PCReg); | 
|  | 1555 |  | 
|  | 1556 | // FIXME: If spilling is necessary, this will fail because this scavenger has | 
|  | 1557 | // no emergency stack slots. It is non-trivial to spill in this situation, | 
|  | 1558 | // because the restore code needs to be specially placed after the | 
|  | 1559 | // jump. BranchRelaxation then needs to be made aware of the newly inserted | 
|  | 1560 | // block. | 
|  | 1561 | // | 
|  | 1562 | // If a spill is needed for the pc register pair, we need to insert a spill | 
|  | 1563 | // restore block right before the destination block, and insert a short branch | 
|  | 1564 | // into the old destination block's fallthrough predecessor. | 
|  | 1565 | // e.g.: | 
|  | 1566 | // | 
|  | 1567 | // s_cbranch_scc0 skip_long_branch: | 
|  | 1568 | // | 
|  | 1569 | // long_branch_bb: | 
|  | 1570 | //   spill s[8:9] | 
|  | 1571 | //   s_getpc_b64 s[8:9] | 
|  | 1572 | //   s_add_u32 s8, s8, restore_bb | 
|  | 1573 | //   s_addc_u32 s9, s9, 0 | 
|  | 1574 | //   s_setpc_b64 s[8:9] | 
|  | 1575 | // | 
|  | 1576 | // skip_long_branch: | 
|  | 1577 | //   foo; | 
|  | 1578 | // | 
|  | 1579 | // ..... | 
|  | 1580 | // | 
|  | 1581 | // dest_bb_fallthrough_predecessor: | 
|  | 1582 | // bar; | 
|  | 1583 | // s_branch dest_bb | 
|  | 1584 | // | 
|  | 1585 | // restore_bb: | 
|  | 1586 | //  restore s[8:9] | 
|  | 1587 | //  fallthrough dest_bb | 
|  | 1588 | // | 
|  | 1589 | // dest_bb: | 
|  | 1590 | //   buzz; | 
|  | 1591 |  | 
|  | 1592 | RS->enterBasicBlockEnd(MBB); | 
| Matt Arsenault | b0b741e | 2018-10-30 01:33:14 +0000 | [diff] [blame] | 1593 | unsigned Scav = RS->scavengeRegisterBackwards( | 
|  | 1594 | AMDGPU::SReg_64RegClass, | 
|  | 1595 | MachineBasicBlock::iterator(GetPC), false, 0); | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1596 | MRI.replaceRegWith(PCReg, Scav); | 
|  | 1597 | MRI.clearVirtRegs(); | 
|  | 1598 | RS->setRegUsed(Scav); | 
|  | 1599 |  | 
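|  |  | // Expansion size in bytes: s_getpc_b64 (4) + s_add_u32 with a 32-bit literal |
|  |  | // (8) + s_addc_u32 (4) + s_setpc_b64 (4). |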
|  | 1600 | return 4 + 8 + 4 + 4; | 
|  | 1601 | } | 
|  | 1602 |  | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1603 | unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { | 
|  | 1604 | switch (Cond) { | 
|  | 1605 | case SIInstrInfo::SCC_TRUE: | 
|  | 1606 | return AMDGPU::S_CBRANCH_SCC1; | 
|  | 1607 | case SIInstrInfo::SCC_FALSE: | 
|  | 1608 | return AMDGPU::S_CBRANCH_SCC0; | 
| Matt Arsenault | 4945905 | 2016-05-21 00:29:40 +0000 | [diff] [blame] | 1609 | case SIInstrInfo::VCCNZ: | 
|  | 1610 | return AMDGPU::S_CBRANCH_VCCNZ; | 
|  | 1611 | case SIInstrInfo::VCCZ: | 
|  | 1612 | return AMDGPU::S_CBRANCH_VCCZ; | 
|  | 1613 | case SIInstrInfo::EXECNZ: | 
|  | 1614 | return AMDGPU::S_CBRANCH_EXECNZ; | 
|  | 1615 | case SIInstrInfo::EXECZ: | 
|  | 1616 | return AMDGPU::S_CBRANCH_EXECZ; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1617 | default: | 
|  | 1618 | llvm_unreachable("invalid branch predicate"); | 
|  | 1619 | } | 
|  | 1620 | } | 
|  | 1621 |  | 
|  | 1622 | SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { | 
|  | 1623 | switch (Opcode) { | 
|  | 1624 | case AMDGPU::S_CBRANCH_SCC0: | 
|  | 1625 | return SCC_FALSE; | 
|  | 1626 | case AMDGPU::S_CBRANCH_SCC1: | 
|  | 1627 | return SCC_TRUE; | 
| Matt Arsenault | 4945905 | 2016-05-21 00:29:40 +0000 | [diff] [blame] | 1628 | case AMDGPU::S_CBRANCH_VCCNZ: | 
|  | 1629 | return VCCNZ; | 
|  | 1630 | case AMDGPU::S_CBRANCH_VCCZ: | 
|  | 1631 | return VCCZ; | 
|  | 1632 | case AMDGPU::S_CBRANCH_EXECNZ: | 
|  | 1633 | return EXECNZ; | 
|  | 1634 | case AMDGPU::S_CBRANCH_EXECZ: | 
|  | 1635 | return EXECZ; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1636 | default: | 
|  | 1637 | return INVALID_BR; | 
|  | 1638 | } | 
|  | 1639 | } | 
|  | 1640 |  | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1641 | bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, | 
|  | 1642 | MachineBasicBlock::iterator I, | 
|  | 1643 | MachineBasicBlock *&TBB, | 
|  | 1644 | MachineBasicBlock *&FBB, | 
|  | 1645 | SmallVectorImpl<MachineOperand> &Cond, | 
|  | 1646 | bool AllowModify) const { | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1647 | if (I->getOpcode() == AMDGPU::S_BRANCH) { | 
|  | 1648 | // Unconditional Branch | 
|  | 1649 | TBB = I->getOperand(0).getMBB(); | 
|  | 1650 | return false; | 
|  | 1651 | } | 
|  | 1652 |  | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1653 | MachineBasicBlock *CondBB = nullptr; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1654 |  | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1655 | if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { | 
|  | 1656 | CondBB = I->getOperand(1).getMBB(); | 
|  | 1657 | Cond.push_back(I->getOperand(0)); | 
|  | 1658 | } else { | 
|  | 1659 | BranchPredicate Pred = getBranchPredicate(I->getOpcode()); | 
|  | 1660 | if (Pred == INVALID_BR) | 
|  | 1661 | return true; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1662 |  | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1663 | CondBB = I->getOperand(0).getMBB(); | 
|  | 1664 | Cond.push_back(MachineOperand::CreateImm(Pred)); | 
|  | 1665 | Cond.push_back(I->getOperand(1)); // Save the branch register. | 
|  | 1666 | } | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1667 | ++I; | 
|  | 1668 |  | 
|  | 1669 | if (I == MBB.end()) { | 
|  | 1670 | // Conditional branch followed by fall-through. | 
|  | 1671 | TBB = CondBB; | 
|  | 1672 | return false; | 
|  | 1673 | } | 
|  | 1674 |  | 
|  | 1675 | if (I->getOpcode() == AMDGPU::S_BRANCH) { | 
|  | 1676 | TBB = CondBB; | 
|  | 1677 | FBB = I->getOperand(0).getMBB(); | 
|  | 1678 | return false; | 
|  | 1679 | } | 
|  | 1680 |  | 
|  | 1681 | return true; | 
|  | 1682 | } | 
|  | 1683 |  | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1684 | bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, | 
|  | 1685 | MachineBasicBlock *&FBB, | 
|  | 1686 | SmallVectorImpl<MachineOperand> &Cond, | 
|  | 1687 | bool AllowModify) const { | 
|  | 1688 | MachineBasicBlock::iterator I = MBB.getFirstTerminator(); | 
| Matt Arsenault | eabb8dd | 2018-11-16 05:03:02 +0000 | [diff] [blame] | 1689 | auto E = MBB.end(); | 
|  | 1690 | if (I == E) | 
|  | 1691 | return false; | 
|  | 1692 |  | 
|  | 1693 | // Skip over the instructions that are artificial terminators for special | 
|  | 1694 | // exec management. | 
|  | 1695 | while (I != E && !I->isBranch() && !I->isReturn() && | 
|  | 1696 | I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { | 
|  | 1697 | switch (I->getOpcode()) { | 
|  | 1698 | case AMDGPU::SI_MASK_BRANCH: | 
|  | 1699 | case AMDGPU::S_MOV_B64_term: | 
|  | 1700 | case AMDGPU::S_XOR_B64_term: | 
|  | 1701 | case AMDGPU::S_ANDN2_B64_term: | 
|  | 1702 | break; | 
|  | 1703 | case AMDGPU::SI_IF: | 
|  | 1704 | case AMDGPU::SI_ELSE: | 
|  | 1705 | case AMDGPU::SI_KILL_I1_TERMINATOR: | 
|  | 1706 | case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: | 
|  | 1707 | // FIXME: It's messy that these need to be considered here at all. | 
|  | 1708 | return true; | 
|  | 1709 | default: | 
|  | 1710 | llvm_unreachable("unexpected non-branch terminator inst"); | 
|  | 1711 | } | 
|  | 1712 |  | 
|  | 1713 | ++I; | 
|  | 1714 | } | 
|  | 1715 |  | 
|  | 1716 | if (I == E) | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1717 | return false; | 
|  | 1718 |  | 
|  | 1719 | if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) | 
|  | 1720 | return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); | 
|  | 1721 |  | 
|  | 1722 | ++I; | 
|  | 1723 |  | 
|  | 1724 | // TODO: Should be able to treat as fallthrough? | 
|  | 1725 | if (I == MBB.end()) | 
|  | 1726 | return true; | 
|  | 1727 |  | 
|  | 1728 | if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) | 
|  | 1729 | return true; | 
|  | 1730 |  | 
|  | 1731 | MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); | 
|  | 1732 |  | 
|  | 1733 | // Specifically handle the case where the conditional branch is to the same | 
|  | 1734 | // destination as the mask branch. e.g. | 
|  | 1735 | // | 
|  | 1736 | // si_mask_branch BB8 | 
|  | 1737 | // s_cbranch_execz BB8 | 
|  | 1738 | // s_cbranch BB9 | 
|  | 1739 | // | 
|  | 1740 | // This is required to understand divergent loops which may need the branches | 
|  | 1741 | // to be relaxed. | 
|  | 1742 | if (TBB != MaskBrDest || Cond.empty()) | 
|  | 1743 | return true; | 
|  | 1744 |  | 
|  | 1745 | auto Pred = Cond[0].getImm(); | 
|  | 1746 | return (Pred != EXECZ && Pred != EXECNZ); | 
|  | 1747 | } | 
|  | 1748 |  | 
| Matt Arsenault | 1b9fc8e | 2016-09-14 20:43:16 +0000 | [diff] [blame] | 1749 | unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1750 | int *BytesRemoved) const { | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1751 | MachineBasicBlock::iterator I = MBB.getFirstTerminator(); | 
|  | 1752 |  | 
|  | 1753 | unsigned Count = 0; | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1754 | unsigned RemovedSize = 0; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1755 | while (I != MBB.end()) { | 
|  | 1756 | MachineBasicBlock::iterator Next = std::next(I); | 
| Matt Arsenault | 6bc43d8 | 2016-10-06 16:20:41 +0000 | [diff] [blame] | 1757 | if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { | 
|  | 1758 | I = Next; | 
|  | 1759 | continue; | 
|  | 1760 | } | 
|  | 1761 |  | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1762 | RemovedSize += getInstSizeInBytes(*I); | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1763 | I->eraseFromParent(); | 
|  | 1764 | ++Count; | 
|  | 1765 | I = Next; | 
|  | 1766 | } | 
|  | 1767 |  | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1768 | if (BytesRemoved) | 
|  | 1769 | *BytesRemoved = RemovedSize; | 
|  | 1770 |  | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1771 | return Count; | 
|  | 1772 | } | 
|  | 1773 |  | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1774 | // Copy the flags onto the implicit condition register operand. | 
|  | 1775 | static void preserveCondRegFlags(MachineOperand &CondReg, | 
|  | 1776 | const MachineOperand &OrigCond) { | 
|  | 1777 | CondReg.setIsUndef(OrigCond.isUndef()); | 
|  | 1778 | CondReg.setIsKill(OrigCond.isKill()); | 
|  | 1779 | } | 
|  | 1780 |  | 
| Matt Arsenault | e8e0f5c | 2016-09-14 17:24:15 +0000 | [diff] [blame] | 1781 | unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1782 | MachineBasicBlock *TBB, | 
|  | 1783 | MachineBasicBlock *FBB, | 
|  | 1784 | ArrayRef<MachineOperand> Cond, | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1785 | const DebugLoc &DL, | 
|  | 1786 | int *BytesAdded) const { | 
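|  |  | // Unconditional branch: emit a single s_branch to TBB. | 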
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1787 | if (!FBB && Cond.empty()) { | 
|  | 1788 | BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) | 
|  | 1789 | .addMBB(TBB); | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1790 | if (BytesAdded) | 
|  | 1791 | *BytesAdded = 4; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1792 | return 1; | 
|  | 1793 | } | 
|  | 1794 |  | 
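|  |  | // A branch condition held in a register: use the non-uniform branch pseudo, | 
|  |  | // which is expanded later. | 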
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1795 | if (Cond.size() == 1 && Cond[0].isReg()) { | 
|  | 1796 | BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) | 
|  | 1797 | .add(Cond[0]) | 
|  | 1798 | .addMBB(TBB); | 
|  | 1799 | return 1; | 
|  | 1800 | } | 
|  | 1801 |  | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1802 | assert(TBB && Cond[0].isImm()); | 
|  | 1803 |  | 
|  | 1804 | unsigned Opcode | 
|  | 1805 | = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); | 
|  | 1806 |  | 
|  | 1807 | if (!FBB) { | 
|  | 1809 | MachineInstr *CondBr = | 
|  | 1810 | BuildMI(&MBB, DL, get(Opcode)) | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1811 | .addMBB(TBB); | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1812 |  | 
| Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 1813 | // Copy the flags onto the implicit condition register operand. | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1814 | preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); | 
| Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 1815 |  | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1816 | if (BytesAdded) | 
|  | 1817 | *BytesAdded = 4; | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1818 | return 1; | 
|  | 1819 | } | 
|  | 1820 |  | 
|  | 1821 | assert(TBB && FBB); | 
|  | 1822 |  | 
| Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 1823 | MachineInstr *CondBr = | 
|  | 1824 | BuildMI(&MBB, DL, get(Opcode)) | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1825 | .addMBB(TBB); | 
|  | 1826 | BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) | 
|  | 1827 | .addMBB(FBB); | 
|  | 1828 |  | 
| Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 1829 | MachineOperand &CondReg = CondBr->getOperand(1); | 
|  | 1830 | CondReg.setIsUndef(Cond[1].isUndef()); | 
|  | 1831 | CondReg.setIsKill(Cond[1].isKill()); | 
|  | 1832 |  | 
| Matt Arsenault | a2b036e | 2016-09-14 17:23:48 +0000 | [diff] [blame] | 1833 | if (BytesAdded) | 
|  | 1834 | *BytesAdded = 8; | 
|  | 1835 |  | 
| Matt Arsenault | 6d09380 | 2016-05-21 00:29:27 +0000 | [diff] [blame] | 1836 | return 2; | 
|  | 1837 | } | 
|  | 1838 |  | 
| Matt Arsenault | 1b9fc8e | 2016-09-14 20:43:16 +0000 | [diff] [blame] | 1839 | bool SIInstrInfo::reverseBranchCondition( | 
| Matt Arsenault | 72fcd5f | 2016-05-21 00:29:34 +0000 | [diff] [blame] | 1840 | SmallVectorImpl<MachineOperand> &Cond) const { | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 1841 | if (Cond.size() != 2) { | 
|  | 1842 | return true; | 
|  | 1843 | } | 
|  | 1844 |  | 
|  | 1845 | if (Cond[0].isImm()) { | 
|  | 1846 | Cond[0].setImm(-Cond[0].getImm()); | 
|  | 1847 | return false; | 
|  | 1848 | } | 
|  | 1849 |  | 
|  | 1850 | return true; | 
| Matt Arsenault | 72fcd5f | 2016-05-21 00:29:34 +0000 | [diff] [blame] | 1851 | } | 
|  | 1852 |  | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1853 | bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, | 
|  | 1854 | ArrayRef<MachineOperand> Cond, | 
|  | 1855 | unsigned TrueReg, unsigned FalseReg, | 
|  | 1856 | int &CondCycles, | 
|  | 1857 | int &TrueCycles, int &FalseCycles) const { | 
|  | 1858 | switch (Cond[0].getImm()) { | 
|  | 1859 | case VCCNZ: | 
|  | 1860 | case VCCZ: { | 
|  | 1861 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 1862 | const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); | 
|  | 1863 | assert(MRI.getRegClass(FalseReg) == RC); | 
|  | 1864 |  | 
|  | 1865 | int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; | 
|  | 1866 | CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? | 
|  | 1867 |  | 
|  | 1868 | // Limit to equal cost for branch vs. N v_cndmask_b32s. | 
|  | 1869 | return !RI.isSGPRClass(RC) && NumInsts <= 6; | 
|  | 1870 | } | 
|  | 1871 | case SCC_TRUE: | 
|  | 1872 | case SCC_FALSE: { | 
|  | 1873 | // FIXME: We could insert for VGPRs if we could replace the original compare | 
|  | 1874 | // with a vector one. | 
|  | 1875 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 1876 | const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); | 
|  | 1877 | assert(MRI.getRegClass(FalseReg) == RC); | 
|  | 1878 |  | 
|  | 1879 | int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; | 
|  | 1880 |  | 
|  | 1881 | // Sizes that are a multiple of 64 bits can use s_cselect_b64, halving the count. | 
|  | 1882 | if (NumInsts % 2 == 0) | 
|  | 1883 | NumInsts /= 2; | 
|  | 1884 |  | 
|  | 1885 | CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? | 
|  | 1886 | return RI.isSGPRClass(RC); | 
|  | 1887 | } | 
|  | 1888 | default: | 
|  | 1889 | return false; | 
|  | 1890 | } | 
|  | 1891 | } | 
|  | 1892 |  | 
|  | 1893 | void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, | 
|  | 1894 | MachineBasicBlock::iterator I, const DebugLoc &DL, | 
|  | 1895 | unsigned DstReg, ArrayRef<MachineOperand> Cond, | 
|  | 1896 | unsigned TrueReg, unsigned FalseReg) const { | 
|  | 1897 | BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); | 
|  | 1898 | if (Pred == VCCZ || Pred == SCC_FALSE) { | 
|  | 1899 | Pred = static_cast<BranchPredicate>(-Pred); | 
|  | 1900 | std::swap(TrueReg, FalseReg); | 
|  | 1901 | } | 
|  | 1902 |  | 
|  | 1903 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 1904 | const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1905 | unsigned DstSize = RI.getRegSizeInBits(*DstRC); | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1906 |  | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1907 | if (DstSize == 32) { | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1908 | unsigned SelOp = Pred == SCC_TRUE ? | 
|  | 1909 | AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32; | 
|  | 1910 |  | 
|  | 1911 | // Instruction's operands are backwards from what is expected. | 
|  | 1912 | MachineInstr *Select = | 
|  | 1913 | BuildMI(MBB, I, DL, get(SelOp), DstReg) | 
|  | 1914 | .addReg(FalseReg) | 
|  | 1915 | .addReg(TrueReg); | 
|  | 1916 |  | 
|  | 1917 | preserveCondRegFlags(Select->getOperand(3), Cond[1]); | 
|  | 1918 | return; | 
|  | 1919 | } | 
|  | 1920 |  | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1921 | if (DstSize == 64 && Pred == SCC_TRUE) { | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1922 | MachineInstr *Select = | 
|  | 1923 | BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) | 
|  | 1924 | .addReg(FalseReg) | 
|  | 1925 | .addReg(TrueReg); | 
|  | 1926 |  | 
|  | 1927 | preserveCondRegFlags(Select->getOperand(3), Cond[1]); | 
|  | 1928 | return; | 
|  | 1929 | } | 
|  | 1930 |  | 
|  | 1931 | static const int16_t Sub0_15[] = { | 
|  | 1932 | AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, | 
|  | 1933 | AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, | 
|  | 1934 | AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, | 
|  | 1935 | AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, | 
|  | 1936 | }; | 
|  | 1937 |  | 
|  | 1938 | static const int16_t Sub0_15_64[] = { | 
|  | 1939 | AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, | 
|  | 1940 | AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, | 
|  | 1941 | AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, | 
|  | 1942 | AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, | 
|  | 1943 | }; | 
|  | 1944 |  | 
|  | 1945 | unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; | 
|  | 1946 | const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; | 
|  | 1947 | const int16_t *SubIndices = Sub0_15; | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 1948 | int NElts = DstSize / 32; | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1949 |  | 
| Tim Renouf | 361b5b2 | 2019-03-21 12:01:21 +0000 | [diff] [blame] | 1950 | // 64-bit select is only available for SALU. | 
|  | 1951 | // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1952 | if (Pred == SCC_TRUE) { | 
| Tim Renouf | 361b5b2 | 2019-03-21 12:01:21 +0000 | [diff] [blame] | 1953 | if (NElts % 2) { | 
|  | 1954 | SelOp = AMDGPU::S_CSELECT_B32; | 
|  | 1955 | EltRC = &AMDGPU::SGPR_32RegClass; | 
|  | 1956 | } else { | 
|  | 1957 | SelOp = AMDGPU::S_CSELECT_B64; | 
|  | 1958 | EltRC = &AMDGPU::SGPR_64RegClass; | 
|  | 1959 | SubIndices = Sub0_15_64; | 
|  | 1960 | NElts /= 2; | 
|  | 1961 | } | 
| Matt Arsenault | 9f5e0ef | 2017-01-25 04:25:02 +0000 | [diff] [blame] | 1962 | } | 
|  | 1963 |  | 
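|  |  | // Select each 32-bit (or 64-bit, for the SALU case) element individually and | 
|  |  | // recombine the results with a REG_SEQUENCE. | 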
|  | 1964 | MachineInstrBuilder MIB = BuildMI( | 
|  | 1965 | MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); | 
|  | 1966 |  | 
|  | 1967 | I = MIB->getIterator(); | 
|  | 1968 |  | 
|  | 1969 | SmallVector<unsigned, 8> Regs; | 
|  | 1970 | for (int Idx = 0; Idx != NElts; ++Idx) { | 
|  | 1971 | unsigned DstElt = MRI.createVirtualRegister(EltRC); | 
|  | 1972 | Regs.push_back(DstElt); | 
|  | 1973 |  | 
|  | 1974 | unsigned SubIdx = SubIndices[Idx]; | 
|  | 1975 |  | 
|  | 1976 | MachineInstr *Select = | 
|  | 1977 | BuildMI(MBB, I, DL, get(SelOp), DstElt) | 
|  | 1978 | .addReg(FalseReg, 0, SubIdx) | 
|  | 1979 | .addReg(TrueReg, 0, SubIdx); | 
|  | 1980 | preserveCondRegFlags(Select->getOperand(3), Cond[1]); | 
|  | 1981 |  | 
|  | 1982 | MIB.addReg(DstElt) | 
|  | 1983 | .addImm(SubIdx); | 
|  | 1984 | } | 
|  | 1985 | } | 
|  | 1986 |  | 
| Sam Kolton | 27e0f8b | 2017-03-31 11:42:43 +0000 | [diff] [blame] | 1987 | bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { | 
|  | 1988 | switch (MI.getOpcode()) { | 
|  | 1989 | case AMDGPU::V_MOV_B32_e32: | 
|  | 1990 | case AMDGPU::V_MOV_B32_e64: | 
|  | 1991 | case AMDGPU::V_MOV_B64_PSEUDO: { | 
|  | 1992 | // If there are additional implicit register operands, this may be used for | 
|  | 1993 | // register indexing so the source register operand isn't simply copied. | 
|  | 1994 | unsigned NumOps = MI.getDesc().getNumOperands() + | 
|  | 1995 | MI.getDesc().getNumImplicitUses(); | 
|  | 1996 |  | 
|  | 1997 | return MI.getNumOperands() == NumOps; | 
|  | 1998 | } | 
|  | 1999 | case AMDGPU::S_MOV_B32: | 
|  | 2000 | case AMDGPU::S_MOV_B64: | 
|  | 2001 | case AMDGPU::COPY: | 
|  | 2002 | return true; | 
|  | 2003 | default: | 
|  | 2004 | return false; | 
|  | 2005 | } | 
|  | 2006 | } | 
|  | 2007 |  | 
| Jan Sjodin | 312ccf7 | 2017-09-14 20:53:51 +0000 | [diff] [blame] | 2008 | unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( | 
| Marcello Maggioni | 5ca4128 | 2018-08-20 19:23:45 +0000 | [diff] [blame] | 2009 | unsigned Kind) const { | 
| Jan Sjodin | 312ccf7 | 2017-09-14 20:53:51 +0000 | [diff] [blame] | 2010 | switch (Kind) { | 
|  | 2011 | case PseudoSourceValue::Stack: | 
|  | 2012 | case PseudoSourceValue::FixedStack: | 
| Matt Arsenault | 0da6350 | 2018-08-31 05:49:54 +0000 | [diff] [blame] | 2013 | return AMDGPUAS::PRIVATE_ADDRESS; | 
| Jan Sjodin | 312ccf7 | 2017-09-14 20:53:51 +0000 | [diff] [blame] | 2014 | case PseudoSourceValue::ConstantPool: | 
|  | 2015 | case PseudoSourceValue::GOT: | 
|  | 2016 | case PseudoSourceValue::JumpTable: | 
|  | 2017 | case PseudoSourceValue::GlobalValueCallEntry: | 
|  | 2018 | case PseudoSourceValue::ExternalSymbolCallEntry: | 
|  | 2019 | case PseudoSourceValue::TargetCustom: | 
| Matt Arsenault | 0da6350 | 2018-08-31 05:49:54 +0000 | [diff] [blame] | 2020 | return AMDGPUAS::CONSTANT_ADDRESS; | 
| Jan Sjodin | 312ccf7 | 2017-09-14 20:53:51 +0000 | [diff] [blame] | 2021 | } | 
| Matt Arsenault | 0da6350 | 2018-08-31 05:49:54 +0000 | [diff] [blame] | 2022 | return AMDGPUAS::FLAT_ADDRESS; | 
| Jan Sjodin | 312ccf7 | 2017-09-14 20:53:51 +0000 | [diff] [blame] | 2023 | } | 
|  | 2024 |  | 
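|  |  | // Strip the src0/src1/src2 modifier operands from MI. They are removed from | 
|  |  | // the highest operand index down so the remaining indices stay valid. | 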
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2025 | static void removeModOperands(MachineInstr &MI) { | 
|  | 2026 | unsigned Opc = MI.getOpcode(); | 
|  | 2027 | int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, | 
|  | 2028 | AMDGPU::OpName::src0_modifiers); | 
|  | 2029 | int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, | 
|  | 2030 | AMDGPU::OpName::src1_modifiers); | 
|  | 2031 | int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, | 
|  | 2032 | AMDGPU::OpName::src2_modifiers); | 
|  | 2033 |  | 
|  | 2034 | MI.RemoveOperand(Src2ModIdx); | 
|  | 2035 | MI.RemoveOperand(Src1ModIdx); | 
|  | 2036 | MI.RemoveOperand(Src0ModIdx); | 
|  | 2037 | } | 
|  | 2038 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2039 | bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2040 | unsigned Reg, MachineRegisterInfo *MRI) const { | 
|  | 2041 | if (!MRI->hasOneNonDBGUse(Reg)) | 
|  | 2042 | return false; | 
|  | 2043 |  | 
| Nicolai Haehnle | 39980da | 2017-11-28 08:41:50 +0000 | [diff] [blame] | 2044 | switch (DefMI.getOpcode()) { | 
|  | 2045 | default: | 
|  | 2046 | return false; | 
|  | 2047 | case AMDGPU::S_MOV_B64: | 
|  | 2048 | // TODO: We could fold 64-bit immediates, but this gets complicated | 
|  | 2049 | // when there are sub-registers. | 
|  | 2050 | return false; | 
|  | 2051 |  | 
|  | 2052 | case AMDGPU::V_MOV_B32_e32: | 
|  | 2053 | case AMDGPU::S_MOV_B32: | 
|  | 2054 | break; | 
|  | 2055 | } | 
|  | 2056 |  | 
|  | 2057 | const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); | 
|  | 2058 | assert(ImmOp); | 
|  | 2059 | // FIXME: We could handle FrameIndex values here. | 
|  | 2060 | if (!ImmOp->isImm()) | 
|  | 2061 | return false; | 
|  | 2062 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2063 | unsigned Opc = UseMI.getOpcode(); | 
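|  |  | // A plain COPY of the immediate can be rewritten directly into the | 
|  |  | // mov-immediate matching the destination register bank. | 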
| Tom Stellard | 2add8a1 | 2016-09-06 20:00:26 +0000 | [diff] [blame] | 2064 | if (Opc == AMDGPU::COPY) { | 
|  | 2065 | bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg()); | 
| Tom Stellard | 2add8a1 | 2016-09-06 20:00:26 +0000 | [diff] [blame] | 2066 | unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; | 
| Tom Stellard | 2add8a1 | 2016-09-06 20:00:26 +0000 | [diff] [blame] | 2067 | UseMI.setDesc(get(NewOpc)); | 
|  | 2068 | UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm()); | 
|  | 2069 | UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); | 
|  | 2070 | return true; | 
|  | 2071 | } | 
|  | 2072 |  | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2073 | if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2074 | Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 || | 
|  | 2075 | Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || | 
|  | 2076 | Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) { | 
| Matt Arsenault | 2ed2193 | 2017-02-27 20:21:31 +0000 | [diff] [blame] | 2077 | // Don't fold if we are using source or output modifiers. The new VOP2 | 
|  | 2078 | // instructions don't have them. | 
|  | 2079 | if (hasAnyModifiersSet(UseMI)) | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2080 | return false; | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2081 |  | 
| Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 2082 | // If this is a free constant, there's no reason to do this. | 
|  | 2083 | // TODO: We could fold this here instead of letting SIFoldOperands do it | 
|  | 2084 | // later. | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2085 | MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); | 
|  | 2086 |  | 
|  | 2087 | // Any src operand can be used for the legality check. | 
| Nicolai Haehnle | 39980da | 2017-11-28 08:41:50 +0000 | [diff] [blame] | 2088 | if (isInlineConstant(UseMI, *Src0, *ImmOp)) | 
| Matt Arsenault | 3d1c1de | 2016-04-14 21:58:24 +0000 | [diff] [blame] | 2089 | return false; | 
|  | 2090 |  | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2091 | bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || | 
|  | 2092 | Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64; | 
|  | 2093 | bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || | 
|  | 2094 | Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2095 | MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); | 
|  | 2096 | MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2097 |  | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2098 | // Multiplied part is the constant: Use v_madmk_{f16, f32}. | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2099 | // We should only expect these to be on src0 due to canonicalizations. | 
|  | 2100 | if (Src0->isReg() && Src0->getReg() == Reg) { | 
| Matt Arsenault | a266bd8 | 2016-03-02 04:05:14 +0000 | [diff] [blame] | 2101 | if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2102 | return false; | 
|  | 2103 |  | 
| Matt Arsenault | a266bd8 | 2016-03-02 04:05:14 +0000 | [diff] [blame] | 2104 | if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2105 | return false; | 
|  | 2106 |  | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2107 | unsigned NewOpc = | 
|  | 2108 | IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) | 
|  | 2109 | : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); | 
|  | 2110 | if (pseudoToMCOpcode(NewOpc) == -1) | 
|  | 2111 | return false; | 
|  | 2112 |  | 
| Nikolay Haustov | 6560781 | 2016-03-11 09:27:25 +0000 | [diff] [blame] | 2113 | // We need to swap operands 0 and 1 since the madmk constant is at operand 1. | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2114 |  | 
| Nicolai Haehnle | 39980da | 2017-11-28 08:41:50 +0000 | [diff] [blame] | 2115 | const int64_t Imm = ImmOp->getImm(); | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2116 |  | 
|  | 2117 | // FIXME: This would be a lot easier if we could return a new instruction | 
|  | 2118 | // instead of having to modify in place. | 
|  | 2119 |  | 
|  | 2120 | // Remove these first since they are at the end. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2121 | UseMI.RemoveOperand( | 
|  | 2122 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); | 
|  | 2123 | UseMI.RemoveOperand( | 
|  | 2124 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2125 |  | 
|  | 2126 | unsigned Src1Reg = Src1->getReg(); | 
|  | 2127 | unsigned Src1SubReg = Src1->getSubReg(); | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2128 | Src0->setReg(Src1Reg); | 
|  | 2129 | Src0->setSubReg(Src1SubReg); | 
| Matt Arsenault | 5e10016 | 2015-04-24 01:57:58 +0000 | [diff] [blame] | 2130 | Src0->setIsKill(Src1->isKill()); | 
|  | 2131 |  | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2132 | if (Opc == AMDGPU::V_MAC_F32_e64 || | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2133 | Opc == AMDGPU::V_MAC_F16_e64 || | 
|  | 2134 | Opc == AMDGPU::V_FMAC_F32_e64 || | 
|  | 2135 | Opc == AMDGPU::V_FMAC_F16_e64) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2136 | UseMI.untieRegOperand( | 
|  | 2137 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2138 |  | 
| Nikolay Haustov | 6560781 | 2016-03-11 09:27:25 +0000 | [diff] [blame] | 2139 | Src1->ChangeToImmediate(Imm); | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2140 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2141 | removeModOperands(UseMI); | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2142 | UseMI.setDesc(get(NewOpc)); | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2143 |  | 
|  | 2144 | bool DeleteDef = MRI->hasOneNonDBGUse(Reg); | 
|  | 2145 | if (DeleteDef) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2146 | DefMI.eraseFromParent(); | 
| Matt Arsenault | f078330 | 2015-02-21 21:29:10 +0000 | [diff] [blame] | 2147 |  | 
|  | 2148 | return true; | 
|  | 2149 | } | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2150 |  | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2151 | // Added part is the constant: Use v_madak_{f16, f32}. | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2152 | if (Src2->isReg() && Src2->getReg() == Reg) { | 
|  | 2153 | // Not allowed to use constant bus for another operand. | 
|  | 2154 | // We can however allow an inline immediate as src0. | 
| Alexander Timofeev | 20cbe6f | 2018-09-10 16:42:49 +0000 | [diff] [blame] | 2155 | bool Src0Inlined = false; | 
|  | 2156 | if (Src0->isReg()) { | 
|  | 2157 | // Try to inline the constant if possible. | 
|  | 2158 | // If the def is a move of an immediate and this is its only use, | 
|  | 2159 | // folding it here saves a VGPR. | 
|  | 2160 | MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); | 
|  | 2161 | if (Def && Def->isMoveImmediate() && | 
|  | 2162 | isInlineConstant(Def->getOperand(1)) && | 
|  | 2163 | MRI->hasOneUse(Src0->getReg())) { | 
|  | 2164 | Src0->ChangeToImmediate(Def->getOperand(1).getImm()); | 
|  | 2165 | Src0Inlined = true; | 
|  | 2166 | } else if ((RI.isPhysicalRegister(Src0->getReg()) && | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 2167 | (ST.getConstantBusLimit(Opc) <= 1 && | 
|  | 2168 | RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || | 
| Alexander Timofeev | 20cbe6f | 2018-09-10 16:42:49 +0000 | [diff] [blame] | 2169 | (RI.isVirtualRegister(Src0->getReg()) && | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 2170 | (ST.getConstantBusLimit(Opc) <= 1 && | 
|  | 2171 | RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) | 
| Alexander Timofeev | 20cbe6f | 2018-09-10 16:42:49 +0000 | [diff] [blame] | 2172 | return false; | 
|  | 2173 | // VGPR is okay as Src0 - fallthrough | 
|  | 2174 | } | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2175 |  | 
| Alexander Timofeev | 20cbe6f | 2018-09-10 16:42:49 +0000 | [diff] [blame] | 2176 | if (Src1->isReg() && !Src0Inlined) { | 
|  | 2177 | // There is still one slot available for an inlinable constant - try to fill it. | 
|  | 2178 | MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); | 
|  | 2179 | if (Def && Def->isMoveImmediate() && | 
|  | 2180 | isInlineConstant(Def->getOperand(1)) && | 
|  | 2181 | MRI->hasOneUse(Src1->getReg()) && | 
|  | 2182 | commuteInstruction(UseMI)) { | 
|  | 2183 | Src0->ChangeToImmediate(Def->getOperand(1).getImm()); | 
|  | 2184 | } else if ((RI.isPhysicalRegister(Src1->getReg()) && | 
|  | 2185 | RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || | 
|  | 2186 | (RI.isVirtualRegister(Src1->getReg()) && | 
|  | 2187 | RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) | 
|  | 2188 | return false; | 
|  | 2189 | // VGPR is okay as Src1 - fallthrough | 
|  | 2190 | } | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2191 |  | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2192 | unsigned NewOpc = | 
|  | 2193 | IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) | 
|  | 2194 | : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); | 
|  | 2195 | if (pseudoToMCOpcode(NewOpc) == -1) | 
|  | 2196 | return false; | 
|  | 2197 |  | 
| Nicolai Haehnle | 39980da | 2017-11-28 08:41:50 +0000 | [diff] [blame] | 2198 | const int64_t Imm = ImmOp->getImm(); | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2199 |  | 
|  | 2200 | // FIXME: This would be a lot easier if we could return a new instruction | 
|  | 2201 | // instead of having to modify in place. | 
|  | 2202 |  | 
|  | 2203 | // Remove these first since they are at the end. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2204 | UseMI.RemoveOperand( | 
|  | 2205 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); | 
|  | 2206 | UseMI.RemoveOperand( | 
|  | 2207 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2208 |  | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2209 | if (Opc == AMDGPU::V_MAC_F32_e64 || | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2210 | Opc == AMDGPU::V_MAC_F16_e64 || | 
|  | 2211 | Opc == AMDGPU::V_FMAC_F32_e64 || | 
|  | 2212 | Opc == AMDGPU::V_FMAC_F16_e64) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2213 | UseMI.untieRegOperand( | 
|  | 2214 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2215 |  | 
|  | 2216 | // ChangeToImmediate adds Src2 back to the instruction. | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2217 | Src2->ChangeToImmediate(Imm); | 
|  | 2218 |  | 
|  | 2219 | // These come before src2. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2220 | removeModOperands(UseMI); | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2221 | UseMI.setDesc(get(NewOpc)); | 
| Alexander Timofeev | ba447ba | 2019-05-26 20:33:26 +0000 | [diff] [blame] | 2222 | // UseMI may have been commuted, leaving an SGPR as src1. In that case an | 
|  | 2223 | // inline constant together with an SGPR would be illegal, so re-legalize | 
|  | 2224 | // the operands. | 
|  | 2225 | legalizeOperands(UseMI); | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2226 |  | 
|  | 2227 | bool DeleteDef = MRI->hasOneNonDBGUse(Reg); | 
|  | 2228 | if (DeleteDef) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2229 | DefMI.eraseFromParent(); | 
| Matt Arsenault | 0325d3d | 2015-02-21 21:29:07 +0000 | [diff] [blame] | 2230 |  | 
|  | 2231 | return true; | 
|  | 2232 | } | 
|  | 2233 | } | 
|  | 2234 |  | 
|  | 2235 | return false; | 
|  | 2236 | } | 
|  | 2237 |  | 
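|  |  | // Return true if the byte ranges [OffsetA, OffsetA + WidthA) and | 
|  |  | // [OffsetB, OffsetB + WidthB) do not overlap. | 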
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2238 | static bool offsetsDoNotOverlap(int WidthA, int OffsetA, | 
|  | 2239 | int WidthB, int OffsetB) { | 
|  | 2240 | int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; | 
|  | 2241 | int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; | 
|  | 2242 | int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; | 
|  | 2243 | return LowOffset + LowWidth <= HighOffset; | 
|  | 2244 | } | 
|  | 2245 |  | 
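|  |  | // Two memory accesses are trivially disjoint if they have identical base | 
|  |  | // operands, known widths, and non-overlapping offset ranges. | 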
| Bjorn Pettersson | 238c9d630 | 2019-04-19 09:08:38 +0000 | [diff] [blame] | 2246 | bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, | 
|  | 2247 | const MachineInstr &MIb) const { | 
|  | 2248 | const MachineOperand *BaseOp0, *BaseOp1; | 
| Chad Rosier | c27a18f | 2016-03-09 16:00:35 +0000 | [diff] [blame] | 2249 | int64_t Offset0, Offset1; | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2250 |  | 
| Francis Visoiu Mistrih | d7eebd6 | 2018-11-28 12:00:20 +0000 | [diff] [blame] | 2251 | if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) && | 
|  | 2252 | getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) { | 
|  | 2253 | if (!BaseOp0->isIdenticalTo(*BaseOp1)) | 
|  | 2254 | return false; | 
| Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 2255 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2256 | if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { | 
| Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 2257 | // FIXME: Handle ds_read2 / ds_write2. | 
|  | 2258 | return false; | 
|  | 2259 | } | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2260 | unsigned Width0 = (*MIa.memoperands_begin())->getSize(); | 
|  | 2261 | unsigned Width1 = (*MIb.memoperands_begin())->getSize(); | 
| Francis Visoiu Mistrih | d7eebd6 | 2018-11-28 12:00:20 +0000 | [diff] [blame] | 2262 | if (offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) { | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2263 | return true; | 
|  | 2264 | } | 
|  | 2265 | } | 
|  | 2266 |  | 
|  | 2267 | return false; | 
|  | 2268 | } | 
|  | 2269 |  | 
| Bjorn Pettersson | 238c9d630 | 2019-04-19 09:08:38 +0000 | [diff] [blame] | 2270 | bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, | 
|  | 2271 | const MachineInstr &MIb, | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2272 | AliasAnalysis *AA) const { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2273 | assert((MIa.mayLoad() || MIa.mayStore()) && | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2274 | "MIa must load from or modify a memory location"); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2275 | assert((MIb.mayLoad() || MIb.mayStore()) && | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2276 | "MIb must load from or modify a memory location"); | 
|  | 2277 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2278 | if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2279 | return false; | 
|  | 2280 |  | 
|  | 2281 | // XXX - Can we relax this between address spaces? | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2282 | if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2283 | return false; | 
|  | 2284 |  | 
|  | 2285 | // TODO: Should we check the address space from the MachineMemOperand? That | 
|  | 2286 | // would allow us to distinguish objects we know don't alias based on the | 
| Benjamin Kramer | df005cb | 2015-08-08 18:27:36 +0000 | [diff] [blame] | 2287 | // underlying address space, even if it was lowered to a different one, | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2288 | // e.g. private accesses lowered to use MUBUF instructions on a scratch | 
|  | 2289 | // buffer. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2290 | if (isDS(MIa)) { | 
|  | 2291 | if (isDS(MIb)) | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2292 | return checkInstOffsetsDoNotOverlap(MIa, MIb); | 
|  | 2293 |  | 
| Matt Arsenault | 9608a289 | 2017-07-29 01:26:21 +0000 | [diff] [blame] | 2294 | return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2295 | } | 
|  | 2296 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2297 | if (isMUBUF(MIa) || isMTBUF(MIa)) { | 
|  | 2298 | if (isMUBUF(MIb) || isMTBUF(MIb)) | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2299 | return checkInstOffsetsDoNotOverlap(MIa, MIb); | 
|  | 2300 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2301 | return !isFLAT(MIb) && !isSMRD(MIb); | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2302 | } | 
|  | 2303 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2304 | if (isSMRD(MIa)) { | 
|  | 2305 | if (isSMRD(MIb)) | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2306 | return checkInstOffsetsDoNotOverlap(MIa, MIb); | 
|  | 2307 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2308 | return !isFLAT(MIb) && !isMUBUF(MIa) && !isMTBUF(MIa); | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2309 | } | 
|  | 2310 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2311 | if (isFLAT(MIa)) { | 
|  | 2312 | if (isFLAT(MIb)) | 
| Matt Arsenault | c09cc3c | 2014-11-19 00:01:31 +0000 | [diff] [blame] | 2313 | return checkInstOffsetsDoNotOverlap(MIa, MIb); | 
|  | 2314 |  | 
|  | 2315 | return false; | 
|  | 2316 | } | 
|  | 2317 |  | 
|  | 2318 | return false; | 
|  | 2319 | } | 
|  | 2320 |  | 
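|  |  | // If MO is a virtual register uniquely defined by a V_MOV_B32 of an immediate, | 
|  |  | // return that immediate; otherwise return 0, which callers treat as "no | 
|  |  | // foldable immediate". | 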
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2321 | static int64_t getFoldableImm(const MachineOperand* MO) { | 
|  | 2322 | if (!MO->isReg()) | 
|  | 2323 | return 0; | 
|  | 2324 | const MachineFunction *MF = MO->getParent()->getParent()->getParent(); | 
|  | 2325 | const MachineRegisterInfo &MRI = MF->getRegInfo(); | 
|  | 2326 | auto Def = MRI.getUniqueVRegDef(MO->getReg()); | 
| Matt Arsenault | c317287 | 2017-09-14 20:54:29 +0000 | [diff] [blame] | 2327 | if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 && | 
|  | 2328 | Def->getOperand(1).isImm()) | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2329 | return Def->getOperand(1).getImm(); | 
|  | 2330 | return 0; | 
|  | 2331 | } | 
|  | 2332 |  | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2333 | MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2334 | MachineInstr &MI, | 
|  | 2335 | LiveVariables *LV) const { | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 2336 | unsigned Opc = MI.getOpcode(); | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2337 | bool IsF16 = false; | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2338 | bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || | 
|  | 2339 | Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2340 |  | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 2341 | switch (Opc) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2342 | default: | 
|  | 2343 | return nullptr; | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2344 | case AMDGPU::V_MAC_F16_e64: | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2345 | case AMDGPU::V_FMAC_F16_e64: | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2346 | IsF16 = true; | 
| Simon Pilgrim | 0f5b350 | 2017-07-07 10:18:57 +0000 | [diff] [blame] | 2347 | LLVM_FALLTHROUGH; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2348 | case AMDGPU::V_MAC_F32_e64: | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 2349 | case AMDGPU::V_FMAC_F32_e64: | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2350 | break; | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2351 | case AMDGPU::V_MAC_F16_e32: | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2352 | case AMDGPU::V_FMAC_F16_e32: | 
| Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 2353 | IsF16 = true; | 
| Simon Pilgrim | 0f5b350 | 2017-07-07 10:18:57 +0000 | [diff] [blame] | 2354 | LLVM_FALLTHROUGH; | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 2355 | case AMDGPU::V_MAC_F32_e32: | 
|  | 2356 | case AMDGPU::V_FMAC_F32_e32: { | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2357 | int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), | 
|  | 2358 | AMDGPU::OpName::src0); | 
|  | 2359 | const MachineOperand *Src0 = &MI.getOperand(Src0Idx); | 
| Matt Arsenault | fdcdd88 | 2017-09-21 00:45:59 +0000 | [diff] [blame] | 2360 | if (!Src0->isReg() && !Src0->isImm()) | 
|  | 2361 | return nullptr; | 
|  | 2362 |  | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2363 | if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2364 | return nullptr; | 
| Matt Arsenault | fdcdd88 | 2017-09-21 00:45:59 +0000 | [diff] [blame] | 2365 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2366 | break; | 
|  | 2367 | } | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2368 | } | 
|  | 2369 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2370 | const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); | 
|  | 2371 | const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); | 
| Matt Arsenault | 3cb9ff8 | 2017-03-11 05:40:40 +0000 | [diff] [blame] | 2372 | const MachineOperand *Src0Mods = | 
|  | 2373 | getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2374 | const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); | 
| Matt Arsenault | 3cb9ff8 | 2017-03-11 05:40:40 +0000 | [diff] [blame] | 2375 | const MachineOperand *Src1Mods = | 
|  | 2376 | getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2377 | const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); | 
| Matt Arsenault | 3cb9ff8 | 2017-03-11 05:40:40 +0000 | [diff] [blame] | 2378 | const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); | 
|  | 2379 | const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2380 |  | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2381 | if (!Src0Mods && !Src1Mods && !Clamp && !Omod && | 
| Matt Arsenault | c317287 | 2017-09-14 20:54:29 +0000 | [diff] [blame] | 2382 | // If we have an SGPR input, we will violate the constant bus restriction. | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 2383 | (ST.getConstantBusLimit(Opc) > 1 || | 
|  | 2384 | !Src0->isReg() || | 
|  | 2385 | !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2386 | if (auto Imm = getFoldableImm(Src2)) { | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2387 | unsigned NewOpc = | 
|  | 2388 | IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) | 
|  | 2389 | : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); | 
|  | 2390 | if (pseudoToMCOpcode(NewOpc) != -1) | 
|  | 2391 | return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) | 
|  | 2392 | .add(*Dst) | 
|  | 2393 | .add(*Src0) | 
|  | 2394 | .add(*Src1) | 
|  | 2395 | .addImm(Imm); | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2396 | } | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2397 | unsigned NewOpc = | 
|  | 2398 | IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) | 
|  | 2399 | : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2400 | if (auto Imm = getFoldableImm(Src1)) { | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2401 | if (pseudoToMCOpcode(NewOpc) != -1) | 
|  | 2402 | return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) | 
|  | 2403 | .add(*Dst) | 
|  | 2404 | .add(*Src0) | 
|  | 2405 | .addImm(Imm) | 
|  | 2406 | .add(*Src2); | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2407 | } | 
|  | 2408 | if (auto Imm = getFoldableImm(Src0)) { | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2409 | if (pseudoToMCOpcode(NewOpc) != -1 && | 
|  | 2410 | isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc, | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2411 | AMDGPU::OpName::src0), Src1)) | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2412 | return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) | 
| Stanislav Mekhanoshin | 710da42 | 2017-09-11 17:13:57 +0000 | [diff] [blame] | 2413 | .add(*Dst) | 
|  | 2414 | .add(*Src1) | 
|  | 2415 | .addImm(Imm) | 
|  | 2416 | .add(*Src2); | 
|  | 2417 | } | 
|  | 2418 | } | 
|  | 2419 |  | 
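|  |  | // No immediate could be folded: convert to the full three-address VOP3 | 
|  |  | // MAD/FMA form instead. | 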
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2420 | unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) | 
|  | 2421 | : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); | 
|  | 2422 | if (pseudoToMCOpcode(NewOpc) == -1) | 
|  | 2423 | return nullptr; | 
|  | 2424 |  | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 2425 | return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 2426 | .add(*Dst) | 
| Matt Arsenault | 3cb9ff8 | 2017-03-11 05:40:40 +0000 | [diff] [blame] | 2427 | .addImm(Src0Mods ? Src0Mods->getImm() : 0) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 2428 | .add(*Src0) | 
| Matt Arsenault | 3cb9ff8 | 2017-03-11 05:40:40 +0000 | [diff] [blame] | 2429 | .addImm(Src1Mods ? Src1Mods->getImm() : 0) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 2430 | .add(*Src1) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2431 | .addImm(0) // Src2 mods | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 2432 | .add(*Src2) | 
| Matt Arsenault | 3cb9ff8 | 2017-03-11 05:40:40 +0000 | [diff] [blame] | 2433 | .addImm(Clamp ? Clamp->getImm() : 0) | 
|  | 2434 | .addImm(Omod ? Omod->getImm() : 0); | 
| Tom Stellard | db5a11f | 2015-07-13 15:47:57 +0000 | [diff] [blame] | 2435 | } | 
|  | 2436 |  | 
| Matt Arsenault | d486d3f | 2016-10-12 18:49:05 +0000 | [diff] [blame] | 2437 | // It's not generally safe to move VALU instructions across these, since a moved | 
|  | 2438 | // instruction may start using the register as a base index rather than reading it directly. | 
|  | 2439 | // XXX - Why isn't hasSideEffects sufficient for these? | 
|  | 2440 | static bool changesVGPRIndexingMode(const MachineInstr &MI) { | 
|  | 2441 | switch (MI.getOpcode()) { | 
|  | 2442 | case AMDGPU::S_SET_GPR_IDX_ON: | 
|  | 2443 | case AMDGPU::S_SET_GPR_IDX_MODE: | 
|  | 2444 | case AMDGPU::S_SET_GPR_IDX_OFF: | 
|  | 2445 | return true; | 
|  | 2446 | default: | 
|  | 2447 | return false; | 
|  | 2448 | } | 
|  | 2449 | } | 
|  | 2450 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2451 | bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, | 
| Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 2452 | const MachineBasicBlock *MBB, | 
|  | 2453 | const MachineFunction &MF) const { | 
| Matt Arsenault | 95c7897 | 2016-07-09 01:13:51 +0000 | [diff] [blame] | 2454 | // XXX - Do we want the SP check in the base implementation? | 
|  | 2455 |  | 
| Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 2456 | // Target-independent instructions do not have an implicit-use of EXEC, even | 
|  | 2457 | // when they operate on VGPRs. Treating EXEC modifications as scheduling | 
|  | 2458 | // boundaries prevents incorrect movements of such instructions. | 
| Matt Arsenault | 95c7897 | 2016-07-09 01:13:51 +0000 | [diff] [blame] | 2459 | return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || | 
| Matt Arsenault | d486d3f | 2016-10-12 18:49:05 +0000 | [diff] [blame] | 2460 | MI.modifiesRegister(AMDGPU::EXEC, &RI) || | 
| Tom Stellard | 8485fa0 | 2016-12-07 02:42:15 +0000 | [diff] [blame] | 2461 | MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || | 
|  | 2462 | MI.getOpcode() == AMDGPU::S_SETREG_B32 || | 
| Matt Arsenault | d486d3f | 2016-10-12 18:49:05 +0000 | [diff] [blame] | 2463 | changesVGPRIndexingMode(MI); | 
| Nicolai Haehnle | 213e87f | 2016-03-21 20:28:33 +0000 | [diff] [blame] | 2464 | } | 
|  | 2465 |  | 
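|  |  | // These DS instructions always operate on GDS, regardless of the gds bit in | 
|  |  | // the encoding. | 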
| Marek Olsak | c5cec5e | 2019-01-16 15:43:53 +0000 | [diff] [blame] | 2466 | bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { | 
|  | 2467 | return Opcode == AMDGPU::DS_ORDERED_COUNT || | 
|  | 2468 | Opcode == AMDGPU::DS_GWS_INIT || | 
|  | 2469 | Opcode == AMDGPU::DS_GWS_SEMA_V || | 
|  | 2470 | Opcode == AMDGPU::DS_GWS_SEMA_BR || | 
|  | 2471 | Opcode == AMDGPU::DS_GWS_SEMA_P || | 
|  | 2472 | Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || | 
|  | 2473 | Opcode == AMDGPU::DS_GWS_BARRIER; | 
|  | 2474 | } | 
|  | 2475 |  | 
| Nicolai Haehnle | 7f0d05d | 2018-07-30 09:23:59 +0000 | [diff] [blame] | 2476 | bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { | 
|  | 2477 | unsigned Opcode = MI.getOpcode(); | 
|  | 2478 |  | 
|  | 2479 | if (MI.mayStore() && isSMRD(MI)) | 
|  | 2480 | return true; // scalar store or atomic | 
|  | 2481 |  | 
| Matt Arsenault | b6cfa12 | 2019-06-06 22:51:51 +0000 | [diff] [blame] | 2482 | // This will terminate the function when other lanes may need to continue. | 
|  | 2483 | if (MI.isReturn()) | 
|  | 2484 | return true; | 
|  | 2485 |  | 
| Nicolai Haehnle | 7f0d05d | 2018-07-30 09:23:59 +0000 | [diff] [blame] | 2486 | // These instructions cause shader I/O that may cause hardware lockups | 
|  | 2487 | // when executed with an empty EXEC mask. | 
|  | 2488 | // | 
|  | 2489 | // Note: exp with VM = DONE = 0 is automatically skipped by hardware when | 
|  | 2490 | //       EXEC = 0, but checking for that case here seems not worth it | 
|  | 2491 | //       given the typical code patterns. | 
|  | 2492 | if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || | 
| Marek Olsak | c5cec5e | 2019-01-16 15:43:53 +0000 | [diff] [blame] | 2493 | Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || | 
| Matt Arsenault | ddd2c9a | 2019-06-07 23:02:52 +0000 | [diff] [blame] | 2494 | Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP) | 
| Nicolai Haehnle | 7f0d05d | 2018-07-30 09:23:59 +0000 | [diff] [blame] | 2495 | return true; | 
|  | 2496 |  | 
| Matt Arsenault | 6dd08e3 | 2019-05-20 22:04:42 +0000 | [diff] [blame] | 2497 | if (MI.isCall() || MI.isInlineAsm()) | 
| Nicolai Haehnle | 7f0d05d | 2018-07-30 09:23:59 +0000 | [diff] [blame] | 2498 | return true; // conservative assumption | 
|  | 2499 |  | 
|  | 2500 | // These are like SALU instructions in terms of effects, so it's questionable | 
|  | 2501 | // whether we should return true for those. | 
|  | 2502 | // | 
|  | 2503 | // However, executing them with EXEC = 0 causes them to operate on undefined | 
|  | 2504 | // data, which we avoid by returning true here. | 
|  | 2505 | if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) | 
|  | 2506 | return true; | 
|  | 2507 |  | 
|  | 2508 | return false; | 
|  | 2509 | } | 
|  | 2510 |  | 
| Matt Arsenault | a353fd5 | 2019-03-28 14:01:39 +0000 | [diff] [blame] | 2511 | bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, | 
|  | 2512 | const MachineInstr &MI) const { | 
|  | 2513 | if (MI.isMetaInstruction()) | 
|  | 2514 | return false; | 
|  | 2515 |  | 
|  | 2516 | // This won't read exec if this is an SGPR->SGPR copy. | 
|  | 2517 | if (MI.isCopyLike()) { | 
|  | 2518 | if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) | 
|  | 2519 | return true; | 
|  | 2520 |  | 
|  | 2521 | // Make sure this isn't copying exec as a normal operand | 
|  | 2522 | return MI.readsRegister(AMDGPU::EXEC, &RI); | 
|  | 2523 | } | 
|  | 2524 |  | 
| Matt Arsenault | 2cba91b | 2019-05-21 23:23:16 +0000 | [diff] [blame] | 2525 | // Make a conservative assumption about the callee. | 
|  | 2526 | if (MI.isCall()) | 
|  | 2527 | return true; | 
|  | 2528 |  | 
| Matt Arsenault | a353fd5 | 2019-03-28 14:01:39 +0000 | [diff] [blame] | 2529 | // Be conservative with any unhandled generic opcodes. | 
|  | 2530 | if (!isTargetSpecificOpcode(MI.getOpcode())) | 
|  | 2531 | return true; | 
|  | 2532 |  | 
|  | 2533 | return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); | 
|  | 2534 | } | 
|  | 2535 |  | 
| Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 2536 | bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { | 
| Matt Arsenault | 26faed3 | 2016-12-05 22:26:17 +0000 | [diff] [blame] | 2537 | switch (Imm.getBitWidth()) { | 
| Stanislav Mekhanoshin | 05791d9 | 2019-05-14 16:18:00 +0000 | [diff] [blame] | 2538 | case 1: // This likely will be a condition code mask. | 
|  | 2539 | return true; | 
|  | 2540 |  | 
| Matt Arsenault | 26faed3 | 2016-12-05 22:26:17 +0000 | [diff] [blame] | 2541 | case 32: | 
|  | 2542 | return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), | 
|  | 2543 | ST.hasInv2PiInlineImm()); | 
|  | 2544 | case 64: | 
|  | 2545 | return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), | 
|  | 2546 | ST.hasInv2PiInlineImm()); | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2547 | case 16: | 
| Matt Arsenault | 9dba9bd | 2017-02-02 02:27:04 +0000 | [diff] [blame] | 2548 | return ST.has16BitInsts() && | 
|  | 2549 | AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2550 | ST.hasInv2PiInlineImm()); | 
| Matt Arsenault | 26faed3 | 2016-12-05 22:26:17 +0000 | [diff] [blame] | 2551 | default: | 
|  | 2552 | llvm_unreachable("invalid bitwidth"); | 
| Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 2553 | } | 
| Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 2554 | } | 
|  | 2555 |  | 
| Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 2556 | bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2557 | uint8_t OperandType) const { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 2558 | if (!MO.isImm() || | 
|  | 2559 | OperandType < AMDGPU::OPERAND_SRC_FIRST || | 
|  | 2560 | OperandType > AMDGPU::OPERAND_SRC_LAST) | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2561 | return false; | 
|  | 2562 |  | 
|  | 2563 | // MachineOperand provides no way to tell the true operand size, since it only | 
|  | 2564 | // records a 64-bit value. We need to know the size to determine if a 32-bit | 
|  | 2565 | // floating point immediate bit pattern is legal for an integer immediate. It | 
|  | 2566 | // would be for any 32-bit integer operand, but would not be for a 64-bit one. | 
|  | 2567 |  | 
|  | 2568 | int64_t Imm = MO.getImm(); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 2569 | switch (OperandType) { | 
|  | 2570 | case AMDGPU::OPERAND_REG_IMM_INT32: | 
|  | 2571 | case AMDGPU::OPERAND_REG_IMM_FP32: | 
|  | 2572 | case AMDGPU::OPERAND_REG_INLINE_C_INT32: | 
|  | 2573 | case AMDGPU::OPERAND_REG_INLINE_C_FP32: { | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2574 | int32_t Trunc = static_cast<int32_t>(Imm); | 
| Nicolai Haehnle | 283b995 | 2018-08-29 07:46:09 +0000 | [diff] [blame] | 2575 | return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); | 
| Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 2576 | } | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 2577 | case AMDGPU::OPERAND_REG_IMM_INT64: | 
|  | 2578 | case AMDGPU::OPERAND_REG_IMM_FP64: | 
|  | 2579 | case AMDGPU::OPERAND_REG_INLINE_C_INT64: | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 2580 | case AMDGPU::OPERAND_REG_INLINE_C_FP64: | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2581 | return AMDGPU::isInlinableLiteral64(MO.getImm(), | 
|  | 2582 | ST.hasInv2PiInlineImm()); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 2583 | case AMDGPU::OPERAND_REG_IMM_INT16: | 
|  | 2584 | case AMDGPU::OPERAND_REG_IMM_FP16: | 
|  | 2585 | case AMDGPU::OPERAND_REG_INLINE_C_INT16: | 
|  | 2586 | case AMDGPU::OPERAND_REG_INLINE_C_FP16: { | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2587 | if (isInt<16>(Imm) || isUInt<16>(Imm)) { | 
| Matt Arsenault | 9dba9bd | 2017-02-02 02:27:04 +0000 | [diff] [blame] | 2588 | // A few special case instructions have 16-bit operands on subtargets | 
|  | 2589 | // where 16-bit instructions are not legal. | 
|  | 2590 | // TODO: Do the 32-bit immediates work? We shouldn't really need to handle | 
|  | 2591 | // constants in these cases | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2592 | int16_t Trunc = static_cast<int16_t>(Imm); | 
| Matt Arsenault | 9dba9bd | 2017-02-02 02:27:04 +0000 | [diff] [blame] | 2593 | return ST.has16BitInsts() && | 
|  | 2594 | AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2595 | } | 
| Matt Arsenault | d7bdcc4 | 2014-03-31 19:54:27 +0000 | [diff] [blame] | 2596 |  | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2597 | return false; | 
|  | 2598 | } | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 2599 | case AMDGPU::OPERAND_REG_IMM_V2INT16: | 
|  | 2600 | case AMDGPU::OPERAND_REG_IMM_V2FP16: | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 2601 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: | 
|  | 2602 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: { | 
|  | 2603 | uint32_t Trunc = static_cast<uint32_t>(Imm); | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 2604 | return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 2605 | } | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2606 | default: | 
|  | 2607 | llvm_unreachable("invalid bitwidth"); | 
|  | 2608 | } | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 2609 | } | 
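|  |  |  | 
|  |  | // [Editor's illustration, not part of the original file] A minimal sketch of | 
|  |  | // the rule the isInlinableLiteral32 call above is assumed to implement: GCN | 
|  |  | // inline constants are the integers -16..64 plus a small set of float bit | 
|  |  | // patterns, with 1/(2*pi) accepted only when the subtarget reports | 
|  |  | // hasInv2PiInlineImm(). The authoritative tables live in AMDGPUBaseInfo. | 
|  |  | static bool isInlinableLiteral32Sketch(int32_t Literal, bool HasInv2Pi) { | 
|  |  |   if (Literal >= -16 && Literal <= 64) | 
|  |  |     return true; | 
|  |  |  | 
|  |  |   uint32_t Bits = static_cast<uint32_t>(Literal); | 
|  |  |   return Bits == 0x3f000000 ||              // 0.5 | 
|  |  |          Bits == 0xbf000000 ||              // -0.5 | 
|  |  |          Bits == 0x3f800000 ||              // 1.0 | 
|  |  |          Bits == 0xbf800000 ||              // -1.0 | 
|  |  |          Bits == 0x40000000 ||              // 2.0 | 
|  |  |          Bits == 0xc0000000 ||              // -2.0 | 
|  |  |          Bits == 0x40800000 ||              // 4.0 | 
|  |  |          Bits == 0xc0800000 ||              // -4.0 | 
|  |  |          (Bits == 0x3e22f983 && HasInv2Pi); // 1/(2*pi) | 
|  |  | } | 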
|  | 2610 |  | 
| Matt Arsenault | c1ebd82 | 2016-08-13 01:43:54 +0000 | [diff] [blame] | 2611 | bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2612 | const MCOperandInfo &OpInfo) const { | 
| Matt Arsenault | c1ebd82 | 2016-08-13 01:43:54 +0000 | [diff] [blame] | 2613 | switch (MO.getType()) { | 
|  | 2614 | case MachineOperand::MO_Register: | 
|  | 2615 | return false; | 
|  | 2616 | case MachineOperand::MO_Immediate: | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2617 | return !isInlineConstant(MO, OpInfo); | 
| Matt Arsenault | c1ebd82 | 2016-08-13 01:43:54 +0000 | [diff] [blame] | 2618 | case MachineOperand::MO_FrameIndex: | 
|  | 2619 | case MachineOperand::MO_MachineBasicBlock: | 
|  | 2620 | case MachineOperand::MO_ExternalSymbol: | 
|  | 2621 | case MachineOperand::MO_GlobalAddress: | 
|  | 2622 | case MachineOperand::MO_MCSymbol: | 
|  | 2623 | return true; | 
|  | 2624 | default: | 
|  | 2625 | llvm_unreachable("unexpected operand type"); | 
|  | 2626 | } | 
|  | 2627 | } | 
|  | 2628 |  | 
| Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 2629 | static bool compareMachineOp(const MachineOperand &Op0, | 
|  | 2630 | const MachineOperand &Op1) { | 
|  | 2631 | if (Op0.getType() != Op1.getType()) | 
|  | 2632 | return false; | 
|  | 2633 |  | 
|  | 2634 | switch (Op0.getType()) { | 
|  | 2635 | case MachineOperand::MO_Register: | 
|  | 2636 | return Op0.getReg() == Op1.getReg(); | 
|  | 2637 | case MachineOperand::MO_Immediate: | 
|  | 2638 | return Op0.getImm() == Op1.getImm(); | 
| Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 2639 | default: | 
|  | 2640 | llvm_unreachable("Didn't expect to be comparing these operand types"); | 
|  | 2641 | } | 
|  | 2642 | } | 
|  | 2643 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2644 | bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, | 
|  | 2645 | const MachineOperand &MO) const { | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 2646 | const MCInstrDesc &InstDesc = MI.getDesc(); | 
|  | 2647 | const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; | 
| Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 2648 |  | 
| Tom Stellard | fb77f00 | 2015-01-13 22:59:41 +0000 | [diff] [blame] | 2649 | assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); | 
| Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 2650 |  | 
|  | 2651 | if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) | 
|  | 2652 | return true; | 
|  | 2653 |  | 
|  | 2654 | if (OpInfo.RegClass < 0) | 
|  | 2655 | return false; | 
|  | 2656 |  | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2657 | if (MO.isImm() && isInlineConstant(MO, OpInfo)) | 
|  | 2658 | return RI.opCanUseInlineConstant(OpInfo.OperandType); | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2659 |  | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 2660 | if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) | 
|  | 2661 | return false; | 
|  | 2662 |  | 
|  | 2663 | if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) | 
|  | 2664 | return true; | 
|  | 2665 |  | 
|  | 2666 | const MachineFunction *MF = MI.getParent()->getParent(); | 
|  | 2667 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | 
|  | 2668 | return ST.hasVOP3Literal(); | 
| Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 2669 | } | 
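|  |  |  | 
|  |  | // [Editor's illustration, not part of the original file] A hedged sketch of | 
|  |  | // how a caller might use isImmOperandLegal: if an operand already holds an | 
|  |  | // immediate the encoding cannot accept, materialize it in a register via | 
|  |  | // legalizeOpWithMove (defined later in this file). | 
|  |  | static void ensureImmIsLegal(const SIInstrInfo &TII, MachineInstr &MI, | 
|  |  |                              unsigned OpNo) { | 
|  |  |   const MachineOperand &MO = MI.getOperand(OpNo); | 
|  |  |   if (MO.isImm() && !TII.isImmOperandLegal(MI, OpNo, MO)) | 
|  |  |     TII.legalizeOpWithMove(MI, OpNo); // copy the value into a register instead | 
|  |  | } | 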
|  | 2670 |  | 
| Tom Stellard | 86d12eb | 2014-08-01 00:32:28 +0000 | [diff] [blame] | 2671 | bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { | 
| Marek Olsak | a93603d | 2015-01-15 18:42:51 +0000 | [diff] [blame] | 2672 | int Op32 = AMDGPU::getVOPe32(Opcode); | 
|  | 2673 | if (Op32 == -1) | 
|  | 2674 | return false; | 
|  | 2675 |  | 
|  | 2676 | return pseudoToMCOpcode(Op32) != -1; | 
| Tom Stellard | 86d12eb | 2014-08-01 00:32:28 +0000 | [diff] [blame] | 2677 | } | 
|  | 2678 |  | 
| Tom Stellard | b4a313a | 2014-08-01 00:32:39 +0000 | [diff] [blame] | 2679 | bool SIInstrInfo::hasModifiers(unsigned Opcode) const { | 
|  | 2680 | // The src0_modifier operand is present on all instructions | 
|  | 2681 | // that have modifiers. | 
|  | 2682 |  | 
|  | 2683 | return AMDGPU::getNamedOperandIdx(Opcode, | 
|  | 2684 | AMDGPU::OpName::src0_modifiers) != -1; | 
|  | 2685 | } | 
|  | 2686 |  | 
| Matt Arsenault | ace5b76 | 2014-10-17 18:00:43 +0000 | [diff] [blame] | 2687 | bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, | 
|  | 2688 | unsigned OpName) const { | 
|  | 2689 | const MachineOperand *Mods = getNamedOperand(MI, OpName); | 
|  | 2690 | return Mods && Mods->getImm(); | 
|  | 2691 | } | 
|  | 2692 |  | 
| Matt Arsenault | 2ed2193 | 2017-02-27 20:21:31 +0000 | [diff] [blame] | 2693 | bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { | 
|  | 2694 | return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || | 
|  | 2695 | hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || | 
|  | 2696 | hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || | 
|  | 2697 | hasModifiersSet(MI, AMDGPU::OpName::clamp) || | 
|  | 2698 | hasModifiersSet(MI, AMDGPU::OpName::omod); | 
|  | 2699 | } | 
|  | 2700 |  | 
| Matt Arsenault | 35b1902 | 2018-08-28 18:22:34 +0000 | [diff] [blame] | 2701 | bool SIInstrInfo::canShrink(const MachineInstr &MI, | 
|  | 2702 | const MachineRegisterInfo &MRI) const { | 
|  | 2703 | const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); | 
|  | 2704 | // Can't shrink instruction with three operands. | 
|  | 2705 | // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add | 
|  | 2706 | // a special case for it.  It can only be shrunk if the third operand | 
| Tim Renouf | 2e94f6e | 2019-03-18 19:25:39 +0000 | [diff] [blame] | 2707 | // is vcc, and src0_modifiers and src1_modifiers are not set. | 
|  | 2708 | // We should handle this the same way we handle vopc, by adding | 
| Matt Arsenault | 35b1902 | 2018-08-28 18:22:34 +0000 | [diff] [blame] | 2709 | // a register allocation hint pre-regalloc and then doing the shrinking | 
|  | 2710 | // post-regalloc. | 
|  | 2711 | if (Src2) { | 
|  | 2712 | switch (MI.getOpcode()) { | 
|  | 2713 | default: return false; | 
|  | 2714 |  | 
|  | 2715 | case AMDGPU::V_ADDC_U32_e64: | 
|  | 2716 | case AMDGPU::V_SUBB_U32_e64: | 
|  | 2717 | case AMDGPU::V_SUBBREV_U32_e64: { | 
|  | 2718 | const MachineOperand *Src1 | 
|  | 2719 | = getNamedOperand(MI, AMDGPU::OpName::src1); | 
|  | 2720 | if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) | 
|  | 2721 | return false; | 
|  | 2722 | // Additional verification is needed for sdst/src2. | 
|  | 2723 | return true; | 
|  | 2724 | } | 
|  | 2725 | case AMDGPU::V_MAC_F32_e64: | 
|  | 2726 | case AMDGPU::V_MAC_F16_e64: | 
|  | 2727 | case AMDGPU::V_FMAC_F32_e64: | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 2728 | case AMDGPU::V_FMAC_F16_e64: | 
| Matt Arsenault | 35b1902 | 2018-08-28 18:22:34 +0000 | [diff] [blame] | 2729 | if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || | 
|  | 2730 | hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) | 
|  | 2731 | return false; | 
|  | 2732 | break; | 
|  | 2733 |  | 
|  | 2734 | case AMDGPU::V_CNDMASK_B32_e64: | 
|  | 2735 | break; | 
|  | 2736 | } | 
|  | 2737 | } | 
|  | 2738 |  | 
|  | 2739 | const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); | 
|  | 2740 | if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || | 
|  | 2741 | hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) | 
|  | 2742 | return false; | 
|  | 2743 |  | 
|  | 2744 | // We don't need to check src0, all input types are legal, so just make sure | 
|  | 2745 | // src0 isn't using any modifiers. | 
|  | 2746 | if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) | 
|  | 2747 | return false; | 
|  | 2748 |  | 
| Ron Lieberman | 16de4fd | 2018-12-03 13:04:54 +0000 | [diff] [blame] | 2749 | // Can it be shrunk to a valid 32-bit opcode? | 
|  | 2750 | if (!hasVALU32BitEncoding(MI.getOpcode())) | 
|  | 2751 | return false; | 
|  | 2752 |  | 
| Matt Arsenault | 35b1902 | 2018-08-28 18:22:34 +0000 | [diff] [blame] | 2753 | // Check output modifiers | 
|  | 2754 | return !hasModifiersSet(MI, AMDGPU::OpName::omod) && | 
|  | 2755 | !hasModifiersSet(MI, AMDGPU::OpName::clamp); | 
| Matt Arsenault | de6c421 | 2018-08-28 18:34:24 +0000 | [diff] [blame] | 2756 | } | 
| Matt Arsenault | 35b1902 | 2018-08-28 18:22:34 +0000 | [diff] [blame] | 2757 |  | 
| Matt Arsenault | de6c421 | 2018-08-28 18:34:24 +0000 | [diff] [blame] | 2758 | // Set VCC operand with all flags from \p Orig, except for setting it as | 
|  | 2759 | // implicit. | 
|  | 2760 | static void copyFlagsToImplicitVCC(MachineInstr &MI, | 
|  | 2761 | const MachineOperand &Orig) { | 
|  | 2762 |  | 
|  | 2763 | for (MachineOperand &Use : MI.implicit_operands()) { | 
|  | 2764 | if (Use.isUse() && Use.getReg() == AMDGPU::VCC) { | 
|  | 2765 | Use.setIsUndef(Orig.isUndef()); | 
|  | 2766 | Use.setIsKill(Orig.isKill()); | 
|  | 2767 | return; | 
|  | 2768 | } | 
|  | 2769 | } | 
|  | 2770 | } | 
|  | 2771 |  | 
|  | 2772 | MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, | 
|  | 2773 | unsigned Op32) const { | 
|  | 2774 | MachineBasicBlock *MBB = MI.getParent(); | 
|  | 2775 | MachineInstrBuilder Inst32 = | 
|  | 2776 | BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)); | 
|  | 2777 |  | 
|  | 2778 | // Add the dst operand if the 32-bit encoding also has an explicit $vdst. | 
|  | 2779 | // For VOPC instructions, this is replaced by an implicit def of vcc. | 
|  | 2780 | int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); | 
|  | 2781 | if (Op32DstIdx != -1) { | 
|  | 2782 | // dst | 
|  | 2783 | Inst32.add(MI.getOperand(0)); | 
|  | 2784 | } else { | 
|  | 2785 | assert(MI.getOperand(0).getReg() == AMDGPU::VCC && | 
|  | 2786 | "Unexpected case"); | 
|  | 2787 | } | 
|  | 2788 |  | 
|  | 2789 | Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); | 
|  | 2790 |  | 
|  | 2791 | const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); | 
|  | 2792 | if (Src1) | 
|  | 2793 | Inst32.add(*Src1); | 
|  | 2794 |  | 
|  | 2795 | const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); | 
|  | 2796 |  | 
|  | 2797 | if (Src2) { | 
|  | 2798 | int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); | 
|  | 2799 | if (Op32Src2Idx != -1) { | 
|  | 2800 | Inst32.add(*Src2); | 
|  | 2801 | } else { | 
|  | 2802 | // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is | 
|  | 2803 | // replaced with an implicit read of vcc. This was already added | 
|  | 2804 | // during the initial BuildMI, so find it to preserve the flags. | 
|  | 2805 | copyFlagsToImplicitVCC(*Inst32, *Src2); | 
|  | 2806 | } | 
|  | 2807 | } | 
|  | 2808 |  | 
|  | 2809 | return Inst32; | 
| Matt Arsenault | 35b1902 | 2018-08-28 18:22:34 +0000 | [diff] [blame] | 2810 | } | 
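|  |  |  | 
|  |  | // [Editor's illustration, not part of the original file] How canShrink and | 
|  |  | // buildShrunkInst are assumed to fit together in a shrinking pass: confirm | 
|  |  | // an e32 encoding exists and is legal, emit it, then erase the e64 form. | 
|  |  | static void shrinkToVOP32IfPossible(const SIInstrInfo &TII, | 
|  |  |                                     const MachineRegisterInfo &MRI, | 
|  |  |                                     MachineInstr &MI) { | 
|  |  |   if (!TII.hasVALU32BitEncoding(MI.getOpcode()) || !TII.canShrink(MI, MRI)) | 
|  |  |     return; | 
|  |  |   int Op32 = AMDGPU::getVOPe32(MI.getOpcode()); // valid: checked above | 
|  |  |   TII.buildShrunkInst(MI, Op32);                // emits the e32 form before MI | 
|  |  |   MI.eraseFromParent();                         // drop the wide encoding | 
|  |  | } | 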
|  | 2811 |  | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2812 | bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, | 
| Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 2813 | const MachineOperand &MO, | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2814 | const MCOperandInfo &OpInfo) const { | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2815 | // Literal constants use the constant bus. | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2816 | //if (isLiteralConstantLike(MO, OpInfo)) | 
|  | 2817 | // return true; | 
|  | 2818 | if (MO.isImm()) | 
|  | 2819 | return !isInlineConstant(MO, OpInfo); | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2820 |  | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2821 | if (!MO.isReg()) | 
|  | 2822 | return true; // Misc other operands like FrameIndex | 
|  | 2823 |  | 
|  | 2824 | if (!MO.isUse()) | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2825 | return false; | 
|  | 2826 |  | 
|  | 2827 | if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) | 
|  | 2828 | return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); | 
|  | 2829 |  | 
| Dmitry Preobrazhensky | 9111f35 | 2019-06-03 13:51:24 +0000 | [diff] [blame] | 2830 | // The null SGPR does not count against the constant bus. | 
|  | 2831 | if (MO.getReg() == AMDGPU::SGPR_NULL) | 
|  | 2832 | return false; | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2833 |  | 
|  | 2834 | // SGPRs use the constant bus | 
| Dmitry Preobrazhensky | 9111f35 | 2019-06-03 13:51:24 +0000 | [diff] [blame] | 2835 | if (MO.isImplicit()) { | 
|  | 2836 | return MO.getReg() == AMDGPU::M0 || | 
|  | 2837 | MO.getReg() == AMDGPU::VCC || | 
|  | 2838 | MO.getReg() == AMDGPU::VCC_LO; | 
|  | 2839 | } else { | 
|  | 2840 | return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || | 
|  | 2841 | AMDGPU::SReg_64RegClass.contains(MO.getReg()); | 
|  | 2842 | } | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 2843 | } | 
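|  |  |  | 
|  |  | // [Editor's illustration, not part of the original file] A naive count of | 
|  |  | // constant bus reads using the helper above; the verifier further below also | 
|  |  | // de-duplicates repeated SGPR reads and compares the result against the | 
|  |  | // subtarget's getConstantBusLimit(). | 
|  |  | static unsigned countConstantBusUses(const SIInstrInfo &TII, | 
|  |  |                                      const MachineRegisterInfo &MRI, | 
|  |  |                                      const MachineInstr &MI) { | 
|  |  |   unsigned Count = 0; | 
|  |  |   const MCInstrDesc &Desc = MI.getDesc(); | 
|  |  |   for (unsigned I = 0, E = Desc.getNumOperands(); I != E; ++I) | 
|  |  |     if (TII.usesConstantBus(MRI, MI.getOperand(I), Desc.OpInfo[I])) | 
|  |  |       ++Count; | 
|  |  |   return Count; | 
|  |  | } | 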
|  | 2844 |  | 
| Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 2845 | static unsigned findImplicitSGPRRead(const MachineInstr &MI) { | 
|  | 2846 | for (const MachineOperand &MO : MI.implicit_operands()) { | 
|  | 2847 | // We only care about reads. | 
|  | 2848 | if (MO.isDef()) | 
|  | 2849 | continue; | 
|  | 2850 |  | 
|  | 2851 | switch (MO.getReg()) { | 
|  | 2852 | case AMDGPU::VCC: | 
|  | 2853 | case AMDGPU::M0: | 
|  | 2854 | case AMDGPU::FLAT_SCR: | 
|  | 2855 | return MO.getReg(); | 
|  | 2856 |  | 
|  | 2857 | default: | 
|  | 2858 | break; | 
|  | 2859 | } | 
|  | 2860 | } | 
|  | 2861 |  | 
|  | 2862 | return AMDGPU::NoRegister; | 
|  | 2863 | } | 
|  | 2864 |  | 
| Matt Arsenault | 529cf25 | 2016-06-23 01:26:16 +0000 | [diff] [blame] | 2865 | static bool shouldReadExec(const MachineInstr &MI) { | 
|  | 2866 | if (SIInstrInfo::isVALU(MI)) { | 
|  | 2867 | switch (MI.getOpcode()) { | 
|  | 2868 | case AMDGPU::V_READLANE_B32: | 
| Stanislav Mekhanoshin | 8f3da70 | 2019-04-26 16:37:51 +0000 | [diff] [blame] | 2869 | case AMDGPU::V_READLANE_B32_gfx6_gfx7: | 
| Stanislav Mekhanoshin | 61beff0 | 2019-04-26 17:56:03 +0000 | [diff] [blame] | 2870 | case AMDGPU::V_READLANE_B32_gfx10: | 
| Matt Arsenault | 529cf25 | 2016-06-23 01:26:16 +0000 | [diff] [blame] | 2871 | case AMDGPU::V_READLANE_B32_vi: | 
|  | 2872 | case AMDGPU::V_WRITELANE_B32: | 
| Stanislav Mekhanoshin | 8f3da70 | 2019-04-26 16:37:51 +0000 | [diff] [blame] | 2873 | case AMDGPU::V_WRITELANE_B32_gfx6_gfx7: | 
| Stanislav Mekhanoshin | 61beff0 | 2019-04-26 17:56:03 +0000 | [diff] [blame] | 2874 | case AMDGPU::V_WRITELANE_B32_gfx10: | 
| Matt Arsenault | 529cf25 | 2016-06-23 01:26:16 +0000 | [diff] [blame] | 2875 | case AMDGPU::V_WRITELANE_B32_vi: | 
|  | 2876 | return false; | 
|  | 2877 | } | 
|  | 2878 |  | 
|  | 2879 | return true; | 
|  | 2880 | } | 
|  | 2881 |  | 
|  | 2882 | if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) || | 
|  | 2883 | SIInstrInfo::isSALU(MI) || | 
|  | 2884 | SIInstrInfo::isSMRD(MI)) | 
|  | 2885 | return false; | 
|  | 2886 |  | 
|  | 2887 | return true; | 
|  | 2888 | } | 
|  | 2889 |  | 
| Matt Arsenault | cb540bc | 2016-07-19 00:35:03 +0000 | [diff] [blame] | 2890 | static bool isSubRegOf(const SIRegisterInfo &TRI, | 
|  | 2891 | const MachineOperand &SuperVec, | 
|  | 2892 | const MachineOperand &SubReg) { | 
|  | 2893 | if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg())) | 
|  | 2894 | return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); | 
|  | 2895 |  | 
|  | 2896 | return SubReg.getSubReg() != AMDGPU::NoSubRegister && | 
|  | 2897 | SubReg.getReg() == SuperVec.getReg(); | 
|  | 2898 | } | 
|  | 2899 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2900 | bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 2901 | StringRef &ErrInfo) const { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2902 | uint16_t Opcode = MI.getOpcode(); | 
| Tom Stellard | dde28a8 | 2017-05-26 16:40:03 +0000 | [diff] [blame] | 2903 | if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) | 
|  | 2904 | return true; | 
|  | 2905 |  | 
| Matt Arsenault | 89ad17c | 2017-06-12 16:37:55 +0000 | [diff] [blame] | 2906 | const MachineFunction *MF = MI.getParent()->getParent(); | 
|  | 2907 | const MachineRegisterInfo &MRI = MF->getRegInfo(); | 
|  | 2908 |  | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 2909 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); | 
|  | 2910 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); | 
|  | 2911 | int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); | 
|  | 2912 |  | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2913 | // Make sure the number of operands is correct. | 
|  | 2914 | const MCInstrDesc &Desc = get(Opcode); | 
|  | 2915 | if (!Desc.isVariadic() && | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2916 | Desc.getNumOperands() != MI.getNumExplicitOperands()) { | 
|  | 2917 | ErrInfo = "Instruction has wrong number of operands."; | 
|  | 2918 | return false; | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2919 | } | 
|  | 2920 |  | 
| Matt Arsenault | 3d46319 | 2016-11-01 22:55:07 +0000 | [diff] [blame] | 2921 | if (MI.isInlineAsm()) { | 
|  | 2922 | // Verify register classes for inlineasm constraints. | 
|  | 2923 | for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); | 
|  | 2924 | I != E; ++I) { | 
|  | 2925 | const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); | 
|  | 2926 | if (!RC) | 
|  | 2927 | continue; | 
|  | 2928 |  | 
|  | 2929 | const MachineOperand &Op = MI.getOperand(I); | 
|  | 2930 | if (!Op.isReg()) | 
|  | 2931 | continue; | 
|  | 2932 |  | 
|  | 2933 | unsigned Reg = Op.getReg(); | 
|  | 2934 | if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) { | 
|  | 2935 | ErrInfo = "inlineasm operand has incorrect register class."; | 
|  | 2936 | return false; | 
|  | 2937 | } | 
|  | 2938 | } | 
|  | 2939 |  | 
|  | 2940 | return true; | 
|  | 2941 | } | 
|  | 2942 |  | 
| Changpeng Fang | c996393 | 2015-12-18 20:04:28 +0000 | [diff] [blame] | 2943 | // Make sure the register classes are correct. | 
| Tom Stellard | b4a313a | 2014-08-01 00:32:39 +0000 | [diff] [blame] | 2944 | for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2945 | if (MI.getOperand(i).isFPImm()) { | 
| Tom Stellard | fb77f00 | 2015-01-13 22:59:41 +0000 | [diff] [blame] | 2946 | ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast " | 
|  | 2947 | "all fp values to integers."; | 
|  | 2948 | return false; | 
|  | 2949 | } | 
|  | 2950 |  | 
| Marek Olsak | 8eeebcc | 2015-02-18 22:12:41 +0000 | [diff] [blame] | 2951 | int RegClass = Desc.OpInfo[i].RegClass; | 
|  | 2952 |  | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2953 | switch (Desc.OpInfo[i].OperandType) { | 
| Tom Stellard | 1106b1c | 2015-01-20 17:49:41 +0000 | [diff] [blame] | 2954 | case MCOI::OPERAND_REGISTER: | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2955 | if (MI.getOperand(i).isImm()) { | 
| Tom Stellard | 1106b1c | 2015-01-20 17:49:41 +0000 | [diff] [blame] | 2956 | ErrInfo = "Illegal immediate value for operand."; | 
|  | 2957 | return false; | 
|  | 2958 | } | 
|  | 2959 | break; | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2960 | case AMDGPU::OPERAND_REG_IMM_INT32: | 
|  | 2961 | case AMDGPU::OPERAND_REG_IMM_FP32: | 
| Tom Stellard | 1106b1c | 2015-01-20 17:49:41 +0000 | [diff] [blame] | 2962 | break; | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2963 | case AMDGPU::OPERAND_REG_INLINE_C_INT32: | 
|  | 2964 | case AMDGPU::OPERAND_REG_INLINE_C_FP32: | 
|  | 2965 | case AMDGPU::OPERAND_REG_INLINE_C_INT64: | 
|  | 2966 | case AMDGPU::OPERAND_REG_INLINE_C_FP64: | 
|  | 2967 | case AMDGPU::OPERAND_REG_INLINE_C_INT16: | 
|  | 2968 | case AMDGPU::OPERAND_REG_INLINE_C_FP16: { | 
|  | 2969 | const MachineOperand &MO = MI.getOperand(i); | 
|  | 2970 | if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { | 
| Marek Olsak | 8eeebcc | 2015-02-18 22:12:41 +0000 | [diff] [blame] | 2971 | ErrInfo = "Illegal immediate value for operand."; | 
|  | 2972 | return false; | 
| Tom Stellard | a305f93 | 2014-07-02 20:53:44 +0000 | [diff] [blame] | 2973 | } | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2974 | break; | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 2975 | } | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2976 | case MCOI::OPERAND_IMMEDIATE: | 
| Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 2977 | case AMDGPU::OPERAND_KIMM32: | 
| Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 2978 | // Check if this operand is an immediate. | 
|  | 2979 | // FrameIndex operands will be replaced by immediates, so they are | 
|  | 2980 | // allowed. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2981 | if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2982 | ErrInfo = "Expected immediate, but got non-immediate"; | 
|  | 2983 | return false; | 
|  | 2984 | } | 
| Justin Bogner | b03fd12 | 2016-08-17 05:10:15 +0000 | [diff] [blame] | 2985 | LLVM_FALLTHROUGH; | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2986 | default: | 
|  | 2987 | continue; | 
|  | 2988 | } | 
|  | 2989 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2990 | if (!MI.getOperand(i).isReg()) | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2991 | continue; | 
|  | 2992 |  | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2993 | if (RegClass != -1) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 2994 | unsigned Reg = MI.getOperand(i).getReg(); | 
| Matt Arsenault | 1322b6f | 2016-07-09 01:13:56 +0000 | [diff] [blame] | 2995 | if (Reg == AMDGPU::NoRegister || | 
|  | 2996 | TargetRegisterInfo::isVirtualRegister(Reg)) | 
| Tom Stellard | ca700e4 | 2014-03-17 17:03:49 +0000 | [diff] [blame] | 2997 | continue; | 
|  | 2998 |  | 
|  | 2999 | const TargetRegisterClass *RC = RI.getRegClass(RegClass); | 
|  | 3000 | if (!RC->contains(Reg)) { | 
|  | 3001 | ErrInfo = "Operand has incorrect register class."; | 
|  | 3002 | return false; | 
|  | 3003 | } | 
|  | 3004 | } | 
|  | 3005 | } | 
|  | 3006 |  | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3007 | // Verify SDWA | 
|  | 3008 | if (isSDWA(MI)) { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3009 | if (!ST.hasSDWA()) { | 
|  | 3010 | ErrInfo = "SDWA is not supported on this target"; | 
|  | 3011 | return false; | 
|  | 3012 | } | 
|  | 3013 |  | 
|  | 3014 | int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3015 |  | 
|  | 3016 | const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; | 
|  | 3017 |  | 
|  | 3018 | for (int OpIdx : OpIndices) { | 
|  | 3019 | if (OpIdx == -1) | 
|  | 3020 | continue; | 
|  | 3021 | const MachineOperand &MO = MI.getOperand(OpIdx); | 
|  | 3022 |  | 
| Sam Kolton | 3c4933f | 2017-06-22 06:26:41 +0000 | [diff] [blame] | 3023 | if (!ST.hasSDWAScalar()) { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3024 | // Only VGPRs on VI | 
|  | 3025 | if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { | 
|  | 3026 | ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; | 
|  | 3027 | return false; | 
|  | 3028 | } | 
|  | 3029 | } else { | 
|  | 3030 | // No immediates on GFX9 | 
|  | 3031 | if (!MO.isReg()) { | 
|  | 3032 | ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9"; | 
|  | 3033 | return false; | 
|  | 3034 | } | 
|  | 3035 | } | 
|  | 3036 | } | 
|  | 3037 |  | 
| Sam Kolton | 3c4933f | 2017-06-22 06:26:41 +0000 | [diff] [blame] | 3038 | if (!ST.hasSDWAOmod()) { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3039 | // No omod allowed on VI | 
|  | 3040 | const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); | 
|  | 3041 | if (OMod != nullptr && | 
|  | 3042 | (!OMod->isImm() || OMod->getImm() != 0)) { | 
|  | 3043 | ErrInfo = "OMod not allowed in SDWA instructions on VI"; | 
|  | 3044 | return false; | 
|  | 3045 | } | 
|  | 3046 | } | 
|  | 3047 |  | 
|  | 3048 | uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); | 
|  | 3049 | if (isVOPC(BasicOpcode)) { | 
| Sam Kolton | 3c4933f | 2017-06-22 06:26:41 +0000 | [diff] [blame] | 3050 | if (!ST.hasSDWASdst() && DstIdx != -1) { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3051 | // Only vcc allowed as dst on VI for VOPC | 
|  | 3052 | const MachineOperand &Dst = MI.getOperand(DstIdx); | 
|  | 3053 | if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { | 
|  | 3054 | ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; | 
|  | 3055 | return false; | 
|  | 3056 | } | 
| Sam Kolton | a179d25 | 2017-06-27 15:02:23 +0000 | [diff] [blame] | 3057 | } else if (!ST.hasSDWAOutModsVOPC()) { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3058 | // No clamp allowed on GFX9 for VOPC | 
|  | 3059 | const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); | 
| Sam Kolton | a179d25 | 2017-06-27 15:02:23 +0000 | [diff] [blame] | 3060 | if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3061 | ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; | 
|  | 3062 | return false; | 
|  | 3063 | } | 
| Sam Kolton | a179d25 | 2017-06-27 15:02:23 +0000 | [diff] [blame] | 3064 |  | 
|  | 3065 | // No omod allowed on GFX9 for VOPC | 
|  | 3066 | const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); | 
|  | 3067 | if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { | 
|  | 3068 | ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; | 
|  | 3069 | return false; | 
|  | 3070 | } | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3071 | } | 
|  | 3072 | } | 
| Sam Kolton | 5f7f32c | 2017-12-04 16:22:32 +0000 | [diff] [blame] | 3073 |  | 
|  | 3074 | const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); | 
|  | 3075 | if (DstUnused && DstUnused->isImm() && | 
|  | 3076 | DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { | 
|  | 3077 | const MachineOperand &Dst = MI.getOperand(DstIdx); | 
|  | 3078 | if (!Dst.isReg() || !Dst.isTied()) { | 
|  | 3079 | ErrInfo = "Dst register should have tied register"; | 
|  | 3080 | return false; | 
|  | 3081 | } | 
|  | 3082 |  | 
|  | 3083 | const MachineOperand &TiedMO = | 
|  | 3084 | MI.getOperand(MI.findTiedOperandIdx(DstIdx)); | 
|  | 3085 | if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { | 
|  | 3086 | ErrInfo = | 
|  | 3087 | "Dst register should be tied to implicit use of preserved register"; | 
|  | 3088 | return false; | 
|  | 3089 | } else if (TargetRegisterInfo::isPhysicalRegister(TiedMO.getReg()) && | 
|  | 3090 | Dst.getReg() != TiedMO.getReg()) { | 
|  | 3091 | ErrInfo = "Dst register should use same physical register as preserved"; | 
|  | 3092 | return false; | 
|  | 3093 | } | 
|  | 3094 | } | 
| Sam Kolton | 549c89d | 2017-06-21 08:53:38 +0000 | [diff] [blame] | 3095 | } | 
|  | 3096 |  | 
| David Stuttard | f77079f | 2019-01-14 11:55:24 +0000 | [diff] [blame] | 3097 | // Verify MIMG | 
|  | 3098 | if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { | 
|  | 3099 | // Ensure that the return type used is large enough for all the options | 
|  | 3100 | // being used. TFE/LWE require an extra result register. | 
|  | 3101 | const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); | 
|  | 3102 | if (DMask) { | 
|  | 3103 | uint64_t DMaskImm = DMask->getImm(); | 
|  | 3104 | uint32_t RegCount = | 
|  | 3105 | isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); | 
|  | 3106 | const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); | 
|  | 3107 | const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); | 
|  | 3108 | const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); | 
|  | 3109 |  | 
|  | 3110 | // Adjust for packed 16-bit values | 
|  | 3111 | if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) | 
|  | 3112 | RegCount >>= 1; | 
|  | 3113 |  | 
|  | 3114 | // Adjust if using LWE or TFE | 
|  | 3115 | if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) | 
|  | 3116 | RegCount += 1; | 
|  | 3117 |  | 
|  | 3118 | const uint32_t DstIdx = | 
|  | 3119 | AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); | 
|  | 3120 | const MachineOperand &Dst = MI.getOperand(DstIdx); | 
|  | 3121 | if (Dst.isReg()) { | 
|  | 3122 | const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); | 
|  | 3123 | uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; | 
|  | 3124 | if (RegCount > DstSize) { | 
|  | 3125 | ErrInfo = "MIMG instruction returns too many registers for dst " | 
|  | 3126 | "register class"; | 
|  | 3127 | return false; | 
|  | 3128 | } | 
|  | 3129 | } | 
|  | 3130 | } | 
|  | 3131 | } | 
|  | 3132 |  | 
| Tim Renouf | 2a99fa2 | 2018-02-28 19:10:32 +0000 | [diff] [blame] | 3133 | // Verify VOP*. Ignore multiple sgpr operands on writelane. | 
|  | 3134 | if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 | 
|  | 3135 | && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { | 
| Matt Arsenault | e368cb3 | 2014-12-11 23:37:32 +0000 | [diff] [blame] | 3136 | // Only look at the true operands. Only a real operand can use the constant | 
|  | 3137 | // bus, and we don't want to check pseudo-operands like the source modifier | 
|  | 3138 | // flags. | 
|  | 3139 | const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; | 
|  | 3140 |  | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 3141 | unsigned ConstantBusCount = 0; | 
| Stanislav Mekhanoshin | a4bfb3c | 2018-04-24 18:17:55 +0000 | [diff] [blame] | 3142 | unsigned LiteralCount = 0; | 
| Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 3143 |  | 
|  | 3144 | if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) | 
|  | 3145 | ++ConstantBusCount; | 
|  | 3146 |  | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3147 | SmallVector<unsigned, 2> SGPRsUsed; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3148 | unsigned SGPRUsed = findImplicitSGPRRead(MI); | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3149 | if (SGPRUsed != AMDGPU::NoRegister) { | 
| Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 3150 | ++ConstantBusCount; | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3151 | SGPRsUsed.push_back(SGPRUsed); | 
|  | 3152 | } | 
| Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 3153 |  | 
| Matt Arsenault | e368cb3 | 2014-12-11 23:37:32 +0000 | [diff] [blame] | 3154 | for (int OpIdx : OpIndices) { | 
|  | 3155 | if (OpIdx == -1) | 
|  | 3156 | break; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3157 | const MachineOperand &MO = MI.getOperand(OpIdx); | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 3158 | if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 3159 | if (MO.isReg()) { | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 3160 | SGPRUsed = MO.getReg(); | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3161 | if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { | 
|  | 3162 | return !RI.regsOverlap(SGPRUsed, SGPR); | 
|  | 3163 | })) { | 
|  | 3164 | ++ConstantBusCount; | 
|  | 3165 | SGPRsUsed.push_back(SGPRUsed); | 
|  | 3166 | } | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 3167 | } else { | 
|  | 3168 | ++ConstantBusCount; | 
| Stanislav Mekhanoshin | a4bfb3c | 2018-04-24 18:17:55 +0000 | [diff] [blame] | 3169 | ++LiteralCount; | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 3170 | } | 
|  | 3171 | } | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 3172 | } | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3173 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | 
|  | 3174 | // v_writelane_b32 is an exception to the constant bus restriction: | 
|  | 3175 | // vsrc0 can be an SGPR, a constant, or m0, and the lane select an SGPR, m0, or an inline constant. | 
|  | 3176 | if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && | 
|  | 3177 | Opcode != AMDGPU::V_WRITELANE_B32) { | 
|  | 3178 | ErrInfo = "VOP* instruction violates constant bus restriction"; | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 3179 | return false; | 
|  | 3180 | } | 
| Stanislav Mekhanoshin | a4bfb3c | 2018-04-24 18:17:55 +0000 | [diff] [blame] | 3181 |  | 
|  | 3182 | if (isVOP3(MI) && LiteralCount) { | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3183 | if (LiteralCount && !ST.hasVOP3Literal()) { | 
|  | 3184 | ErrInfo = "VOP3 instruction uses literal"; | 
|  | 3185 | return false; | 
|  | 3186 | } | 
|  | 3187 | if (LiteralCount > 1) { | 
|  | 3188 | ErrInfo = "VOP3 instruction uses more than one literal"; | 
|  | 3189 | return false; | 
|  | 3190 | } | 
| Stanislav Mekhanoshin | a4bfb3c | 2018-04-24 18:17:55 +0000 | [diff] [blame] | 3191 | } | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 3192 | } | 
|  | 3193 |  | 
| Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 3194 | // Verify misc. restrictions on specific instructions. | 
|  | 3195 | if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || | 
|  | 3196 | Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3197 | const MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
|  | 3198 | const MachineOperand &Src1 = MI.getOperand(Src1Idx); | 
|  | 3199 | const MachineOperand &Src2 = MI.getOperand(Src2Idx); | 
| Matt Arsenault | becb140 | 2014-06-23 18:28:31 +0000 | [diff] [blame] | 3200 | if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { | 
|  | 3201 | if (!compareMachineOp(Src0, Src1) && | 
|  | 3202 | !compareMachineOp(Src0, Src2)) { | 
|  | 3203 | ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; | 
|  | 3204 | return false; | 
|  | 3205 | } | 
|  | 3206 | } | 
|  | 3207 | } | 
|  | 3208 |  | 
| Nicolai Haehnle | 79ea85c | 2019-05-07 09:19:09 +0000 | [diff] [blame] | 3209 | if (isSOP2(MI) || isSOPC(MI)) { | 
|  | 3210 | const MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
|  | 3211 | const MachineOperand &Src1 = MI.getOperand(Src1Idx); | 
|  | 3212 | unsigned Immediates = 0; | 
|  | 3213 |  | 
|  | 3214 | if (!Src0.isReg() && | 
|  | 3215 | !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) | 
|  | 3216 | Immediates++; | 
|  | 3217 | if (!Src1.isReg() && | 
|  | 3218 | !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) | 
|  | 3219 | Immediates++; | 
|  | 3220 |  | 
|  | 3221 | if (Immediates > 1) { | 
|  | 3222 | ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; | 
|  | 3223 | return false; | 
|  | 3224 | } | 
|  | 3225 | } | 
|  | 3226 |  | 
| Matt Arsenault | 7ccf6cd | 2016-09-16 21:41:16 +0000 | [diff] [blame] | 3227 | if (isSOPK(MI)) { | 
| Stanislav Mekhanoshin | 491746a | 2019-05-06 22:49:45 +0000 | [diff] [blame] | 3228 | auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); | 
|  | 3229 | if (Desc.isBranch()) { | 
|  | 3230 | if (!Op->isMBB()) { | 
|  | 3231 | ErrInfo = "invalid branch target for SOPK instruction"; | 
| Matt Arsenault | 7ccf6cd | 2016-09-16 21:41:16 +0000 | [diff] [blame] | 3232 | return false; | 
|  | 3233 | } | 
|  | 3234 | } else { | 
| Stanislav Mekhanoshin | 491746a | 2019-05-06 22:49:45 +0000 | [diff] [blame] | 3235 | uint64_t Imm = Op->getImm(); | 
|  | 3236 | if (sopkIsZext(MI)) { | 
|  | 3237 | if (!isUInt<16>(Imm)) { | 
|  | 3238 | ErrInfo = "invalid immediate for SOPK instruction"; | 
|  | 3239 | return false; | 
|  | 3240 | } | 
|  | 3241 | } else { | 
|  | 3242 | if (!isInt<16>(Imm)) { | 
|  | 3243 | ErrInfo = "invalid immediate for SOPK instruction"; | 
|  | 3244 | return false; | 
|  | 3245 | } | 
| Matt Arsenault | 7ccf6cd | 2016-09-16 21:41:16 +0000 | [diff] [blame] | 3246 | } | 
|  | 3247 | } | 
|  | 3248 | } | 
|  | 3249 |  | 
| Matt Arsenault | cb540bc | 2016-07-19 00:35:03 +0000 | [diff] [blame] | 3250 | if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || | 
|  | 3251 | Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || | 
|  | 3252 | Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || | 
|  | 3253 | Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { | 
|  | 3254 | const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || | 
|  | 3255 | Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; | 
|  | 3256 |  | 
|  | 3257 | const unsigned StaticNumOps = Desc.getNumOperands() + | 
|  | 3258 | Desc.getNumImplicitUses(); | 
|  | 3259 | const unsigned NumImplicitOps = IsDst ? 2 : 1; | 
|  | 3260 |  | 
| Nicolai Haehnle | 368972c | 2016-11-02 17:03:11 +0000 | [diff] [blame] | 3261 | // Allow additional implicit operands. This allows a fixup done by the post | 
|  | 3262 | // RA scheduler where the main implicit operand is killed and implicit-defs | 
|  | 3263 | // are added for sub-registers that remain live after this instruction. | 
|  | 3264 | if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { | 
| Matt Arsenault | cb540bc | 2016-07-19 00:35:03 +0000 | [diff] [blame] | 3265 | ErrInfo = "missing implicit register operands"; | 
|  | 3266 | return false; | 
|  | 3267 | } | 
|  | 3268 |  | 
|  | 3269 | const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); | 
|  | 3270 | if (IsDst) { | 
|  | 3271 | if (!Dst->isUse()) { | 
|  | 3272 | ErrInfo = "v_movreld_b32 vdst should be a use operand"; | 
|  | 3273 | return false; | 
|  | 3274 | } | 
|  | 3275 |  | 
|  | 3276 | unsigned UseOpIdx; | 
|  | 3277 | if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || | 
|  | 3278 | UseOpIdx != StaticNumOps + 1) { | 
|  | 3279 | ErrInfo = "movrel implicit operands should be tied"; | 
|  | 3280 | return false; | 
|  | 3281 | } | 
|  | 3282 | } | 
|  | 3283 |  | 
|  | 3284 | const MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
|  | 3285 | const MachineOperand &ImpUse | 
|  | 3286 | = MI.getOperand(StaticNumOps + NumImplicitOps - 1); | 
|  | 3287 | if (!ImpUse.isReg() || !ImpUse.isUse() || | 
|  | 3288 | !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { | 
|  | 3289 | ErrInfo = "src0 should be subreg of implicit vector use"; | 
|  | 3290 | return false; | 
|  | 3291 | } | 
|  | 3292 | } | 
|  | 3293 |  | 
| Matt Arsenault | d092a06 | 2015-10-02 18:58:37 +0000 | [diff] [blame] | 3294 | // Make sure we aren't losing exec uses in the td files. This mostly requires | 
|  | 3295 | // being careful when using let Uses to try to add other use registers. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3296 | if (shouldReadExec(MI)) { | 
|  | 3297 | if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { | 
| Matt Arsenault | d092a06 | 2015-10-02 18:58:37 +0000 | [diff] [blame] | 3298 | ErrInfo = "VALU instruction does not implicitly read exec mask"; | 
|  | 3299 | return false; | 
|  | 3300 | } | 
|  | 3301 | } | 
|  | 3302 |  | 
| Matt Arsenault | 7b64755 | 2016-10-28 21:55:15 +0000 | [diff] [blame] | 3303 | if (isSMRD(MI)) { | 
|  | 3304 | if (MI.mayStore()) { | 
|  | 3305 | // The register offset form of scalar stores may only use m0 as the | 
|  | 3306 | // soffset register. | 
|  | 3307 | const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); | 
|  | 3308 | if (Soff && Soff->getReg() != AMDGPU::M0) { | 
|  | 3309 | ErrInfo = "scalar stores must use m0 as offset register"; | 
|  | 3310 | return false; | 
|  | 3311 | } | 
|  | 3312 | } | 
|  | 3313 | } | 
|  | 3314 |  | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 3315 | if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) { | 
| Matt Arsenault | 89ad17c | 2017-06-12 16:37:55 +0000 | [diff] [blame] | 3316 | const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); | 
|  | 3317 | if (Offset->getImm() != 0) { | 
|  | 3318 | ErrInfo = "subtarget does not support offsets in flat instructions"; | 
|  | 3319 | return false; | 
|  | 3320 | } | 
|  | 3321 | } | 
|  | 3322 |  | 
| Stanislav Mekhanoshin | 692560d | 2019-05-01 16:32:58 +0000 | [diff] [blame] | 3323 | if (isMIMG(MI)) { | 
|  | 3324 | const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); | 
|  | 3325 | if (DimOp) { | 
|  | 3326 | int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, | 
|  | 3327 | AMDGPU::OpName::vaddr0); | 
|  | 3328 | int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); | 
|  | 3329 | const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); | 
|  | 3330 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = | 
|  | 3331 | AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); | 
|  | 3332 | const AMDGPU::MIMGDimInfo *Dim = | 
|  | 3333 | AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); | 
|  | 3334 |  | 
|  | 3335 | if (!Dim) { | 
|  | 3336 | ErrInfo = "dim is out of range"; | 
|  | 3337 | return false; | 
|  | 3338 | } | 
|  | 3339 |  | 
|  | 3340 | bool IsNSA = SRsrcIdx - VAddr0Idx > 1; | 
|  | 3341 | unsigned AddrWords = BaseOpcode->NumExtraArgs + | 
|  | 3342 | (BaseOpcode->Gradients ? Dim->NumGradients : 0) + | 
|  | 3343 | (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + | 
|  | 3344 | (BaseOpcode->LodOrClampOrMip ? 1 : 0); | 
|  | 3345 |  | 
|  | 3346 | unsigned VAddrWords; | 
|  | 3347 | if (IsNSA) { | 
|  | 3348 | VAddrWords = SRsrcIdx - VAddr0Idx; | 
|  | 3349 | } else { | 
|  | 3350 | const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); | 
|  | 3351 | VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; | 
|  | 3352 | if (AddrWords > 8) | 
|  | 3353 | AddrWords = 16; | 
|  | 3354 | else if (AddrWords > 4) | 
|  | 3355 | AddrWords = 8; | 
|  | 3356 | else if (AddrWords == 3 && VAddrWords == 4) { | 
|  | 3357 | // CodeGen uses the V4 variant of instructions for three addresses, | 
|  | 3358 | // because the selection DAG does not support non-power-of-two types. | 
|  | 3359 | AddrWords = 4; | 
|  | 3360 | } | 
|  | 3361 | } | 
|  | 3362 |  | 
|  | 3363 | if (VAddrWords != AddrWords) { | 
|  | 3364 | ErrInfo = "bad vaddr size"; | 
|  | 3365 | return false; | 
|  | 3366 | } | 
|  | 3367 | } | 
|  | 3368 | } | 
|  | 3369 |  | 
| Stanislav Mekhanoshin | 4329361 | 2018-05-08 16:53:02 +0000 | [diff] [blame] | 3370 | const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); | 
|  | 3371 | if (DppCt) { | 
|  | 3372 | using namespace AMDGPU::DPP; | 
|  | 3373 |  | 
|  | 3374 | unsigned DC = DppCt->getImm(); | 
|  | 3375 | if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || | 
|  | 3376 | DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || | 
|  | 3377 | (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || | 
|  | 3378 | (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || | 
|  | 3379 | (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || | 
| Stanislav Mekhanoshin | 245b5ba | 2019-06-12 18:02:41 +0000 | [diff] [blame^] | 3380 | (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || | 
|  | 3381 | (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { | 
| Stanislav Mekhanoshin | 4329361 | 2018-05-08 16:53:02 +0000 | [diff] [blame] | 3382 | ErrInfo = "Invalid dpp_ctrl value"; | 
|  | 3383 | return false; | 
|  | 3384 | } | 
| Stanislav Mekhanoshin | 245b5ba | 2019-06-12 18:02:41 +0000 | [diff] [blame^] | 3385 | if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && | 
|  | 3386 | ST.getGeneration() >= AMDGPUSubtarget::GFX10) { | 
|  | 3387 | ErrInfo = "Invalid dpp_ctrl value: " | 
|  | 3388 | "wavefront shifts are not supported on GFX10+"; | 
|  | 3389 | return false; | 
|  | 3390 | } | 
|  | 3391 | if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && | 
|  | 3392 | ST.getGeneration() >= AMDGPUSubtarget::GFX10) { | 
|  | 3393 | ErrInfo = "Invalid dpp_ctrl value: " | 
|  | 3394 | "broadcats are not supported on GFX10+"; | 
|  | 3395 | return false; | 
|  | 3396 | } | 
|  | 3397 | if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && | 
|  | 3398 | ST.getGeneration() < AMDGPUSubtarget::GFX10) { | 
|  | 3399 | ErrInfo = "Invalid dpp_ctrl value: " | 
|  | 3400 | "row_share and row_xmask are not supported before GFX10"; | 
|  | 3401 | return false; | 
|  | 3402 | } | 
| Stanislav Mekhanoshin | 4329361 | 2018-05-08 16:53:02 +0000 | [diff] [blame] | 3403 | } | 
|  | 3404 |  | 
| Tom Stellard | 93fabce | 2013-10-10 17:11:55 +0000 | [diff] [blame] | 3405 | return true; | 
|  | 3406 | } | 
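|  |  |  | 
|  |  | // [Editor's illustration, not part of the original file] verifyInstruction is | 
|  |  | // the hook the machine verifier drives; a debugging helper could call it the | 
|  |  | // same way and print the message it fills in on failure. | 
|  |  | static bool verifyBlock(const SIInstrInfo &TII, const MachineBasicBlock &MBB) { | 
|  |  |   for (const MachineInstr &MI : MBB) { | 
|  |  |     StringRef ErrInfo; | 
|  |  |     if (!TII.verifyInstruction(MI, ErrInfo)) { | 
|  |  |       llvm::errs() << "Malformed instruction: " << ErrInfo << '\n'; | 
|  |  |       return false; | 
|  |  |     } | 
|  |  |   } | 
|  |  |   return true; | 
|  |  | } | 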
|  | 3407 |  | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 3408 | unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3409 | switch (MI.getOpcode()) { | 
|  | 3410 | default: return AMDGPU::INSTRUCTION_LIST_END; | 
|  | 3411 | case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; | 
|  | 3412 | case AMDGPU::COPY: return AMDGPU::COPY; | 
|  | 3413 | case AMDGPU::PHI: return AMDGPU::PHI; | 
| Tom Stellard | 204e61b | 2014-04-07 19:45:45 +0000 | [diff] [blame] | 3414 | case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; | 
| Connor Abbott | 8c217d0 | 2017-08-04 18:36:49 +0000 | [diff] [blame] | 3415 | case AMDGPU::WQM: return AMDGPU::WQM; | 
| Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 3416 | case AMDGPU::WWM: return AMDGPU::WWM; | 
| Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 3417 | case AMDGPU::S_MOV_B32: | 
|  | 3418 | return MI.getOperand(1).isReg() ? | 
| Tom Stellard | 8c12fd9 | 2014-03-24 16:12:34 +0000 | [diff] [blame] | 3419 | AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; | 
| Tom Stellard | 80942a1 | 2014-09-05 14:07:59 +0000 | [diff] [blame] | 3420 | case AMDGPU::S_ADD_I32: | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 3421 | return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32; | 
|  | 3422 | case AMDGPU::S_ADDC_U32: | 
|  | 3423 | return AMDGPU::V_ADDC_U32_e32; | 
| Tom Stellard | 80942a1 | 2014-09-05 14:07:59 +0000 | [diff] [blame] | 3424 | case AMDGPU::S_SUB_I32: | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 3425 | return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32; | 
|  | 3426 | // FIXME: These are not consistently handled, and selected when the carry is | 
|  | 3427 | // used. | 
|  | 3428 | case AMDGPU::S_ADD_U32: | 
|  | 3429 | return AMDGPU::V_ADD_I32_e32; | 
|  | 3430 | case AMDGPU::S_SUB_U32: | 
|  | 3431 | return AMDGPU::V_SUB_I32_e32; | 
| Matt Arsenault | 43b8e4e | 2013-11-18 20:09:29 +0000 | [diff] [blame] | 3432 | case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; | 
| Stanislav Mekhanoshin | 971cb8b | 2019-05-06 22:27:05 +0000 | [diff] [blame] | 3433 | case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; | 
| Michael Liao | efb4f9e | 2019-03-18 20:40:09 +0000 | [diff] [blame] | 3434 | case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; | 
|  | 3435 | case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; | 
| Matt Arsenault | 124384f | 2016-09-09 23:32:53 +0000 | [diff] [blame] | 3436 | case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; | 
|  | 3437 | case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; | 
|  | 3438 | case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 3439 | case AMDGPU::S_XNOR_B32: | 
|  | 3440 | return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; | 
| Matt Arsenault | 124384f | 2016-09-09 23:32:53 +0000 | [diff] [blame] | 3441 | case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; | 
|  | 3442 | case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; | 
|  | 3443 | case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; | 
|  | 3444 | case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3445 | case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; | 
|  | 3446 | case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; | 
|  | 3447 | case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; | 
|  | 3448 | case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; | 
|  | 3449 | case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; | 
|  | 3450 | case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; | 
| Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 3451 | case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; | 
|  | 3452 | case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; | 
| Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 3453 | case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; | 
|  | 3454 | case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; | 
| Marek Olsak | 63a7b08 | 2015-03-24 13:40:21 +0000 | [diff] [blame] | 3455 | case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; | 
| Matt Arsenault | 43160e7 | 2014-06-18 17:13:57 +0000 | [diff] [blame] | 3456 | case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; | 
| Matt Arsenault | 2c33562 | 2014-04-09 07:16:16 +0000 | [diff] [blame] | 3457 | case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 3458 | case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; | 
| Matt Arsenault | 0cb92e1 | 2014-04-11 19:25:18 +0000 | [diff] [blame] | 3459 | case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; | 
|  | 3460 | case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; | 
|  | 3461 | case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; | 
|  | 3462 | case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; | 
|  | 3463 | case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; | 
|  | 3464 | case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 3465 | case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; | 
|  | 3466 | case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; | 
|  | 3467 | case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; | 
|  | 3468 | case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; | 
|  | 3469 | case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; | 
|  | 3470 | case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; | 
| Matt Arsenault | 7b1dc2c | 2016-09-17 02:02:19 +0000 | [diff] [blame] | 3471 | case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; | 
|  | 3472 | case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; | 
| Marek Olsak | c536850 | 2015-01-15 18:43:01 +0000 | [diff] [blame] | 3473 | case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; | 
| Matt Arsenault | 295b86e | 2014-06-17 17:36:27 +0000 | [diff] [blame] | 3474 | case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; | 
| Matt Arsenault | 8579601 | 2014-06-17 17:36:24 +0000 | [diff] [blame] | 3475 | case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; | 
| Marek Olsak | d2af89d | 2015-03-04 17:33:45 +0000 | [diff] [blame] | 3476 | case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 3477 | case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; | 
|  | 3478 | case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3479 | } | 
| Michael Liao | efb4f9e | 2019-03-18 20:40:09 +0000 | [diff] [blame] | 3480 | llvm_unreachable( | 
|  | 3481 | "Unexpected scalar opcode without corresponding vector one!"); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3482 | } | 
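|  |  |  | 
|  |  | // [Editor's illustration, not part of the original file] A sketch of how the | 
|  |  | // mapping above is typically consumed when a scalar instruction has to be | 
|  |  | // rewritten for the VALU; INSTRUCTION_LIST_END means there is no direct | 
|  |  | // vector equivalent. Operand legalization still has to follow. | 
|  |  | static bool rewriteOpcodeForVALU(const SIInstrInfo &TII, MachineInstr &MI) { | 
|  |  |   unsigned NewOpc = TII.getVALUOp(MI); | 
|  |  |   if (NewOpc == AMDGPU::INSTRUCTION_LIST_END) | 
|  |  |     return false; // needs custom lowering instead | 
|  |  |   if (NewOpc != MI.getOpcode()) | 
|  |  |     MI.setDesc(TII.get(NewOpc)); | 
|  |  |   return true; | 
|  |  | } | 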
|  | 3483 |  | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3484 | const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, | 
|  | 3485 | unsigned OpNo) const { | 
|  | 3486 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); | 
|  | 3487 | const MCInstrDesc &Desc = get(MI.getOpcode()); | 
|  | 3488 | if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || | 
| Matt Arsenault | 102a704 | 2014-12-11 23:37:34 +0000 | [diff] [blame] | 3489 | Desc.OpInfo[OpNo].RegClass == -1) { | 
|  | 3490 | unsigned Reg = MI.getOperand(OpNo).getReg(); | 
|  | 3491 |  | 
|  | 3492 | if (TargetRegisterInfo::isVirtualRegister(Reg)) | 
|  | 3493 | return MRI.getRegClass(Reg); | 
| Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 3494 | return RI.getPhysRegClass(Reg); | 
| Matt Arsenault | 102a704 | 2014-12-11 23:37:34 +0000 | [diff] [blame] | 3495 | } | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3496 |  | 
|  | 3497 | unsigned RCID = Desc.OpInfo[OpNo].RegClass; | 
|  | 3498 | return RI.getRegClass(RCID); | 
|  | 3499 | } | 
|  | 3500 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3501 | void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3502 | MachineBasicBlock::iterator I = MI; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3503 | MachineBasicBlock *MBB = MI.getParent(); | 
|  | 3504 | MachineOperand &MO = MI.getOperand(OpIdx); | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3505 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 3506 | const SIRegisterInfo *TRI = | 
|  | 3507 | static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3508 | unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3509 | const TargetRegisterClass *RC = RI.getRegClass(RCID); | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 3510 | unsigned Size = TRI->getRegSizeInBits(*RC); | 
|  | 3511 | unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3512 | if (MO.isReg()) | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3513 | Opcode = AMDGPU::COPY; | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3514 | else if (RI.isSGPRClass(RC)) | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 3515 | Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3516 |  | 
| Matt Arsenault | 3a4d86a | 2013-11-18 20:09:55 +0000 | [diff] [blame] | 3517 | const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3518 | if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) | 
| Tom Stellard | 0c93c9e | 2014-09-05 14:08:01 +0000 | [diff] [blame] | 3519 | VRC = &AMDGPU::VReg_64RegClass; | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3520 | else | 
| Tom Stellard | 45c0b3a | 2015-01-07 20:59:25 +0000 | [diff] [blame] | 3521 | VRC = &AMDGPU::VGPR_32RegClass; | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3522 |  | 
| Matt Arsenault | 3a4d86a | 2013-11-18 20:09:55 +0000 | [diff] [blame] | 3523 | unsigned Reg = MRI.createVirtualRegister(VRC); | 
| Matt Arsenault | 3f3a275 | 2014-10-13 15:47:59 +0000 | [diff] [blame] | 3524 | DebugLoc DL = MBB->findDebugLoc(I); | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 3525 | BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 3526 | MO.ChangeToRegister(Reg, false); | 
|  | 3527 | } | 
|  | 3528 |  | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 3529 | unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, | 
|  | 3530 | MachineRegisterInfo &MRI, | 
|  | 3531 | MachineOperand &SuperReg, | 
|  | 3532 | const TargetRegisterClass *SuperRC, | 
|  | 3533 | unsigned SubIdx, | 
|  | 3534 | const TargetRegisterClass *SubRC) | 
|  | 3535 | const { | 
| Matt Arsenault | c8e2ce4 | 2015-09-24 07:16:37 +0000 | [diff] [blame] | 3536 | MachineBasicBlock *MBB = MI->getParent(); | 
|  | 3537 | DebugLoc DL = MI->getDebugLoc(); | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 3538 | unsigned SubReg = MRI.createVirtualRegister(SubRC); | 
|  | 3539 |  | 
| Matt Arsenault | c8e2ce4 | 2015-09-24 07:16:37 +0000 | [diff] [blame] | 3540 | if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { | 
|  | 3541 | BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) | 
|  | 3542 | .addReg(SuperReg.getReg(), 0, SubIdx); | 
|  | 3543 | return SubReg; | 
|  | 3544 | } | 
|  | 3545 |  | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 3546 | // Just in case the super register is itself a sub-register, copy it to a new | 
| Matt Arsenault | 08d8494 | 2014-06-03 23:06:13 +0000 | [diff] [blame] | 3547 | // value so we don't need to worry about merging its subreg index with the | 
|  | 3548 | // SubIdx passed to this function. The register coalescer should be able to | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 3549 | // eliminate this extra copy. | 
| Matt Arsenault | c8e2ce4 | 2015-09-24 07:16:37 +0000 | [diff] [blame] | 3550 | unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 3551 |  | 
| Matt Arsenault | 7480a0e | 2014-11-17 21:11:37 +0000 | [diff] [blame] | 3552 | BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) | 
|  | 3553 | .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); | 
|  | 3554 |  | 
|  | 3555 | BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) | 
|  | 3556 | .addReg(NewSuperReg, 0, SubIdx); | 
|  | 3557 |  | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 3558 | return SubReg; | 
|  | 3559 | } | 
|  | 3560 |  | 
| Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 3561 | MachineOperand SIInstrInfo::buildExtractSubRegOrImm( | 
|  | 3562 | MachineBasicBlock::iterator MII, | 
|  | 3563 | MachineRegisterInfo &MRI, | 
|  | 3564 | MachineOperand &Op, | 
|  | 3565 | const TargetRegisterClass *SuperRC, | 
|  | 3566 | unsigned SubIdx, | 
|  | 3567 | const TargetRegisterClass *SubRC) const { | 
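|  |  | // For a 64-bit immediate, sub0 selects the low 32 bits and sub1 the high 32 | 
|  |  | // bits; e.g. (a sketch) an immediate of 0x100000002 yields 2 for sub0 and 1 | 
|  |  | // for sub1. | 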
|  | 3568 | if (Op.isImm()) { | 
| Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 3569 | if (SubIdx == AMDGPU::sub0) | 
| Matt Arsenault | d745c28 | 2016-09-08 17:44:36 +0000 | [diff] [blame] | 3570 | return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); | 
| Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 3571 | if (SubIdx == AMDGPU::sub1) | 
| Matt Arsenault | d745c28 | 2016-09-08 17:44:36 +0000 | [diff] [blame] | 3572 | return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); | 
| Matt Arsenault | 248b7b6 | 2014-03-24 20:08:09 +0000 | [diff] [blame] | 3573 |  | 
|  | 3574 | llvm_unreachable("Unhandled register index for immediate"); | 
|  | 3575 | } | 
|  | 3576 |  | 
|  | 3577 | unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, | 
|  | 3578 | SubIdx, SubRC); | 
|  | 3579 | return MachineOperand::CreateReg(SubReg, false); | 
|  | 3580 | } | 
|  | 3581 |  | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 3582 | // Change the order of operands from (0, 1, 2) to (0, 2, 1) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3583 | void SIInstrInfo::swapOperands(MachineInstr &Inst) const { | 
|  | 3584 | assert(Inst.getNumExplicitOperands() == 3); | 
|  | 3585 | MachineOperand Op1 = Inst.getOperand(1); | 
|  | 3586 | Inst.RemoveOperand(1); | 
|  | 3587 | Inst.addOperand(Op1); | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 3588 | } | 
|  | 3589 |  | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3590 | bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, | 
|  | 3591 | const MCOperandInfo &OpInfo, | 
|  | 3592 | const MachineOperand &MO) const { | 
|  | 3593 | if (!MO.isReg()) | 
|  | 3594 | return false; | 
|  | 3595 |  | 
|  | 3596 | unsigned Reg = MO.getReg(); | 
|  | 3597 | const TargetRegisterClass *RC = | 
|  | 3598 | TargetRegisterInfo::isVirtualRegister(Reg) ? | 
|  | 3599 | MRI.getRegClass(Reg) : | 
|  | 3600 | RI.getPhysRegClass(Reg); | 
|  | 3601 |  | 
| Nicolai Haehnle | 82fc962 | 2016-01-07 17:10:29 +0000 | [diff] [blame] | 3602 | const SIRegisterInfo *TRI = | 
|  | 3603 | static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); | 
|  | 3604 | RC = TRI->getSubRegClass(RC, MO.getSubReg()); | 
|  | 3605 |  | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3606 | // In order to be legal, the common sub-class must be equal to the | 
|  | 3607 | // class of the current operand.  For example: | 
|  | 3608 | // | 
| Sam Kolton | 1eeb11b | 2016-09-09 14:44:04 +0000 | [diff] [blame] | 3609 | // v_mov_b32 s0 ; Operand defined as vsrc_b32 | 
|  | 3610 | //              ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3611 | // | 
|  | 3612 | // s_sendmsg 0, s0 ; Operand defined as m0reg | 
|  | 3613 | //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL | 
|  | 3614 |  | 
|  | 3615 | return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC; | 
|  | 3616 | } | 
|  | 3617 |  | 
|  | 3618 | bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, | 
|  | 3619 | const MCOperandInfo &OpInfo, | 
|  | 3620 | const MachineOperand &MO) const { | 
|  | 3621 | if (MO.isReg()) | 
|  | 3622 | return isLegalRegOperand(MRI, OpInfo, MO); | 
|  | 3623 |  | 
|  | 3624 | // Handle non-register types that are treated like immediates. | 
|  | 3625 | assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); | 
|  | 3626 | return true; | 
|  | 3627 | } | 
|  | 3628 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3629 | bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3630 | const MachineOperand *MO) const { | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3631 | const MachineFunction &MF = *MI.getParent()->getParent(); | 
|  | 3632 | const MachineRegisterInfo &MRI = MF.getRegInfo(); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3633 | const MCInstrDesc &InstDesc = MI.getDesc(); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3634 | const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3635 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3636 | const TargetRegisterClass *DefinedRC = | 
|  | 3637 | OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; | 
|  | 3638 | if (!MO) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3639 | MO = &MI.getOperand(OpIdx); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3640 |  | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3641 | int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); | 
|  | 3642 | int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; | 
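|  |  | // A sketch of the effect: on pre-GFX10 targets with a single constant bus | 
|  |  | // slot, placing s1 into src1 of a V_ADD_F32_e64 whose src0 already reads s0 | 
|  |  | // would need two slots, so the checks below reject it and the caller must | 
|  |  | // legalize the operand instead. | 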
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 3643 | if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3644 | if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) | 
|  | 3645 | return false; | 
| Matt Arsenault | fcb345f | 2016-02-11 06:15:39 +0000 | [diff] [blame] | 3646 |  | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3647 | SmallDenseSet<RegSubRegPair> SGPRsUsed; | 
| Matt Arsenault | fcb345f | 2016-02-11 06:15:39 +0000 | [diff] [blame] | 3648 | if (MO->isReg()) | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3649 | SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); | 
| Matt Arsenault | fcb345f | 2016-02-11 06:15:39 +0000 | [diff] [blame] | 3650 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3651 | for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 3652 | if (i == OpIdx) | 
|  | 3653 | continue; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3654 | const MachineOperand &Op = MI.getOperand(i); | 
| Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 3655 | if (Op.isReg()) { | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3656 | RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); | 
|  | 3657 | if (!SGPRsUsed.count(SGPR) && | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 3658 | usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3659 | if (--ConstantBusLimit <= 0) | 
|  | 3660 | return false; | 
|  | 3661 | SGPRsUsed.insert(SGPR); | 
| Matt Arsenault | ffc8275 | 2016-07-05 17:09:01 +0000 | [diff] [blame] | 3662 | } | 
|  | 3663 | } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3664 | if (--ConstantBusLimit <= 0) | 
|  | 3665 | return false; | 
|  | 3666 | } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && | 
|  | 3667 | isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { | 
|  | 3668 | if (!VOP3LiteralLimit--) | 
|  | 3669 | return false; | 
|  | 3670 | if (--ConstantBusLimit <= 0) | 
|  | 3671 | return false; | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 3672 | } | 
|  | 3673 | } | 
|  | 3674 | } | 
|  | 3675 |  | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3676 | if (MO->isReg()) { | 
|  | 3677 | assert(DefinedRC); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3678 | return isLegalRegOperand(MRI, OpInfo, *MO); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3679 | } | 
|  | 3680 |  | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3681 | // Handle non-register types that are treated like immediates. | 
| Tom Stellard | fb77f00 | 2015-01-13 22:59:41 +0000 | [diff] [blame] | 3682 | assert(MO->isImm() || MO->isTargetIndex() || MO->isFI()); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3683 |  | 
| Matt Arsenault | 4364fef | 2014-09-23 18:30:57 +0000 | [diff] [blame] | 3684 | if (!DefinedRC) { | 
|  | 3685 | // This operand expects an immediate. | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3686 | return true; | 
| Matt Arsenault | 4364fef | 2014-09-23 18:30:57 +0000 | [diff] [blame] | 3687 | } | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3688 |  | 
| Tom Stellard | 73ae1cb | 2014-09-23 21:26:25 +0000 | [diff] [blame] | 3689 | return isImmOperandLegal(MI, OpIdx, *MO); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 3690 | } | 
|  | 3691 |  | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3692 | void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3693 | MachineInstr &MI) const { | 
|  | 3694 | unsigned Opc = MI.getOpcode(); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3695 | const MCInstrDesc &InstrDesc = get(Opc); | 
|  | 3696 |  | 
|  | 3697 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3698 | MachineOperand &Src1 = MI.getOperand(Src1Idx); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3699 |  | 
|  | 3700 | // If there is an implicit SGPR use such as the VCC use of v_addc_u32/v_subb_u32, | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 3701 | // it consumes the only constant bus slot available before GFX10. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3702 | bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3703 | if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1) { | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3704 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3705 | MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3706 |  | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 3707 | if (Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || | 
|  | 3708 | isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3709 | legalizeOpWithMove(MI, Src0Idx); | 
|  | 3710 | } | 
|  | 3711 |  | 
| Tim Renouf | 2a99fa2 | 2018-02-28 19:10:32 +0000 | [diff] [blame] | 3712 | // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for | 
|  | 3713 | // both the value to write (src0) and lane select (src1).  Fix up non-SGPR | 
|  | 3714 | // src0/src1 with V_READFIRSTLANE. | 
|  | 3715 | if (Opc == AMDGPU::V_WRITELANE_B32) { | 
|  | 3716 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); | 
|  | 3717 | MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
|  | 3718 | const DebugLoc &DL = MI.getDebugLoc(); | 
|  | 3719 | if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { | 
|  | 3720 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 3721 | BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) | 
|  | 3722 | .add(Src0); | 
|  | 3723 | Src0.ChangeToRegister(Reg, false); | 
|  | 3724 | } | 
|  | 3725 | if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { | 
|  | 3726 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 3727 | const DebugLoc &DL = MI.getDebugLoc(); | 
|  | 3728 | BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) | 
|  | 3729 | .add(Src1); | 
|  | 3730 | Src1.ChangeToRegister(Reg, false); | 
|  | 3731 | } | 
|  | 3732 | return; | 
|  | 3733 | } | 
|  | 3734 |  | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3735 | // VOP2 instructions accept any operand type in src0, so we don't need to check | 
|  | 3736 | // its legality. If src1 is already legal, we don't need to do anything. | 
|  | 3737 | if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) | 
|  | 3738 | return; | 
|  | 3739 |  | 
| Nicolai Haehnle | 5dea645 | 2017-04-24 17:17:36 +0000 | [diff] [blame] | 3740 | // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for | 
|  | 3741 | // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane | 
|  | 3742 | // select is uniform. | 
|  | 3743 | if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && | 
|  | 3744 | RI.isVGPR(MRI, Src1.getReg())) { | 
|  | 3745 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 3746 | const DebugLoc &DL = MI.getDebugLoc(); | 
|  | 3747 | BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) | 
|  | 3748 | .add(Src1); | 
|  | 3749 | Src1.ChangeToRegister(Reg, false); | 
|  | 3750 | return; | 
|  | 3751 | } | 
|  | 3752 |  | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3753 | // We do not use commuteInstruction here because it is too aggressive and will | 
|  | 3754 | // commute if it is possible. We only want to commute here if it improves | 
|  | 3755 | // legality. This can be called a fairly large number of times so don't waste | 
|  | 3756 | // compile time pointlessly swapping and checking legality again. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3757 | if (HasImplicitSGPR || !MI.isCommutable()) { | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3758 | legalizeOpWithMove(MI, Src1Idx); | 
|  | 3759 | return; | 
|  | 3760 | } | 
|  | 3761 |  | 
|  | 3762 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3763 | MachineOperand &Src0 = MI.getOperand(Src0Idx); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3764 |  | 
|  | 3765 | // If src0 can be used as src1, commuting will make the operands legal. | 
|  | 3766 | // Otherwise we have to give up and insert a move. | 
|  | 3767 | // | 
|  | 3768 | // TODO: Other immediate-like operand kinds could be commuted if there was a | 
|  | 3769 | // MachineOperand::ChangeTo* for them. | 
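|  |  | // e.g. (a sketch) "v_add_f32 v0, v1, s2" has an SGPR in src1, which VOP2 | 
|  |  | // does not allow; commuting it to "v_add_f32 v0, s2, v1" puts the SGPR in | 
|  |  | // src0, where it is legal. | 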
|  | 3770 | if ((!Src1.isImm() && !Src1.isReg()) || | 
|  | 3771 | !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { | 
|  | 3772 | legalizeOpWithMove(MI, Src1Idx); | 
|  | 3773 | return; | 
|  | 3774 | } | 
|  | 3775 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3776 | int CommutedOpc = commuteOpcode(MI); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3777 | if (CommutedOpc == -1) { | 
|  | 3778 | legalizeOpWithMove(MI, Src1Idx); | 
|  | 3779 | return; | 
|  | 3780 | } | 
|  | 3781 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3782 | MI.setDesc(get(CommutedOpc)); | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 3783 |  | 
|  | 3784 | unsigned Src0Reg = Src0.getReg(); | 
|  | 3785 | unsigned Src0SubReg = Src0.getSubReg(); | 
|  | 3786 | bool Src0Kill = Src0.isKill(); | 
|  | 3787 |  | 
|  | 3788 | if (Src1.isImm()) | 
|  | 3789 | Src0.ChangeToImmediate(Src1.getImm()); | 
|  | 3790 | else if (Src1.isReg()) { | 
|  | 3791 | Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); | 
|  | 3792 | Src0.setSubReg(Src1.getSubReg()); | 
|  | 3793 | } else | 
|  | 3794 | llvm_unreachable("Should only have register or immediate operands"); | 
|  | 3795 |  | 
|  | 3796 | Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); | 
|  | 3797 | Src1.setSubReg(Src0SubReg); | 
|  | 3798 | } | 
|  | 3799 |  | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 3800 | // Legalize VOP3 operands. All operand types are supported for any operand, | 
|  | 3801 | // but only one literal constant is allowed, and only starting from GFX10. | 
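|  |  | // A sketch of a pre-GFX10 case: a VOP3 instruction reading two different | 
|  |  | // SGPRs needs two constant bus slots but only one is available, so the | 
|  |  | // second SGPR operand is rewritten with legalizeOpWithMove() below. | 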
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3802 | void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, | 
|  | 3803 | MachineInstr &MI) const { | 
|  | 3804 | unsigned Opc = MI.getOpcode(); | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3805 |  | 
|  | 3806 | int VOP3Idx[3] = { | 
|  | 3807 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), | 
|  | 3808 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), | 
|  | 3809 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) | 
|  | 3810 | }; | 
|  | 3811 |  | 
| Stanislav Mekhanoshin | 5f581c9 | 2019-06-12 17:52:51 +0000 | [diff] [blame] | 3812 | if (Opc == AMDGPU::V_PERMLANE16_B32 || | 
|  | 3813 | Opc == AMDGPU::V_PERMLANEX16_B32) { | 
|  | 3814 | // src1 and src2 must be scalar | 
|  | 3815 | MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); | 
|  | 3816 | MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); | 
|  | 3817 | const DebugLoc &DL = MI.getDebugLoc(); | 
|  | 3818 | if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { | 
|  | 3819 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 3820 | BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) | 
|  | 3821 | .add(Src1); | 
|  | 3822 | Src1.ChangeToRegister(Reg, false); | 
|  | 3823 | } | 
|  | 3824 | if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { | 
|  | 3825 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 3826 | BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) | 
|  | 3827 | .add(Src2); | 
|  | 3828 | Src2.ChangeToRegister(Reg, false); | 
|  | 3829 | } | 
|  | 3830 | } | 
|  | 3831 |  | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3832 | // Find the one SGPR operand we are allowed to use. | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3833 | int ConstantBusLimit = ST.getConstantBusLimit(Opc); | 
|  | 3834 | int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; | 
|  | 3835 | SmallDenseSet<unsigned> SGPRsUsed; | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3836 | unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3837 | if (SGPRReg != AMDGPU::NoRegister) { | 
|  | 3838 | SGPRsUsed.insert(SGPRReg); | 
|  | 3839 | --ConstantBusLimit; | 
|  | 3840 | } | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3841 |  | 
|  | 3842 | for (unsigned i = 0; i < 3; ++i) { | 
|  | 3843 | int Idx = VOP3Idx[i]; | 
|  | 3844 | if (Idx == -1) | 
|  | 3845 | break; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3846 | MachineOperand &MO = MI.getOperand(Idx); | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3847 |  | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3848 | if (!MO.isReg()) { | 
|  | 3849 | if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) | 
|  | 3850 | continue; | 
|  | 3851 |  | 
|  | 3852 | if (LiteralLimit > 0 && ConstantBusLimit > 0) { | 
|  | 3853 | --LiteralLimit; | 
|  | 3854 | --ConstantBusLimit; | 
|  | 3855 | continue; | 
|  | 3856 | } | 
|  | 3857 |  | 
|  | 3858 | --LiteralLimit; | 
|  | 3859 | --ConstantBusLimit; | 
|  | 3860 | legalizeOpWithMove(MI, Idx); | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3861 | continue; | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3862 | } | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3863 |  | 
|  | 3864 | if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) | 
|  | 3865 | continue; // VGPRs are legal | 
|  | 3866 |  | 
| Stanislav Mekhanoshin | f2baae0 | 2019-05-02 03:47:23 +0000 | [diff] [blame] | 3867 | // We can use one SGPR in each VOP3 instruction prior to GFX10 | 
|  | 3868 | // and two starting from GFX10. | 
|  | 3869 | if (SGPRsUsed.count(MO.getReg())) | 
|  | 3870 | continue; | 
|  | 3871 | if (ConstantBusLimit > 0) { | 
|  | 3872 | SGPRsUsed.insert(MO.getReg()); | 
|  | 3873 | --ConstantBusLimit; | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 3874 | continue; | 
|  | 3875 | } | 
|  | 3876 |  | 
|  | 3877 | // If we make it this far, then the operand is not legal and we must | 
|  | 3878 | // legalize it. | 
|  | 3879 | legalizeOpWithMove(MI, Idx); | 
|  | 3880 | } | 
|  | 3881 | } | 
|  | 3882 |  | 
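|  |  | // Copy the VGPR SrcReg into a newly created SGPR of the equivalent class: a | 
|  |  | // single V_READFIRSTLANE_B32 for 32-bit values, otherwise one readfirstlane | 
|  |  | // per 32-bit subregister combined with a REG_SEQUENCE. | 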
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3883 | unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, | 
|  | 3884 | MachineRegisterInfo &MRI) const { | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 3885 | const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); | 
|  | 3886 | const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); | 
|  | 3887 | unsigned DstReg = MRI.createVirtualRegister(SRC); | 
| Krzysztof Parzyszek | 44e25f3 | 2017-04-24 18:55:33 +0000 | [diff] [blame] | 3888 | unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 3889 |  | 
| Nicolai Haehnle | 7a87977 | 2018-04-20 07:14:25 +0000 | [diff] [blame] | 3890 | if (SubRegs == 1) { | 
|  | 3891 | BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), | 
|  | 3892 | get(AMDGPU::V_READFIRSTLANE_B32), DstReg) | 
|  | 3893 | .addReg(SrcReg); | 
|  | 3894 | return DstReg; | 
|  | 3895 | } | 
|  | 3896 |  | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 3897 | SmallVector<unsigned, 8> SRegs; | 
|  | 3898 | for (unsigned i = 0; i < SubRegs; ++i) { | 
|  | 3899 | unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3900 | BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 3901 | get(AMDGPU::V_READFIRSTLANE_B32), SGPR) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3902 | .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 3903 | SRegs.push_back(SGPR); | 
|  | 3904 | } | 
|  | 3905 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3906 | MachineInstrBuilder MIB = | 
|  | 3907 | BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), | 
|  | 3908 | get(AMDGPU::REG_SEQUENCE), DstReg); | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 3909 | for (unsigned i = 0; i < SubRegs; ++i) { | 
|  | 3910 | MIB.addReg(SRegs[i]); | 
|  | 3911 | MIB.addImm(RI.getSubRegFromChannel(i)); | 
|  | 3912 | } | 
|  | 3913 | return DstReg; | 
|  | 3914 | } | 
|  | 3915 |  | 
| Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 3916 | void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3917 | MachineInstr &MI) const { | 
| Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 3918 |  | 
|  | 3919 | // If the pointer is stored in VGPRs, then we need to move it to | 
|  | 3920 | // SGPRs using v_readfirstlane.  This is safe because we only select | 
|  | 3921 | // loads with uniform pointers to SMRD instructions, so we know the | 
|  | 3922 | // pointer value is uniform. | 
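|  |  | // e.g. (a sketch) a 64-bit sbase held in a 64-bit VGPR pair is read back as | 
|  |  | // two SGPRs with V_READFIRSTLANE_B32 and recombined by readlaneVGPRToSGPR(). | 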
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 3923 | MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); | 
| Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 3924 | if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { | 
| Nicolai Haehnle | a7b0005 | 2018-11-30 22:55:38 +0000 | [diff] [blame] | 3925 | unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); | 
|  | 3926 | SBase->setReg(SGPR); | 
|  | 3927 | } | 
|  | 3928 | MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff); | 
|  | 3929 | if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { | 
|  | 3930 | unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI); | 
|  | 3931 | SOff->setReg(SGPR); | 
| Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 3932 | } | 
|  | 3933 | } | 
|  | 3934 |  | 
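|  |  | // If Op is not already in DstRC, copy it into a new virtual register of that | 
|  |  | // class and rewrite the operand; when the source is a materialized immediate, | 
|  |  | // try to fold the copy away again with FoldImmediate(). | 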
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 3935 | void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, | 
|  | 3936 | MachineBasicBlock::iterator I, | 
|  | 3937 | const TargetRegisterClass *DstRC, | 
|  | 3938 | MachineOperand &Op, | 
|  | 3939 | MachineRegisterInfo &MRI, | 
|  | 3940 | const DebugLoc &DL) const { | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 3941 | unsigned OpReg = Op.getReg(); | 
|  | 3942 | unsigned OpSubReg = Op.getSubReg(); | 
|  | 3943 |  | 
|  | 3944 | const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( | 
|  | 3945 | RI.getRegClassForReg(MRI, OpReg), OpSubReg); | 
|  | 3946 |  | 
|  | 3947 | // Check if operand is already the correct register class. | 
|  | 3948 | if (DstRC == OpRC) | 
|  | 3949 | return; | 
|  | 3950 |  | 
|  | 3951 | unsigned DstReg = MRI.createVirtualRegister(DstRC); | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 3952 | MachineInstr *Copy = | 
|  | 3953 | BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 3954 |  | 
|  | 3955 | Op.setReg(DstReg); | 
|  | 3956 | Op.setSubReg(0); | 
|  | 3957 |  | 
|  | 3958 | MachineInstr *Def = MRI.getVRegDef(OpReg); | 
|  | 3959 | if (!Def) | 
|  | 3960 | return; | 
|  | 3961 |  | 
|  | 3962 | // Try to eliminate the copy if it is copying an immediate value. | 
| Alexander Timofeev | 37bd9bd | 2019-06-06 21:13:02 +0000 | [diff] [blame] | 3963 | if (Def->isMoveImmediate()) | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 3964 | FoldImmediate(*Copy, *Def, OpReg, &MRI); | 
|  | 3965 | } | 
|  | 3966 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 3967 | // Emit the actual waterfall loop, executing the wrapped instruction for each | 
|  | 3968 | // unique value of \p Rsrc across all lanes. In the best case we execute 1 | 
|  | 3969 | // iteration, in the worst case we execute 64 (once per lane). | 
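|  |  | // Roughly, each iteration emitted below looks like (assuming wave64): | 
|  |  | //   srsrc    = v_readfirstlane_b32 of the four VRsrc dwords | 
|  |  | //   cond     = v_cmp_eq_u64 x2, s_and_b64      ; lanes whose Rsrc matches | 
|  |  | //   saveexec = s_and_saveexec_b64 cond | 
|  |  | //   <wrapped instruction, now using the uniform srsrc> | 
|  |  | //   exec     = s_xor_b64_term exec, saveexec   ; retire finished lanes | 
|  |  | //   s_cbranch_execnz LoopBB | 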
|  | 3970 | static void | 
|  | 3971 | emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, | 
|  | 3972 | MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, | 
|  | 3973 | const DebugLoc &DL, MachineOperand &Rsrc) { | 
|  | 3974 | MachineBasicBlock::iterator I = LoopBB.begin(); | 
|  | 3975 |  | 
|  | 3976 | unsigned VRsrc = Rsrc.getReg(); | 
|  | 3977 | unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); | 
|  | 3978 |  | 
|  | 3979 | unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 3980 | unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 3981 | unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 3982 | unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 3983 | unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
|  | 3984 | unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
|  | 3985 | unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
|  | 3986 | unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
|  | 3987 | unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); | 
|  | 3988 |  | 
|  | 3989 | // Beginning of the loop, read the next Rsrc variant. | 
|  | 3990 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0) | 
|  | 3991 | .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0); | 
|  | 3992 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1) | 
|  | 3993 | .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1); | 
|  | 3994 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2) | 
|  | 3995 | .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2); | 
|  | 3996 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3) | 
|  | 3997 | .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3); | 
|  | 3998 |  | 
|  | 3999 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc) | 
|  | 4000 | .addReg(SRsrcSub0) | 
|  | 4001 | .addImm(AMDGPU::sub0) | 
|  | 4002 | .addReg(SRsrcSub1) | 
|  | 4003 | .addImm(AMDGPU::sub1) | 
|  | 4004 | .addReg(SRsrcSub2) | 
|  | 4005 | .addImm(AMDGPU::sub2) | 
|  | 4006 | .addReg(SRsrcSub3) | 
|  | 4007 | .addImm(AMDGPU::sub3); | 
|  | 4008 |  | 
|  | 4009 | // Update Rsrc operand to use the SGPR Rsrc. | 
|  | 4010 | Rsrc.setReg(SRsrc); | 
|  | 4011 | Rsrc.setIsKill(true); | 
|  | 4012 |  | 
|  | 4013 | // Identify all lanes with identical Rsrc operands in their VGPRs. | 
|  | 4014 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0) | 
|  | 4015 | .addReg(SRsrc, 0, AMDGPU::sub0_sub1) | 
|  | 4016 | .addReg(VRsrc, 0, AMDGPU::sub0_sub1); | 
|  | 4017 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) | 
|  | 4018 | .addReg(SRsrc, 0, AMDGPU::sub2_sub3) | 
|  | 4019 | .addReg(VRsrc, 0, AMDGPU::sub2_sub3); | 
|  | 4020 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond) | 
|  | 4021 | .addReg(CondReg0) | 
|  | 4022 | .addReg(CondReg1); | 
|  | 4023 |  | 
|  | 4024 | MRI.setSimpleHint(SaveExec, AndCond); | 
|  | 4025 |  | 
|  | 4026 | // Update EXEC to matching lanes, saving original to SaveExec. | 
|  | 4027 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec) | 
|  | 4028 | .addReg(AndCond, RegState::Kill); | 
|  | 4029 |  | 
|  | 4030 | // The original instruction is here; we insert the terminators after it. | 
|  | 4031 | I = LoopBB.end(); | 
|  | 4032 |  | 
|  | 4033 | // Update EXEC, switch all done bits to 0 and all todo bits to 1. | 
|  | 4034 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) | 
|  | 4035 | .addReg(AMDGPU::EXEC) | 
|  | 4036 | .addReg(SaveExec); | 
|  | 4037 | BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); | 
|  | 4038 | } | 
|  | 4039 |  | 
|  | 4040 | // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register | 
|  | 4041 | // with SGPRs by iterating over all unique values across all lanes. | 
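|  |  | // The block structure created below is, roughly: | 
|  |  | //   MBB (save EXEC) -> LoopBB (loops on itself) -> RemainderBB (restore EXEC), | 
|  |  | // with MI moved into LoopBB and everything after it into RemainderBB. | 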
|  | 4042 | static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, | 
|  | 4043 | MachineOperand &Rsrc, MachineDominatorTree *MDT) { | 
|  | 4044 | MachineBasicBlock &MBB = *MI.getParent(); | 
|  | 4045 | MachineFunction &MF = *MBB.getParent(); | 
|  | 4046 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
|  | 4047 | MachineBasicBlock::iterator I(&MI); | 
|  | 4048 | const DebugLoc &DL = MI.getDebugLoc(); | 
|  | 4049 |  | 
|  | 4050 | unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); | 
|  | 4051 |  | 
|  | 4052 | // Save the EXEC mask | 
|  | 4053 | BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec) | 
|  | 4054 | .addReg(AMDGPU::EXEC); | 
|  | 4055 |  | 
|  | 4056 | // Killed uses in the instruction we are waterfalling around will be | 
|  | 4057 | // incorrect due to the added control-flow. | 
|  | 4058 | for (auto &MO : MI.uses()) { | 
|  | 4059 | if (MO.isReg() && MO.isUse()) { | 
|  | 4060 | MRI.clearKillFlags(MO.getReg()); | 
|  | 4061 | } | 
|  | 4062 | } | 
|  | 4063 |  | 
|  | 4064 | // To insert the loop we need to split the block. Move everything after this | 
|  | 4065 | // point to a new block, and insert a new empty block between the two. | 
|  | 4066 | MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); | 
|  | 4067 | MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); | 
|  | 4068 | MachineFunction::iterator MBBI(MBB); | 
|  | 4069 | ++MBBI; | 
|  | 4070 |  | 
|  | 4071 | MF.insert(MBBI, LoopBB); | 
|  | 4072 | MF.insert(MBBI, RemainderBB); | 
|  | 4073 |  | 
|  | 4074 | LoopBB->addSuccessor(LoopBB); | 
|  | 4075 | LoopBB->addSuccessor(RemainderBB); | 
|  | 4076 |  | 
|  | 4077 | // Move MI to the LoopBB, and the remainder of the block to RemainderBB. | 
|  | 4078 | MachineBasicBlock::iterator J = I++; | 
|  | 4079 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); | 
|  | 4080 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); | 
|  | 4081 | LoopBB->splice(LoopBB->begin(), &MBB, J); | 
|  | 4082 |  | 
|  | 4083 | MBB.addSuccessor(LoopBB); | 
|  | 4084 |  | 
|  | 4085 | // Update dominators. We know that MBB immediately dominates LoopBB, that | 
|  | 4086 | // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately | 
|  | 4087 | // dominates all of the successors transferred to it from MBB that MBB used | 
|  | 4088 | // to dominate. | 
|  | 4089 | if (MDT) { | 
|  | 4090 | MDT->addNewBlock(LoopBB, &MBB); | 
|  | 4091 | MDT->addNewBlock(RemainderBB, LoopBB); | 
|  | 4092 | for (auto &Succ : RemainderBB->successors()) { | 
|  | 4093 | if (MDT->dominates(&MBB, Succ)) { | 
|  | 4094 | MDT->changeImmediateDominator(Succ, RemainderBB); | 
|  | 4095 | } | 
|  | 4096 | } | 
|  | 4097 | } | 
|  | 4098 |  | 
|  | 4099 | emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); | 
|  | 4100 |  | 
|  | 4101 | // Restore the EXEC mask | 
|  | 4102 | MachineBasicBlock::iterator First = RemainderBB->begin(); | 
|  | 4103 | BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) | 
|  | 4104 | .addReg(SaveExec); | 
|  | 4105 | } | 
|  | 4106 |  | 
|  | 4107 | // Extract pointer from Rsrc and return a zero-value Rsrc replacement. | 
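|  |  | // The replacement descriptor is { 0 (64-bit base), RSRC_DATA_FORMAT }, so the | 
|  |  | // caller can fold the real base pointer into the address instead; the | 
|  |  | // extracted pointer is returned in a 64-bit VGPR pair. | 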
|  | 4108 | static std::tuple<unsigned, unsigned> | 
|  | 4109 | extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { | 
|  | 4110 | MachineBasicBlock &MBB = *MI.getParent(); | 
|  | 4111 | MachineFunction &MF = *MBB.getParent(); | 
|  | 4112 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
|  | 4113 |  | 
|  | 4114 | // Extract the ptr from the resource descriptor. | 
|  | 4115 | unsigned RsrcPtr = | 
|  | 4116 | TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass, | 
|  | 4117 | AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); | 
|  | 4118 |  | 
|  | 4119 | // Create an empty resource descriptor | 
|  | 4120 | unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 4121 | unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
|  | 4122 | unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | 
|  | 4123 | unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); | 
|  | 4124 | uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); | 
|  | 4125 |  | 
|  | 4126 | // Zero64 = 0 | 
|  | 4127 | BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64) | 
|  | 4128 | .addImm(0); | 
|  | 4129 |  | 
|  | 4130 | // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} | 
|  | 4131 | BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo) | 
|  | 4132 | .addImm(RsrcDataFormat & 0xFFFFFFFF); | 
|  | 4133 |  | 
|  | 4134 | // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} | 
|  | 4135 | BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi) | 
|  | 4136 | .addImm(RsrcDataFormat >> 32); | 
|  | 4137 |  | 
|  | 4138 | // NewSRsrc = {Zero64, SRsrcFormat} | 
|  | 4139 | BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc) | 
|  | 4140 | .addReg(Zero64) | 
|  | 4141 | .addImm(AMDGPU::sub0_sub1) | 
|  | 4142 | .addReg(SRsrcFormatLo) | 
|  | 4143 | .addImm(AMDGPU::sub2) | 
|  | 4144 | .addReg(SRsrcFormatHi) | 
|  | 4145 | .addImm(AMDGPU::sub3); | 
|  | 4146 |  | 
|  | 4147 | return std::make_tuple(RsrcPtr, NewSRsrc); | 
|  | 4148 | } | 
|  | 4149 |  | 
|  | 4150 | void SIInstrInfo::legalizeOperands(MachineInstr &MI, | 
|  | 4151 | MachineDominatorTree *MDT) const { | 
| Nicolai Haehnle | ce2b589 | 2016-11-18 11:55:52 +0000 | [diff] [blame] | 4152 | MachineFunction &MF = *MI.getParent()->getParent(); | 
|  | 4153 | MachineRegisterInfo &MRI = MF.getRegInfo(); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4154 |  | 
|  | 4155 | // Legalize VOP2 | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4156 | if (isVOP2(MI) || isVOPC(MI)) { | 
| Matt Arsenault | 856d192 | 2015-12-01 19:57:17 +0000 | [diff] [blame] | 4157 | legalizeOperandsVOP2(MRI, MI); | 
| Tom Stellard | 0e975cf | 2014-08-01 00:32:35 +0000 | [diff] [blame] | 4158 | return; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4159 | } | 
|  | 4160 |  | 
|  | 4161 | // Legalize VOP3 | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4162 | if (isVOP3(MI)) { | 
| Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 4163 | legalizeOperandsVOP3(MRI, MI); | 
| Matt Arsenault | e068f9a | 2015-09-24 07:51:28 +0000 | [diff] [blame] | 4164 | return; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4165 | } | 
|  | 4166 |  | 
| Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 4167 | // Legalize SMRD | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4168 | if (isSMRD(MI)) { | 
| Tom Stellard | 467b5b9 | 2016-02-20 00:37:25 +0000 | [diff] [blame] | 4169 | legalizeOperandsSMRD(MRI, MI); | 
|  | 4170 | return; | 
|  | 4171 | } | 
|  | 4172 |  | 
| Tom Stellard | 4f3b04d | 2014-04-17 21:00:07 +0000 | [diff] [blame] | 4173 | // Legalize REG_SEQUENCE and PHI | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4174 | // The register class of the operands must be the same type as the register | 
|  | 4175 | // class of the output. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4176 | if (MI.getOpcode() == AMDGPU::PHI) { | 
| Craig Topper | 062a2ba | 2014-04-25 05:30:21 +0000 | [diff] [blame] | 4177 | const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4178 | for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { | 
|  | 4179 | if (!MI.getOperand(i).isReg() || | 
|  | 4180 | !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg())) | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4181 | continue; | 
|  | 4182 | const TargetRegisterClass *OpRC = | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4183 | MRI.getRegClass(MI.getOperand(i).getReg()); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4184 | if (RI.hasVGPRs(OpRC)) { | 
|  | 4185 | VRC = OpRC; | 
|  | 4186 | } else { | 
|  | 4187 | SRC = OpRC; | 
|  | 4188 | } | 
|  | 4189 | } | 
|  | 4190 |  | 
|  | 4191 | // If any of the operands are VGPR registers, then they all must be VGPRs, | 
|  | 4192 | // otherwise we will create illegal VGPR->SGPR copies when legalizing | 
|  | 4193 | // them. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4194 | if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4195 | if (!VRC) { | 
|  | 4196 | assert(SRC); | 
| Alexander Timofeev | 37bd9bd | 2019-06-06 21:13:02 +0000 | [diff] [blame] | 4197 | VRC = RI.getEquivalentVGPRClass(SRC); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4198 | } | 
|  | 4199 | RC = VRC; | 
|  | 4200 | } else { | 
|  | 4201 | RC = SRC; | 
|  | 4202 | } | 
|  | 4203 |  | 
|  | 4204 | // Update all the operands so they have the same type. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4205 | for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { | 
|  | 4206 | MachineOperand &Op = MI.getOperand(I); | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4207 | if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4208 | continue; | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4209 |  | 
|  | 4210 | // MI is a PHI instruction. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4211 | MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4212 | MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); | 
|  | 4213 |  | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 4214 | // Avoid creating no-op copies with the same src and dst reg class.  These | 
|  | 4215 | // confuse some of the machine passes. | 
|  | 4216 | legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4217 | } | 
|  | 4218 | } | 
|  | 4219 |  | 
|  | 4220 | // REG_SEQUENCE doesn't really require operand legalization, but if one has a | 
|  | 4221 | // VGPR dest type and SGPR sources, insert copies so all operands are | 
|  | 4222 | // VGPRs. This seems to help operand folding / the register coalescer. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4223 | if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { | 
|  | 4224 | MachineBasicBlock *MBB = MI.getParent(); | 
|  | 4225 | const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4226 | if (RI.hasVGPRs(DstRC)) { | 
|  | 4227 | // Update all the operands so they are VGPR register classes. These may | 
|  | 4228 | // not be the same register class because REG_SEQUENCE supports mixing | 
|  | 4229 | // subregister index types e.g. sub0_sub1 + sub2 + sub3 | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4230 | for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { | 
|  | 4231 | MachineOperand &Op = MI.getOperand(I); | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4232 | if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) | 
|  | 4233 | continue; | 
|  | 4234 |  | 
|  | 4235 | const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); | 
|  | 4236 | const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); | 
|  | 4237 | if (VRC == OpRC) | 
|  | 4238 | continue; | 
|  | 4239 |  | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 4240 | legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); | 
| Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 4241 | Op.setIsKill(); | 
| Tom Stellard | 4f3b04d | 2014-04-17 21:00:07 +0000 | [diff] [blame] | 4242 | } | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4243 | } | 
| Matt Arsenault | e068f9a | 2015-09-24 07:51:28 +0000 | [diff] [blame] | 4244 |  | 
|  | 4245 | return; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4246 | } | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4247 |  | 
| Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 4248 | // Legalize INSERT_SUBREG | 
|  | 4249 | // src0 must have the same register class as dst | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4250 | if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { | 
|  | 4251 | unsigned Dst = MI.getOperand(0).getReg(); | 
|  | 4252 | unsigned Src0 = MI.getOperand(1).getReg(); | 
| Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 4253 | const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); | 
|  | 4254 | const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); | 
|  | 4255 | if (DstRC != Src0RC) { | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 4256 | MachineBasicBlock *MBB = MI.getParent(); | 
|  | 4257 | MachineOperand &Op = MI.getOperand(1); | 
|  | 4258 | legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); | 
| Tom Stellard | a568738 | 2014-05-15 14:41:55 +0000 | [diff] [blame] | 4259 | } | 
|  | 4260 | return; | 
|  | 4261 | } | 
|  | 4262 |  | 
| Nicolai Haehnle | 7a87977 | 2018-04-20 07:14:25 +0000 | [diff] [blame] | 4263 | // Legalize SI_INIT_M0 | 
|  | 4264 | if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { | 
|  | 4265 | MachineOperand &Src = MI.getOperand(0); | 
|  | 4266 | if (Src.isReg() && RI.hasVGPRs(MRI.getRegClass(Src.getReg()))) | 
|  | 4267 | Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); | 
|  | 4268 | return; | 
|  | 4269 | } | 
|  | 4270 |  | 
| Nicolai Haehnle | ce2b589 | 2016-11-18 11:55:52 +0000 | [diff] [blame] | 4271 | // Legalize MIMG and MUBUF/MTBUF for shaders. | 
|  | 4272 | // | 
|  | 4273 | // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via | 
|  | 4274 | // scratch memory access. In both cases, the legalization never involves | 
|  | 4275 | // conversion to the addr64 form. | 
|  | 4276 | if (isMIMG(MI) || | 
| Matthias Braun | f1caa28 | 2017-12-15 22:22:58 +0000 | [diff] [blame] | 4277 | (AMDGPU::isShader(MF.getFunction().getCallingConv()) && | 
| Nicolai Haehnle | ce2b589 | 2016-11-18 11:55:52 +0000 | [diff] [blame] | 4278 | (isMUBUF(MI) || isMTBUF(MI)))) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4279 | MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 4280 | if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { | 
|  | 4281 | unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); | 
|  | 4282 | SRsrc->setReg(SGPR); | 
|  | 4283 | } | 
|  | 4284 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4285 | MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); | 
| Tom Stellard | 1397d49 | 2016-02-11 21:45:07 +0000 | [diff] [blame] | 4286 | if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { | 
|  | 4287 | unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); | 
|  | 4288 | SSamp->setReg(SGPR); | 
|  | 4289 | } | 
|  | 4290 | return; | 
|  | 4291 | } | 
|  | 4292 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4293 | // Legalize MUBUF* instructions. | 
|  | 4294 | int RsrcIdx = | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4295 | AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4296 | if (RsrcIdx != -1) { | 
| Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 4297 | // We have an MUBUF instruction | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4298 | MachineOperand *Rsrc = &MI.getOperand(RsrcIdx); | 
|  | 4299 | unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass; | 
|  | 4300 | if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()), | 
|  | 4301 | RI.getRegClass(RsrcRC))) { | 
| Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 4302 | // The operands are legal. | 
|  | 4303 | // FIXME: We may need to legalize operands besides srsrc. | 
|  | 4304 | return; | 
|  | 4305 | } | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4306 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4307 | // Legalize a VGPR Rsrc. | 
|  | 4308 | // | 
|  | 4309 | // If the instruction is _ADDR64, we can avoid a waterfall by extracting | 
|  | 4310 | // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using | 
|  | 4311 | // a zero-value SRsrc. | 
|  | 4312 | // | 
|  | 4313 | // If the instruction is _OFFSET (both idxen and offen disabled), and we | 
|  | 4314 | // support ADDR64 instructions, we can convert to ADDR64 and do the same as | 
|  | 4315 | // above. | 
|  | 4316 | // | 
|  | 4317 | // Otherwise we are on non-ADDR64 hardware, and/or we have | 
|  | 4318 | // idxen/offen/bothen and we fall back to a waterfall loop. | 
|  | 4319 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4320 | MachineBasicBlock &MBB = *MI.getParent(); | 
| Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 4321 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4322 | MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr); | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4323 | if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) { | 
| Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 4324 | // This is already an ADDR64 instruction so we need to add the pointer | 
|  | 4325 | // extracted from the resource descriptor to the current value of VAddr. | 
| Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 4326 | unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4327 | unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4328 | unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); | 
| Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 4329 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4330 | unsigned RsrcPtr, NewSRsrc; | 
|  | 4331 | std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); | 
|  | 4332 |  | 
|  | 4333 | // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0 | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4334 | DebugLoc DL = MI.getDebugLoc(); | 
| Matt Arsenault | 51d2d0f | 2015-09-01 02:02:21 +0000 | [diff] [blame] | 4335 | BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4336 | .addReg(RsrcPtr, 0, AMDGPU::sub0) | 
|  | 4337 | .addReg(VAddr->getReg(), 0, AMDGPU::sub0); | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4338 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4339 | // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1 | 
| Matt Arsenault | 51d2d0f | 2015-09-01 02:02:21 +0000 | [diff] [blame] | 4340 | BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4341 | .addReg(RsrcPtr, 0, AMDGPU::sub1) | 
|  | 4342 | .addReg(VAddr->getReg(), 0, AMDGPU::sub1); | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4343 |  | 
| Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 4344 | // NewVaddr = {NewVaddrHi, NewVaddrLo} | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4345 | BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) | 
|  | 4346 | .addReg(NewVAddrLo) | 
|  | 4347 | .addImm(AMDGPU::sub0) | 
|  | 4348 | .addReg(NewVAddrHi) | 
|  | 4349 | .addImm(AMDGPU::sub1); | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4350 |  | 
|  | 4351 | VAddr->setReg(NewVAddr); | 
|  | 4352 | Rsrc->setReg(NewSRsrc); | 
|  | 4353 | } else if (!VAddr && ST.hasAddr64()) { | 
| Tom Stellard | 155bbb7 | 2014-08-11 22:18:17 +0000 | [diff] [blame] | 4354 | // This instruction is the _OFFSET variant, so we need to convert it to | 
|  | 4355 | // ADDR64. | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4356 | assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration() | 
|  | 4357 | < AMDGPUSubtarget::VOLCANIC_ISLANDS && | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4358 | "FIXME: Need to emit flat atomics here"); | 
|  | 4359 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4360 | unsigned RsrcPtr, NewSRsrc; | 
|  | 4361 | std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); | 
|  | 4362 |  | 
|  | 4363 | unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4364 | MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); | 
|  | 4365 | MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); | 
|  | 4366 | MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); | 
|  | 4367 | unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4368 |  | 
|  | 4369 | // Atomics with return have an additional tied operand and are | 
|  | 4370 | // missing some of the special bits. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4371 | MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4372 | MachineInstr *Addr64; | 
|  | 4373 |  | 
|  | 4374 | if (!VDataIn) { | 
|  | 4375 | // Regular buffer load / store. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4376 | MachineInstrBuilder MIB = | 
|  | 4377 | BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 4378 | .add(*VData) | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4379 | .addReg(NewVAddr) | 
|  | 4380 | .addReg(NewSRsrc) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 4381 | .add(*SOffset) | 
|  | 4382 | .add(*Offset); | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4383 |  | 
|  | 4384 | // Atomics do not have this operand. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4385 | if (const MachineOperand *GLC = | 
|  | 4386 | getNamedOperand(MI, AMDGPU::OpName::glc)) { | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4387 | MIB.addImm(GLC->getImm()); | 
|  | 4388 | } | 
| Stanislav Mekhanoshin | a632294 | 2019-04-30 22:08:23 +0000 | [diff] [blame] | 4389 | if (const MachineOperand *DLC = | 
|  | 4390 | getNamedOperand(MI, AMDGPU::OpName::dlc)) { | 
|  | 4391 | MIB.addImm(DLC->getImm()); | 
|  | 4392 | } | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4393 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4394 | MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4395 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4396 | if (const MachineOperand *TFE = | 
|  | 4397 | getNamedOperand(MI, AMDGPU::OpName::tfe)) { | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4398 | MIB.addImm(TFE->getImm()); | 
|  | 4399 | } | 
|  | 4400 |  | 
| Chandler Carruth | c73c030 | 2018-08-16 21:30:05 +0000 | [diff] [blame] | 4401 | MIB.cloneMemRefs(MI); | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4402 | Addr64 = MIB; | 
|  | 4403 | } else { | 
|  | 4404 | // Atomics with return. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4405 | Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 4406 | .add(*VData) | 
|  | 4407 | .add(*VDataIn) | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4408 | .addReg(NewVAddr) | 
|  | 4409 | .addReg(NewSRsrc) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 4410 | .add(*SOffset) | 
|  | 4411 | .add(*Offset) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4412 | .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) | 
| Chandler Carruth | c73c030 | 2018-08-16 21:30:05 +0000 | [diff] [blame] | 4413 | .cloneMemRefs(MI); | 
| Matt Arsenault | a40450c | 2015-11-05 02:46:56 +0000 | [diff] [blame] | 4414 | } | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4415 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4416 | MI.removeFromParent(); | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4417 |  | 
| Matt Arsenault | ef67d76 | 2015-09-09 17:03:29 +0000 | [diff] [blame] | 4418 | // NewVaddr = {NewVaddrHi, NewVaddrLo} | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4419 | BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), | 
|  | 4420 | NewVAddr) | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4421 | .addReg(RsrcPtr, 0, AMDGPU::sub0) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4422 | .addImm(AMDGPU::sub0) | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4423 | .addReg(RsrcPtr, 0, AMDGPU::sub1) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4424 | .addImm(AMDGPU::sub1); | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4425 | } else { | 
|  | 4426 | // This is another variant; legalize Rsrc with a waterfall loop from VGPRs | 
|  | 4427 | // to SGPRs. | 
|  | 4428 | loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4429 | } | 
|  | 4430 | } | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4431 | } | 
|  | 4432 |  | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4433 | void SIInstrInfo::moveToVALU(MachineInstr &TopInst, | 
|  | 4434 | MachineDominatorTree *MDT) const { | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 4435 | SetVectorType Worklist; | 
|  | 4436 | Worklist.insert(&TopInst); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4437 |  | 
|  | 4438 | while (!Worklist.empty()) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4439 | MachineInstr &Inst = *Worklist.pop_back_val(); | 
|  | 4440 | MachineBasicBlock *MBB = Inst.getParent(); | 
| Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 4441 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | 
|  | 4442 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4443 | unsigned Opcode = Inst.getOpcode(); | 
|  | 4444 | unsigned NewOpcode = getVALUOp(Inst); | 
| Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 4445 |  | 
| Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 4446 | // Handle some special cases | 
| Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 4447 | switch (Opcode) { | 
| Tom Stellard | 0c354f2 | 2014-04-30 15:31:29 +0000 | [diff] [blame] | 4448 | default: | 
| Tom Stellard | 0c354f2 | 2014-04-30 15:31:29 +0000 | [diff] [blame] | 4449 | break; | 
| Matt Arsenault | 301162c | 2017-11-15 21:51:43 +0000 | [diff] [blame] | 4450 | case AMDGPU::S_ADD_U64_PSEUDO: | 
|  | 4451 | case AMDGPU::S_SUB_U64_PSEUDO: | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4452 | splitScalar64BitAddSub(Worklist, Inst, MDT); | 
| Matt Arsenault | 301162c | 2017-11-15 21:51:43 +0000 | [diff] [blame] | 4453 | Inst.eraseFromParent(); | 
|  | 4454 | continue; | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4455 | case AMDGPU::S_ADD_I32: | 
|  | 4456 | case AMDGPU::S_SUB_I32: | 
|  | 4457 | // FIXME: The u32 versions currently selected use the carry. | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4458 | if (moveScalarAddSub(Worklist, Inst, MDT)) | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4459 | continue; | 
|  | 4460 |  | 
|  | 4461 | // Default handling | 
|  | 4462 | break; | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4463 | case AMDGPU::S_AND_B64: | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4464 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4465 | Inst.eraseFromParent(); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4466 | continue; | 
|  | 4467 |  | 
|  | 4468 | case AMDGPU::S_OR_B64: | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4469 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4470 | Inst.eraseFromParent(); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4471 | continue; | 
|  | 4472 |  | 
|  | 4473 | case AMDGPU::S_XOR_B64: | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4474 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); | 
|  | 4475 | Inst.eraseFromParent(); | 
|  | 4476 | continue; | 
|  | 4477 |  | 
|  | 4478 | case AMDGPU::S_NAND_B64: | 
|  | 4479 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); | 
|  | 4480 | Inst.eraseFromParent(); | 
|  | 4481 | continue; | 
|  | 4482 |  | 
|  | 4483 | case AMDGPU::S_NOR_B64: | 
|  | 4484 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); | 
|  | 4485 | Inst.eraseFromParent(); | 
|  | 4486 | continue; | 
|  | 4487 |  | 
|  | 4488 | case AMDGPU::S_XNOR_B64: | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 4489 | if (ST.hasDLInsts()) | 
|  | 4490 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); | 
|  | 4491 | else | 
|  | 4492 | splitScalar64BitXnor(Worklist, Inst, MDT); | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4493 | Inst.eraseFromParent(); | 
|  | 4494 | continue; | 
|  | 4495 |  | 
|  | 4496 | case AMDGPU::S_ANDN2_B64: | 
|  | 4497 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); | 
|  | 4498 | Inst.eraseFromParent(); | 
|  | 4499 | continue; | 
|  | 4500 |  | 
|  | 4501 | case AMDGPU::S_ORN2_B64: | 
|  | 4502 | splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4503 | Inst.eraseFromParent(); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4504 | continue; | 
|  | 4505 |  | 
|  | 4506 | case AMDGPU::S_NOT_B64: | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4507 | splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4508 | Inst.eraseFromParent(); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4509 | continue; | 
|  | 4510 |  | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 4511 | case AMDGPU::S_BCNT1_I32_B64: | 
|  | 4512 | splitScalar64BitBCNT(Worklist, Inst); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4513 | Inst.eraseFromParent(); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 4514 | continue; | 
|  | 4515 |  | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 4516 | case AMDGPU::S_BFE_I64: | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 4517 | splitScalar64BitBFE(Worklist, Inst); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4518 | Inst.eraseFromParent(); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 4519 | continue; | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 4520 |  | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 4521 | case AMDGPU::S_LSHL_B32: | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4522 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 4523 | NewOpcode = AMDGPU::V_LSHLREV_B32_e64; | 
|  | 4524 | swapOperands(Inst); | 
|  | 4525 | } | 
|  | 4526 | break; | 
|  | 4527 | case AMDGPU::S_ASHR_I32: | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4528 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 4529 | NewOpcode = AMDGPU::V_ASHRREV_I32_e64; | 
|  | 4530 | swapOperands(Inst); | 
|  | 4531 | } | 
|  | 4532 | break; | 
|  | 4533 | case AMDGPU::S_LSHR_B32: | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4534 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 4535 | NewOpcode = AMDGPU::V_LSHRREV_B32_e64; | 
|  | 4536 | swapOperands(Inst); | 
|  | 4537 | } | 
|  | 4538 | break; | 
| Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 4539 | case AMDGPU::S_LSHL_B64: | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4540 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 4541 | NewOpcode = AMDGPU::V_LSHLREV_B64; | 
|  | 4542 | swapOperands(Inst); | 
|  | 4543 | } | 
|  | 4544 | break; | 
|  | 4545 | case AMDGPU::S_ASHR_I64: | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4546 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 4547 | NewOpcode = AMDGPU::V_ASHRREV_I64; | 
|  | 4548 | swapOperands(Inst); | 
|  | 4549 | } | 
|  | 4550 | break; | 
|  | 4551 | case AMDGPU::S_LSHR_B64: | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 4552 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | 707a6d0 | 2015-02-03 21:53:01 +0000 | [diff] [blame] | 4553 | NewOpcode = AMDGPU::V_LSHRREV_B64; | 
|  | 4554 | swapOperands(Inst); | 
|  | 4555 | } | 
|  | 4556 | break; | 
| Marek Olsak | be04780 | 2014-12-07 12:19:03 +0000 | [diff] [blame] | 4557 |  | 
| Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 4558 | case AMDGPU::S_ABS_I32: | 
|  | 4559 | lowerScalarAbs(Worklist, Inst); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4560 | Inst.eraseFromParent(); | 
| Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 4561 | continue; | 
|  | 4562 |  | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4563 | case AMDGPU::S_CBRANCH_SCC0: | 
|  | 4564 | case AMDGPU::S_CBRANCH_SCC1: | 
|  | 4565 | // Clear unused bits of vcc | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4566 | BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), | 
|  | 4567 | AMDGPU::VCC) | 
|  | 4568 | .addReg(AMDGPU::EXEC) | 
|  | 4569 | .addReg(AMDGPU::VCC); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4570 | break; | 
|  | 4571 |  | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4572 | case AMDGPU::S_BFE_U64: | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 4573 | case AMDGPU::S_BFM_B64: | 
|  | 4574 | llvm_unreachable("Moving this op to VALU not implemented"); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 4575 |  | 
|  | 4576 | case AMDGPU::S_PACK_LL_B32_B16: | 
|  | 4577 | case AMDGPU::S_PACK_LH_B32_B16: | 
| Eugene Zelenko | 59e1282 | 2017-08-08 00:47:13 +0000 | [diff] [blame] | 4578 | case AMDGPU::S_PACK_HH_B32_B16: | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 4579 | movePackToVALU(Worklist, MRI, Inst); | 
|  | 4580 | Inst.eraseFromParent(); | 
|  | 4581 | continue; | 
| Konstantin Zhuravlyov | ca8946a | 2017-09-18 21:22:45 +0000 | [diff] [blame] | 4582 |  | 
|  | 4583 | case AMDGPU::S_XNOR_B32: | 
|  | 4584 | lowerScalarXnor(Worklist, Inst); | 
|  | 4585 | Inst.eraseFromParent(); | 
|  | 4586 | continue; | 
|  | 4587 |  | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4588 | case AMDGPU::S_NAND_B32: | 
|  | 4589 | splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); | 
|  | 4590 | Inst.eraseFromParent(); | 
|  | 4591 | continue; | 
|  | 4592 |  | 
|  | 4593 | case AMDGPU::S_NOR_B32: | 
|  | 4594 | splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); | 
|  | 4595 | Inst.eraseFromParent(); | 
|  | 4596 | continue; | 
|  | 4597 |  | 
|  | 4598 | case AMDGPU::S_ANDN2_B32: | 
|  | 4599 | splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); | 
|  | 4600 | Inst.eraseFromParent(); | 
|  | 4601 | continue; | 
|  | 4602 |  | 
|  | 4603 | case AMDGPU::S_ORN2_B32: | 
|  | 4604 | splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); | 
| Konstantin Zhuravlyov | ca8946a | 2017-09-18 21:22:45 +0000 | [diff] [blame] | 4605 | Inst.eraseFromParent(); | 
|  | 4606 | continue; | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 4607 | } | 
| Tom Stellard | e038720 | 2014-03-21 15:51:54 +0000 | [diff] [blame] | 4608 |  | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4609 | if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { | 
|  | 4610 | // We cannot move this instruction to the VALU, so we should try to | 
|  | 4611 | // legalize its operands instead. | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4612 | legalizeOperands(Inst, MDT); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4613 | continue; | 
| Tom Stellard | 1583409 | 2014-03-21 15:51:57 +0000 | [diff] [blame] | 4614 | } | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4615 |  | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4616 | // Use the new VALU Opcode. | 
|  | 4617 | const MCInstrDesc &NewDesc = get(NewOpcode); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4618 | Inst.setDesc(NewDesc); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4619 |  | 
| Matt Arsenault | f0b1e3a | 2013-11-18 20:09:21 +0000 | [diff] [blame] | 4620 | // Remove any references to SCC. Vector instructions can't read from it, and | 
|  | 4621 | // we're just about to add the implicit use / defs of VCC, so we don't want | 
|  | 4622 | // both. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4623 | for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { | 
|  | 4624 | MachineOperand &Op = Inst.getOperand(i); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4625 | if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { | 
| Michael Liao | 6883d7e | 2019-03-15 12:42:21 +0000 | [diff] [blame] | 4626 | // Only propagate through live-def of SCC. | 
|  | 4627 | if (Op.isDef() && !Op.isDead()) | 
|  | 4628 | addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4629 | Inst.RemoveOperand(i); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4630 | } | 
| Matt Arsenault | f0b1e3a | 2013-11-18 20:09:21 +0000 | [diff] [blame] | 4631 | } | 
|  | 4632 |  | 
| Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 4633 | if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { | 
|  | 4634 | // We are converting these to a BFE, so we need to add the missing | 
|  | 4635 | // operands for the size and offset. | 
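|  |  | // For example, an S_SEXT_I32_I8 becomes a bitfield extract with offset 0 and | 
|  |  | // width 8 once the VALU opcode is substituted below. | 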
|  | 4636 | unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4637 | Inst.addOperand(MachineOperand::CreateImm(0)); | 
|  | 4638 | Inst.addOperand(MachineOperand::CreateImm(Size)); | 
| Matt Arsenault | 27cc958 | 2014-04-18 01:53:18 +0000 | [diff] [blame] | 4639 |  | 
| Matt Arsenault | b5b5110 | 2014-06-10 19:18:21 +0000 | [diff] [blame] | 4640 | } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { | 
|  | 4641 | // The VALU version adds the second operand to the result, so insert an | 
|  | 4642 | // extra 0 operand. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4643 | Inst.addOperand(MachineOperand::CreateImm(0)); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4644 | } | 
|  | 4645 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4646 | Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4647 |  | 
| Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 4648 | if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4649 | const MachineOperand &OffsetWidthOp = Inst.getOperand(2); | 
| Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 4650 | // If we need to move this to VGPRs, we need to unpack the second operand | 
|  | 4651 | // back into the 2 separate ones for bit offset and width. | 
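|  |  | // For example, a packed immediate of 0x100000 unpacks to offset 0 and | 
|  |  | // width 16. | 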
|  | 4652 | assert(OffsetWidthOp.isImm() && | 
|  | 4653 | "Scalar BFE is only implemented for constant width and offset"); | 
|  | 4654 | uint32_t Imm = OffsetWidthOp.getImm(); | 
|  | 4655 |  | 
|  | 4656 | uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. | 
|  | 4657 | uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4658 | Inst.RemoveOperand(2);                     // Remove old immediate. | 
|  | 4659 | Inst.addOperand(MachineOperand::CreateImm(Offset)); | 
|  | 4660 | Inst.addOperand(MachineOperand::CreateImm(BitWidth)); | 
| Matt Arsenault | 78b8670 | 2014-04-18 05:19:26 +0000 | [diff] [blame] | 4661 | } | 
|  | 4662 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4663 | bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4664 | unsigned NewDstReg = AMDGPU::NoRegister; | 
|  | 4665 | if (HasDst) { | 
| Matt Arsenault | 21a4382 | 2017-04-06 21:09:53 +0000 | [diff] [blame] | 4666 | unsigned DstReg = Inst.getOperand(0).getReg(); | 
|  | 4667 | if (TargetRegisterInfo::isPhysicalRegister(DstReg)) | 
|  | 4668 | continue; | 
|  | 4669 |  | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4670 | // Update the destination register class. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4671 | const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4672 | if (!NewDstRC) | 
|  | 4673 | continue; | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4674 |  | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 4675 | if (Inst.isCopy() && | 
|  | 4676 | TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) && | 
|  | 4677 | NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { | 
|  | 4678 | // Instead of creating a copy where src and dst are the same register | 
|  | 4679 | // class, we just replace all uses of dst with src.  These kinds of | 
|  | 4680 | // copies interfere with the heuristics MachineSink uses to decide | 
|  | 4681 | // whether or not to split a critical edge, since the pass assumes | 
|  | 4682 | // that copies will end up as machine instructions and not be | 
|  | 4683 | // eliminated. | 
|  | 4684 | addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); | 
|  | 4685 | MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); | 
|  | 4686 | MRI.clearKillFlags(Inst.getOperand(1).getReg()); | 
|  | 4687 | Inst.getOperand(0).setReg(DstReg); | 
| Matt Arsenault | 69932e4 | 2018-03-19 14:07:15 +0000 | [diff] [blame] | 4688 |  | 
|  | 4689 | // Make sure we don't leave around a dead VGPR->SGPR copy. Normally | 
|  | 4690 | // these are deleted later, but at -O0 it would leave a suspicious | 
|  | 4691 | // looking illegal copy of an undef register. | 
|  | 4692 | for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) | 
|  | 4693 | Inst.RemoveOperand(I); | 
|  | 4694 | Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); | 
| Tom Stellard | 0d162b1 | 2016-11-16 18:42:17 +0000 | [diff] [blame] | 4695 | continue; | 
|  | 4696 | } | 
|  | 4697 |  | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4698 | NewDstReg = MRI.createVirtualRegister(NewDstRC); | 
|  | 4699 | MRI.replaceRegWith(DstReg, NewDstReg); | 
|  | 4700 | } | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4701 |  | 
| Tom Stellard | e1a2445 | 2014-04-17 21:00:01 +0000 | [diff] [blame] | 4702 | // Legalize the operands | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4703 | legalizeOperands(Inst, MDT); | 
| Tom Stellard | e1a2445 | 2014-04-17 21:00:01 +0000 | [diff] [blame] | 4704 |  | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 4705 | if (HasDst) | 
|  | 4706 | addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); | 
| Tom Stellard | 8216602 | 2013-11-13 23:36:37 +0000 | [diff] [blame] | 4707 | } | 
|  | 4708 | } | 
|  | 4709 |  | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4710 | // Add/sub require special handling to deal with carry outs. | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4711 | bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, | 
|  | 4712 | MachineDominatorTree *MDT) const { | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4713 | if (ST.hasAddNoCarry()) { | 
|  | 4714 | // Assume there is no user of scc since we don't select this in that case. | 
|  | 4715 | // Since scc isn't used, it doesn't really matter if the i32 or u32 variant | 
|  | 4716 | // is used. | 
|  | 4717 |  | 
|  | 4718 | MachineBasicBlock &MBB = *Inst.getParent(); | 
|  | 4719 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4720 |  | 
|  | 4721 | unsigned OldDstReg = Inst.getOperand(0).getReg(); | 
|  | 4722 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4723 |  | 
|  | 4724 | unsigned Opc = Inst.getOpcode(); | 
|  | 4725 | assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); | 
|  | 4726 |  | 
|  | 4727 | unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? | 
|  | 4728 | AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; | 
|  | 4729 |  | 
|  | 4730 | assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); | 
|  | 4731 | Inst.RemoveOperand(3); | 
|  | 4732 |  | 
|  | 4733 | Inst.setDesc(get(NewOpc)); | 
| Tim Renouf | cfdfba9 | 2019-03-18 19:35:44 +0000 | [diff] [blame] | 4734 | Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4735 | Inst.addImplicitDefUseOperands(*MBB.getParent()); | 
|  | 4736 | MRI.replaceRegWith(OldDstReg, ResultReg); | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4737 | legalizeOperands(Inst, MDT); | 
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4738 |  | 
|  | 4739 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); | 
|  | 4740 | return true; | 
|  | 4741 | } | 
|  | 4742 |  | 
|  | 4743 | return false; | 
|  | 4744 | } | 
|  | 4745 |  | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 4746 | void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4747 | MachineInstr &Inst) const { | 
|  | 4748 | MachineBasicBlock &MBB = *Inst.getParent(); | 
| Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 4749 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4750 | MachineBasicBlock::iterator MII = Inst; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4751 | DebugLoc DL = Inst.getDebugLoc(); | 
| Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 4752 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4753 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 4754 | MachineOperand &Src = Inst.getOperand(1); | 
| Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 4755 | unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4756 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4757 |  | 
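|  |  | // Expand abs(x) as max(x, 0 - x) using a VALU subtract and V_MAX_I32. | 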
| Matt Arsenault | 84445dd | 2017-11-30 22:51:26 +0000 | [diff] [blame] | 4758 | unsigned SubOp = ST.hasAddNoCarry() ? | 
|  | 4759 | AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32; | 
|  | 4760 |  | 
|  | 4761 | BuildMI(MBB, MII, DL, get(SubOp), TmpReg) | 
| Marek Olsak | 7ed6b2f | 2015-11-25 21:22:45 +0000 | [diff] [blame] | 4762 | .addImm(0) | 
|  | 4763 | .addReg(Src.getReg()); | 
|  | 4764 |  | 
|  | 4765 | BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) | 
|  | 4766 | .addReg(Src.getReg()) | 
|  | 4767 | .addReg(TmpReg); | 
|  | 4768 |  | 
|  | 4769 | MRI.replaceRegWith(Dest.getReg(), ResultReg); | 
|  | 4770 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); | 
|  | 4771 | } | 
|  | 4772 |  | 
| Konstantin Zhuravlyov | ca8946a | 2017-09-18 21:22:45 +0000 | [diff] [blame] | 4773 | void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, | 
|  | 4774 | MachineInstr &Inst) const { | 
|  | 4775 | MachineBasicBlock &MBB = *Inst.getParent(); | 
|  | 4776 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4777 | MachineBasicBlock::iterator MII = Inst; | 
|  | 4778 | const DebugLoc &DL = Inst.getDebugLoc(); | 
|  | 4779 |  | 
|  | 4780 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 4781 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 4782 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 4783 |  | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 4784 | if (ST.hasDLInsts()) { | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4785 | unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4786 | legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); | 
|  | 4787 | legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); | 
|  | 4788 |  | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 4789 | BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) | 
|  | 4790 | .add(Src0) | 
|  | 4791 | .add(Src1); | 
| Konstantin Zhuravlyov | ca8946a | 2017-09-18 21:22:45 +0000 | [diff] [blame] | 4792 |  | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4793 | MRI.replaceRegWith(Dest.getReg(), NewDest); | 
|  | 4794 | addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); | 
|  | 4795 | } else { | 
|  | 4796 | // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can | 
|  | 4797 | // invert either source and then perform the XOR. If either source is a | 
|  | 4798 | // scalar register, then we can leave the inversion on the scalar unit to | 
|  | 4799 | // achieve a better distribution of scalar and vector instructions. | 
|  | 4800 | bool Src0IsSGPR = Src0.isReg() && | 
|  | 4801 | RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); | 
|  | 4802 | bool Src1IsSGPR = Src1.isReg() && | 
|  | 4803 | RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); | 
|  | 4804 | MachineInstr *Not = nullptr; | 
|  | 4805 | MachineInstr *Xor = nullptr; | 
|  | 4806 | unsigned Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 4807 | unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 4808 |  | 
|  | 4809 | // Build a pair of scalar instructions and add them to the work list. | 
|  | 4810 | // The next iteration over the work list will lower these to the vector | 
|  | 4811 | // unit as necessary. | 
|  | 4812 | if (Src0IsSGPR) { | 
|  | 4813 | Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp) | 
|  | 4814 | .add(Src0); | 
|  | 4815 | Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) | 
|  | 4816 | .addReg(Temp) | 
|  | 4817 | .add(Src1); | 
|  | 4818 | } else if (Src1IsSGPR) { | 
|  | 4819 | Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp) | 
|  | 4820 | .add(Src1); | 
|  | 4821 | Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) | 
|  | 4822 | .add(Src0) | 
|  | 4823 | .addReg(Temp); | 
|  | 4824 | } else { | 
|  | 4825 | Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) | 
|  | 4826 | .add(Src0) | 
|  | 4827 | .add(Src1); | 
|  | 4828 | Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) | 
|  | 4829 | .addReg(Temp); | 
|  | 4830 | Worklist.insert(Not); | 
|  | 4831 | } | 
|  | 4832 |  | 
|  | 4833 | MRI.replaceRegWith(Dest.getReg(), NewDest); | 
|  | 4834 |  | 
|  | 4835 | Worklist.insert(Xor); | 
|  | 4836 |  | 
|  | 4837 | addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 4838 | } | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4839 | } | 
|  | 4840 |  | 
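|  |  | // Lower a scalar NAND/NOR as the corresponding AND/OR followed by an | 
|  |  | // S_NOT_B32; both new instructions are queued so they can be moved to the | 
|  |  | // VALU in turn. | 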
|  | 4841 | void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, | 
|  | 4842 | MachineInstr &Inst, | 
|  | 4843 | unsigned Opcode) const { | 
|  | 4844 | MachineBasicBlock &MBB = *Inst.getParent(); | 
|  | 4845 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4846 | MachineBasicBlock::iterator MII = Inst; | 
|  | 4847 | const DebugLoc &DL = Inst.getDebugLoc(); | 
|  | 4848 |  | 
|  | 4849 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 4850 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 4851 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 4852 |  | 
|  | 4853 | unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 4854 | unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 4855 |  | 
|  | 4856 | MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) | 
|  | 4857 | .add(Src0) | 
|  | 4858 | .add(Src1); | 
|  | 4859 |  | 
|  | 4860 | MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) | 
|  | 4861 | .addReg(Interm); | 
|  | 4862 |  | 
|  | 4863 | Worklist.insert(&Op); | 
|  | 4864 | Worklist.insert(&Not); | 
|  | 4865 |  | 
|  | 4866 | MRI.replaceRegWith(Dest.getReg(), NewDest); | 
|  | 4867 | addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); | 
|  | 4868 | } | 
|  | 4869 |  | 
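|  |  | // Lower a scalar ANDN2/ORN2 as an S_NOT_B32 of the second source followed by | 
|  |  | // the corresponding AND/OR; both new instructions are queued for the VALU. | 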
|  | 4870 | void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, | 
|  | 4871 | MachineInstr &Inst, | 
|  | 4872 | unsigned Opcode) const { | 
|  | 4873 | MachineBasicBlock &MBB = *Inst.getParent(); | 
|  | 4874 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4875 | MachineBasicBlock::iterator MII = Inst; | 
|  | 4876 | const DebugLoc &DL = Inst.getDebugLoc(); | 
|  | 4877 |  | 
|  | 4878 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 4879 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 4880 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 4881 |  | 
|  | 4882 | unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 4883 | unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | 
|  | 4884 |  | 
|  | 4885 | MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) | 
|  | 4886 | .add(Src1); | 
|  | 4887 |  | 
|  | 4888 | MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) | 
|  | 4889 | .add(Src0) | 
|  | 4890 | .addReg(Interm); | 
|  | 4891 |  | 
|  | 4892 | Worklist.insert(&Not); | 
|  | 4893 | Worklist.insert(&Op); | 
| Konstantin Zhuravlyov | ca8946a | 2017-09-18 21:22:45 +0000 | [diff] [blame] | 4894 |  | 
| Matt Arsenault | 0084adc | 2018-04-30 19:08:16 +0000 | [diff] [blame] | 4895 | MRI.replaceRegWith(Dest.getReg(), NewDest); | 
|  | 4896 | addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); | 
| Konstantin Zhuravlyov | ca8946a | 2017-09-18 21:22:45 +0000 | [diff] [blame] | 4897 | } | 
|  | 4898 |  | 
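|  |  | // Split a 64-bit scalar unary operation into two 32-bit operations on the | 
|  |  | // sub0/sub1 halves and recombine the results with a REG_SEQUENCE; e.g. an | 
|  |  | // S_NOT_B64 is rewritten as two S_NOT_B32s, which later worklist iterations | 
|  |  | // move to the VALU. | 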
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4899 | void SIInstrInfo::splitScalar64BitUnaryOp( | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 4900 | SetVectorType &Worklist, MachineInstr &Inst, | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4901 | unsigned Opcode) const { | 
|  | 4902 | MachineBasicBlock &MBB = *Inst.getParent(); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4903 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4904 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 4905 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 4906 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 4907 | DebugLoc DL = Inst.getDebugLoc(); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4908 |  | 
|  | 4909 | MachineBasicBlock::iterator MII = Inst; | 
|  | 4910 |  | 
|  | 4911 | const MCInstrDesc &InstDesc = get(Opcode); | 
|  | 4912 | const TargetRegisterClass *Src0RC = Src0.isReg() ? | 
|  | 4913 | MRI.getRegClass(Src0.getReg()) : | 
|  | 4914 | &AMDGPU::SGPR_32RegClass; | 
|  | 4915 |  | 
|  | 4916 | const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); | 
|  | 4917 |  | 
|  | 4918 | MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, | 
|  | 4919 | AMDGPU::sub0, Src0SubRC); | 
|  | 4920 |  | 
|  | 4921 | const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 4922 | const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); | 
|  | 4923 | const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4924 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 4925 | unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4926 | MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4927 |  | 
|  | 4928 | MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, | 
|  | 4929 | AMDGPU::sub1, Src0SubRC); | 
|  | 4930 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 4931 | unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4932 | MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4933 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 4934 | unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4935 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) | 
|  | 4936 | .addReg(DestSub0) | 
|  | 4937 | .addImm(AMDGPU::sub0) | 
|  | 4938 | .addReg(DestSub1) | 
|  | 4939 | .addImm(AMDGPU::sub1); | 
|  | 4940 |  | 
|  | 4941 | MRI.replaceRegWith(Dest.getReg(), FullDestReg); | 
|  | 4942 |  | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 4943 | Worklist.insert(&LoHalf); | 
|  | 4944 | Worklist.insert(&HiHalf); | 
|  | 4945 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 4946 | // We don't need to legalizeOperands here because for a single operand, src0 | 
|  | 4947 | // will support any kind of input. | 
|  | 4948 |  | 
|  | 4949 | // Move all users of this moved value. | 
|  | 4950 | addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); | 
| Matt Arsenault | 689f325 | 2014-06-09 16:36:31 +0000 | [diff] [blame] | 4951 | } | 
|  | 4952 |  | 
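|  |  | // Split a 64-bit scalar add/sub pseudo into a 32-bit add/sub that defines a | 
|  |  | // carry register and an addc/subb on the high half that consumes it, then | 
|  |  | // recombine the halves with a REG_SEQUENCE. | 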
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 4953 | void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, | 
|  | 4954 | MachineInstr &Inst, | 
|  | 4955 | MachineDominatorTree *MDT) const { | 
| Matt Arsenault | 301162c | 2017-11-15 21:51:43 +0000 | [diff] [blame] | 4956 | bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); | 
|  | 4957 |  | 
|  | 4958 | MachineBasicBlock &MBB = *Inst.getParent(); | 
|  | 4959 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 4960 |  | 
|  | 4961 | unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); | 
|  | 4962 | unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4963 | unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 4964 |  | 
|  | 4965 | unsigned CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); | 
|  | 4966 | unsigned DeadCarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); | 
|  | 4967 |  | 
|  | 4968 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 4969 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 4970 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 4971 | const DebugLoc &DL = Inst.getDebugLoc(); | 
|  | 4972 | MachineBasicBlock::iterator MII = Inst; | 
|  | 4973 |  | 
|  | 4974 | const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); | 
|  | 4975 | const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); | 
|  | 4976 | const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); | 
|  | 4977 | const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); | 
|  | 4978 |  | 
|  | 4979 | MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, | 
|  | 4980 | AMDGPU::sub0, Src0SubRC); | 
|  | 4981 | MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, | 
|  | 4982 | AMDGPU::sub0, Src1SubRC); | 
|  | 4983 |  | 
|  | 4984 |  | 
|  | 4985 | MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, | 
|  | 4986 | AMDGPU::sub1, Src0SubRC); | 
|  | 4987 | MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, | 
|  | 4988 | AMDGPU::sub1, Src1SubRC); | 
|  | 4989 |  | 
|  | 4990 | unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64; | 
|  | 4991 | MachineInstr *LoHalf = | 
|  | 4992 | BuildMI(MBB, MII, DL, get(LoOpc), DestSub0) | 
|  | 4993 | .addReg(CarryReg, RegState::Define) | 
|  | 4994 | .add(SrcReg0Sub0) | 
| Tim Renouf | cfdfba9 | 2019-03-18 19:35:44 +0000 | [diff] [blame] | 4995 | .add(SrcReg1Sub0) | 
|  | 4996 | .addImm(0); // clamp bit | 
| Matt Arsenault | 301162c | 2017-11-15 21:51:43 +0000 | [diff] [blame] | 4997 |  | 
|  | 4998 | unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; | 
|  | 4999 | MachineInstr *HiHalf = | 
|  | 5000 | BuildMI(MBB, MII, DL, get(HiOpc), DestSub1) | 
|  | 5001 | .addReg(DeadCarryReg, RegState::Define | RegState::Dead) | 
|  | 5002 | .add(SrcReg0Sub1) | 
|  | 5003 | .add(SrcReg1Sub1) | 
| Tim Renouf | cfdfba9 | 2019-03-18 19:35:44 +0000 | [diff] [blame] | 5004 | .addReg(CarryReg, RegState::Kill) | 
|  | 5005 | .addImm(0); // clamp bit | 
| Matt Arsenault | 301162c | 2017-11-15 21:51:43 +0000 | [diff] [blame] | 5006 |  | 
|  | 5007 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) | 
|  | 5008 | .addReg(DestSub0) | 
|  | 5009 | .addImm(AMDGPU::sub0) | 
|  | 5010 | .addReg(DestSub1) | 
|  | 5011 | .addImm(AMDGPU::sub1); | 
|  | 5012 |  | 
|  | 5013 | MRI.replaceRegWith(Dest.getReg(), FullDestReg); | 
|  | 5014 |  | 
|  | 5015 | // Try to legalize the operands in case we need to swap the order to keep it | 
|  | 5016 | // valid. | 
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 5017 | legalizeOperands(*LoHalf, MDT); | 
|  | 5018 | legalizeOperands(*HiHalf, MDT); | 
| Matt Arsenault | 301162c | 2017-11-15 21:51:43 +0000 | [diff] [blame] | 5019 |  | 
|  | 5020 | // Move all users of this moved value. | 
|  | 5021 | addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); | 
|  | 5022 | } | 
|  | 5023 |  | 
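|  |  | // Split a 64-bit scalar bitwise operation into independent 32-bit operations | 
|  |  | // on the sub0/sub1 halves of each source and recombine the results with a | 
|  |  | // REG_SEQUENCE; the halves are queued so they can be moved to the VALU. | 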
| Scott Linder | 823549a | 2018-10-08 18:47:01 +0000 | [diff] [blame] | 5024 | void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, | 
|  | 5025 | MachineInstr &Inst, unsigned Opcode, | 
|  | 5026 | MachineDominatorTree *MDT) const { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5027 | MachineBasicBlock &MBB = *Inst.getParent(); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5028 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 5029 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5030 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 5031 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 5032 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 5033 | DebugLoc DL = Inst.getDebugLoc(); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5034 |  | 
|  | 5035 | MachineBasicBlock::iterator MII = Inst; | 
|  | 5036 |  | 
|  | 5037 | const MCInstrDesc &InstDesc = get(Opcode); | 
| Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 5038 | const TargetRegisterClass *Src0RC = Src0.isReg() ? | 
|  | 5039 | MRI.getRegClass(Src0.getReg()) : | 
|  | 5040 | &AMDGPU::SGPR_32RegClass; | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5041 |  | 
| Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 5042 | const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); | 
|  | 5043 | const TargetRegisterClass *Src1RC = Src1.isReg() ? | 
|  | 5044 | MRI.getRegClass(Src1.getReg()) : | 
|  | 5045 | &AMDGPU::SGPR_32RegClass; | 
|  | 5046 |  | 
|  | 5047 | const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); | 
|  | 5048 |  | 
|  | 5049 | MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, | 
|  | 5050 | AMDGPU::sub0, Src0SubRC); | 
|  | 5051 | MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, | 
|  | 5052 | AMDGPU::sub0, Src1SubRC); | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 5053 | MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, | 
|  | 5054 | AMDGPU::sub1, Src0SubRC); | 
|  | 5055 | MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, | 
|  | 5056 | AMDGPU::sub1, Src1SubRC); | 
| Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 5057 |  | 
|  | 5058 | const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5059 | const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); | 
|  | 5060 | const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); | 
| Matt Arsenault | 684dc80 | 2014-03-24 20:08:13 +0000 | [diff] [blame] | 5061 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5062 | unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5063 | MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 5064 | .add(SrcReg0Sub0) | 
|  | 5065 | .add(SrcReg1Sub0); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5066 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5067 | unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5068 | MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 5069 | .add(SrcReg0Sub1) | 
|  | 5070 | .add(SrcReg1Sub1); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5071 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5072 | unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5073 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) | 
|  | 5074 | .addReg(DestSub0) | 
|  | 5075 | .addImm(AMDGPU::sub0) | 
|  | 5076 | .addReg(DestSub1) | 
|  | 5077 | .addImm(AMDGPU::sub1); | 
|  | 5078 |  | 
|  | 5079 | MRI.replaceRegWith(Dest.getReg(), FullDestReg); | 
|  | 5080 |  | 
| Graham Sellers | 04f7a4d | 2018-11-29 16:05:38 +0000 | [diff] [blame] | 5081 | Worklist.insert(&LoHalf); | 
|  | 5082 | Worklist.insert(&HiHalf); | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5083 |  | 
|  | 5084 | // Move all users of this moved value. | 
|  | 5085 | addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); | 
| Matt Arsenault | f35182c | 2014-03-24 20:08:05 +0000 | [diff] [blame] | 5086 | } | 
|  | 5087 |  | 
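|  |  | // Lower a 64-bit XNOR when no V_XNOR is available as (~a) ^ b, preferring to | 
|  |  | // apply the NOT to a source that is already an SGPR so it can stay on the | 
|  |  | // scalar unit; only the XOR is queued for conversion to the VALU. | 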
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 5088 | void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, | 
|  | 5089 | MachineInstr &Inst, | 
|  | 5090 | MachineDominatorTree *MDT) const { | 
|  | 5091 | MachineBasicBlock &MBB = *Inst.getParent(); | 
|  | 5092 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 5093 |  | 
|  | 5094 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 5095 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 5096 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 5097 | const DebugLoc &DL = Inst.getDebugLoc(); | 
|  | 5098 |  | 
|  | 5099 | MachineBasicBlock::iterator MII = Inst; | 
|  | 5100 |  | 
|  | 5101 | const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); | 
|  | 5102 |  | 
|  | 5103 | unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 5104 |  | 
|  | 5105 | MachineOperand* Op0; | 
|  | 5106 | MachineOperand* Op1; | 
|  | 5107 |  | 
|  | 5108 | if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { | 
|  | 5109 | Op0 = &Src0; | 
|  | 5110 | Op1 = &Src1; | 
|  | 5111 | } else { | 
|  | 5112 | Op0 = &Src1; | 
|  | 5113 | Op1 = &Src0; | 
|  | 5114 | } | 
|  | 5115 |  | 
|  | 5116 | BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) | 
|  | 5117 | .add(*Op0); | 
|  | 5118 |  | 
|  | 5119 | unsigned NewDest = MRI.createVirtualRegister(DestRC); | 
|  | 5120 |  | 
|  | 5121 | MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) | 
|  | 5122 | .addReg(Interm) | 
|  | 5123 | .add(*Op1); | 
|  | 5124 |  | 
|  | 5125 | MRI.replaceRegWith(Dest.getReg(), NewDest); | 
|  | 5126 |  | 
|  | 5127 | Worklist.insert(&Xor); | 
|  | 5128 | } | 
|  | 5129 |  | 
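|  |  | // Lower a 64-bit population count as the sum of two 32-bit counts: the VALU | 
|  |  | // V_BCNT_U32_B32 adds its second operand to the count, so the low-half count | 
|  |  | // is fed in as the accumulator of the high-half count. | 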
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5130 | void SIInstrInfo::splitScalar64BitBCNT( | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 5131 | SetVectorType &Worklist, MachineInstr &Inst) const { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5132 | MachineBasicBlock &MBB = *Inst.getParent(); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5133 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 5134 |  | 
|  | 5135 | MachineBasicBlock::iterator MII = Inst; | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 5136 | const DebugLoc &DL = Inst.getDebugLoc(); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5137 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5138 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 5139 | MachineOperand &Src = Inst.getOperand(1); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5140 |  | 
| Marek Olsak | c536850 | 2015-01-15 18:43:01 +0000 | [diff] [blame] | 5141 | const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5142 | const TargetRegisterClass *SrcRC = Src.isReg() ? | 
|  | 5143 | MRI.getRegClass(Src.getReg()) : | 
|  | 5144 | &AMDGPU::SGPR_32RegClass; | 
|  | 5145 |  | 
|  | 5146 | unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5147 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5148 |  | 
|  | 5149 | const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); | 
|  | 5150 |  | 
|  | 5151 | MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, | 
|  | 5152 | AMDGPU::sub0, SrcSubRC); | 
|  | 5153 | MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, | 
|  | 5154 | AMDGPU::sub1, SrcSubRC); | 
|  | 5155 |  | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 5156 | BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5157 |  | 
| Diana Picus | 116bbab | 2017-01-13 09:58:52 +0000 | [diff] [blame] | 5158 | BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5159 |  | 
|  | 5160 | MRI.replaceRegWith(Dest.getReg(), ResultReg); | 
|  | 5161 |  | 
| Matt Arsenault | 5e7f95e | 2015-08-26 20:48:04 +0000 | [diff] [blame] | 5162 | // We don't need to legalize operands here. src0 for either instruction can be | 
|  | 5163 | // an SGPR, and the second input is unused or determined here. | 
|  | 5164 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); | 
| Matt Arsenault | 8333e43 | 2014-06-10 19:18:24 +0000 | [diff] [blame] | 5165 | } | 
|  | 5166 |  | 
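|  |  | // Lower a 64-bit S_BFE_I64 sign-extend-in-register: for widths below 32 the | 
|  |  | // low half comes from a V_BFE_I32 and the high half replicates its sign with | 
|  |  | // an arithmetic shift right by 31; for a width of exactly 32 the low half is | 
|  |  | // used as-is and only the sign-replicating shift is needed. | 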
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 5167 | void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5168 | MachineInstr &Inst) const { | 
|  | 5169 | MachineBasicBlock &MBB = *Inst.getParent(); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5170 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
|  | 5171 | MachineBasicBlock::iterator MII = Inst; | 
| Graham Sellers | ba559ac | 2018-12-01 12:27:53 +0000 | [diff] [blame] | 5172 | const DebugLoc &DL = Inst.getDebugLoc(); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5173 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5174 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 5175 | uint32_t Imm = Inst.getOperand(2).getImm(); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5176 | uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. | 
|  | 5177 | uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. | 
|  | 5178 |  | 
| Matt Arsenault | 6ad3426 | 2014-11-14 18:40:49 +0000 | [diff] [blame] | 5179 | (void) Offset; | 
|  | 5180 |  | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5181 | // Only sext_inreg cases handled. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5182 | assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && | 
|  | 5183 | Offset == 0 && "Not implemented"); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5184 |  | 
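|  |  | // For widths below 32, sign extend the field out of the low word with | 
|  |  | // V_BFE_I32, then replicate its sign bit into the high word with an | 
|  |  | // arithmetic shift right by 31. | 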
|  | 5185 | if (BitWidth < 32) { | 
|  | 5186 | unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5187 | unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5188 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); | 
|  | 5189 |  | 
|  | 5190 | BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5191 | .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) | 
|  | 5192 | .addImm(0) | 
|  | 5193 | .addImm(BitWidth); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5194 |  | 
|  | 5195 | BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) | 
|  | 5196 | .addImm(31) | 
|  | 5197 | .addReg(MidRegLo); | 
|  | 5198 |  | 
|  | 5199 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) | 
|  | 5200 | .addReg(MidRegLo) | 
|  | 5201 | .addImm(AMDGPU::sub0) | 
|  | 5202 | .addReg(MidRegHi) | 
|  | 5203 | .addImm(AMDGPU::sub1); | 
|  | 5204 |  | 
|  | 5205 | MRI.replaceRegWith(Dest.getReg(), ResultReg); | 
| Matt Arsenault | 445833c | 2015-08-26 20:47:58 +0000 | [diff] [blame] | 5206 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5207 | return; | 
|  | 5208 | } | 
|  | 5209 |  | 
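|  |  | // BitWidth == 32: the low word already holds the result; just broadcast its | 
|  |  | // sign bit into the high word. | 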
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5210 | MachineOperand &Src = Inst.getOperand(1); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5211 | unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5212 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); | 
|  | 5213 |  | 
|  | 5214 | BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) | 
|  | 5215 | .addImm(31) | 
|  | 5216 | .addReg(Src.getReg(), 0, AMDGPU::sub0); | 
|  | 5217 |  | 
|  | 5218 | BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) | 
|  | 5219 | .addReg(Src.getReg(), 0, AMDGPU::sub0) | 
|  | 5220 | .addImm(AMDGPU::sub0) | 
|  | 5221 | .addReg(TmpReg) | 
|  | 5222 | .addImm(AMDGPU::sub1); | 
|  | 5223 |  | 
|  | 5224 | MRI.replaceRegWith(Dest.getReg(), ResultReg); | 
| Matt Arsenault | 445833c | 2015-08-26 20:47:58 +0000 | [diff] [blame] | 5225 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); | 
| Matt Arsenault | 9481221 | 2014-11-14 18:18:16 +0000 | [diff] [blame] | 5226 | } | 
|  | 5227 |  | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5228 | void SIInstrInfo::addUsersToMoveToVALUWorklist( | 
|  | 5229 | unsigned DstReg, | 
|  | 5230 | MachineRegisterInfo &MRI, | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 5231 | SetVectorType &Worklist) const { | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5232 | for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), | 
| Matt Arsenault | 4c1e9ec | 2016-12-20 18:55:06 +0000 | [diff] [blame] | 5233 | E = MRI.use_end(); I != E;) { | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5234 | MachineInstr &UseMI = *I->getParent(); | 
| Neil Henning | 0799352 | 2019-01-29 14:28:17 +0000 | [diff] [blame] | 5235 |  | 
|  | 5236 | unsigned OpNo = 0; | 
|  | 5237 |  | 
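|  |  | // Copy-like pseudos take their register class from the result (operand 0); | 
|  |  | // for other users, look at the constraint on the operand that reads DstReg. | 
|  |  | // Users whose relevant operand class still has no VGPRs must be moved too. | 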
|  | 5238 | switch (UseMI.getOpcode()) { | 
|  | 5239 | case AMDGPU::COPY: | 
|  | 5240 | case AMDGPU::WQM: | 
|  | 5241 | case AMDGPU::WWM: | 
|  | 5242 | case AMDGPU::REG_SEQUENCE: | 
|  | 5243 | case AMDGPU::PHI: | 
|  | 5244 | case AMDGPU::INSERT_SUBREG: | 
|  | 5245 | break; | 
|  | 5246 | default: | 
|  | 5247 | OpNo = I.getOperandNo(); | 
|  | 5248 | break; | 
|  | 5249 | } | 
|  | 5250 |  | 
|  | 5251 | if (!RI.hasVGPRs(getOpRegClass(UseMI, OpNo))) { | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 5252 | Worklist.insert(&UseMI); | 
| Matt Arsenault | 4c1e9ec | 2016-12-20 18:55:06 +0000 | [diff] [blame] | 5253 |  | 
|  | 5254 | do { | 
|  | 5255 | ++I; | 
|  | 5256 | } while (I != E && I->getParent() == &UseMI); | 
|  | 5257 | } else { | 
|  | 5258 | ++I; | 
| Matt Arsenault | f003c38 | 2015-08-26 20:47:50 +0000 | [diff] [blame] | 5259 | } | 
|  | 5260 | } | 
|  | 5261 | } | 
|  | 5262 |  | 
| Alfred Huang | 5b27072 | 2017-07-14 17:56:55 +0000 | [diff] [blame] | 5263 | void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 5264 | MachineRegisterInfo &MRI, | 
|  | 5265 | MachineInstr &Inst) const { | 
|  | 5266 | unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5267 | MachineBasicBlock *MBB = Inst.getParent(); | 
|  | 5268 | MachineOperand &Src0 = Inst.getOperand(1); | 
|  | 5269 | MachineOperand &Src1 = Inst.getOperand(2); | 
|  | 5270 | const DebugLoc &DL = Inst.getDebugLoc(); | 
|  | 5271 |  | 
|  | 5272 | switch (Inst.getOpcode()) { | 
|  | 5273 | case AMDGPU::S_PACK_LL_B32_B16: { | 
| Konstantin Zhuravlyov | d24aeb2 | 2017-04-13 23:17:00 +0000 | [diff] [blame] | 5274 | unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5275 | unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 5276 |  | 
| Konstantin Zhuravlyov | d24aeb2 | 2017-04-13 23:17:00 +0000 | [diff] [blame] | 5277 | // FIXME: Can do a lot better if we know the high bits of src0 or src1 are | 
|  | 5278 | // 0. | 
|  | 5279 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) | 
|  | 5280 | .addImm(0xffff); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 5281 |  | 
| Konstantin Zhuravlyov | d24aeb2 | 2017-04-13 23:17:00 +0000 | [diff] [blame] | 5282 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) | 
|  | 5283 | .addReg(ImmReg, RegState::Kill) | 
|  | 5284 | .add(Src0); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 5285 |  | 
| Konstantin Zhuravlyov | d24aeb2 | 2017-04-13 23:17:00 +0000 | [diff] [blame] | 5286 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) | 
|  | 5287 | .add(Src1) | 
|  | 5288 | .addImm(16) | 
|  | 5289 | .addReg(TmpReg, RegState::Kill); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 5290 | break; | 
|  | 5291 | } | 
|  | 5292 | case AMDGPU::S_PACK_LH_B32_B16: { | 
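|  |  | // V_BFI_B32 computes (S0 & S1) | (~S0 & S2); with a 0xffff mask this keeps | 
|  |  | // the low 16 bits of Src0 and the high 16 bits of Src1. | 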
|  | 5293 | unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5294 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) | 
|  | 5295 | .addImm(0xffff); | 
|  | 5296 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) | 
|  | 5297 | .addReg(ImmReg, RegState::Kill) | 
|  | 5298 | .add(Src0) | 
|  | 5299 | .add(Src1); | 
|  | 5300 | break; | 
|  | 5301 | } | 
|  | 5302 | case AMDGPU::S_PACK_HH_B32_B16: { | 
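|  |  | // Move the high half of Src0 into the low 16 bits, then merge in the high | 
|  |  | // 16 bits of Src1 with V_AND_OR_B32: (Src1 & 0xffff0000) | (Src0 >> 16). | 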
|  | 5303 | unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5304 | unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | 
|  | 5305 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) | 
|  | 5306 | .addImm(16) | 
|  | 5307 | .add(Src0); | 
|  | 5308 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) | 
| Konstantin Zhuravlyov | 88938d4 | 2017-04-21 19:35:05 +0000 | [diff] [blame] | 5309 | .addImm(0xffff0000); | 
| Matt Arsenault | eb522e6 | 2017-02-27 22:15:25 +0000 | [diff] [blame] | 5310 | BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) | 
|  | 5311 | .add(Src1) | 
|  | 5312 | .addReg(ImmReg, RegState::Kill) | 
|  | 5313 | .addReg(TmpReg, RegState::Kill); | 
|  | 5314 | break; | 
|  | 5315 | } | 
|  | 5316 | default: | 
|  | 5317 | llvm_unreachable("unhandled s_pack_* instruction"); | 
|  | 5318 | } | 
|  | 5319 |  | 
|  | 5320 | MachineOperand &Dest = Inst.getOperand(0); | 
|  | 5321 | MRI.replaceRegWith(Dest.getReg(), ResultReg); | 
|  | 5322 | addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); | 
|  | 5323 | } | 
|  | 5324 |  | 
| Michael Liao | 6883d7e | 2019-03-15 12:42:21 +0000 | [diff] [blame] | 5325 | void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, | 
|  | 5326 | MachineInstr &SCCDefInst, | 
|  | 5327 | SetVectorType &Worklist) const { | 
|  | 5328 | // Ensure that def inst defines SCC, which is still live. | 
|  | 5329 | assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && | 
|  | 5330 | !Op.isDead() && Op.getParent() == &SCCDefInst); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 5331 | // This assumes that all the users of SCC are in the same block | 
|  | 5332 | // as the SCC def. | 
| Michael Liao | 6883d7e | 2019-03-15 12:42:21 +0000 | [diff] [blame] | 5333 | for (MachineInstr &MI : // Skip the def inst itself. | 
|  | 5334 | make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), | 
|  | 5335 | SCCDefInst.getParent()->end())) { | 
|  | 5336 | // Check if SCC is used first. | 
|  | 5337 | if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) | 
|  | 5338 | Worklist.insert(&MI); | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 5339 | // Exit if we find another SCC def. | 
| Stanislav Mekhanoshin | 13d3371 | 2018-11-09 17:58:59 +0000 | [diff] [blame] | 5340 | if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 5341 | return; | 
| Tom Stellard | bc4497b | 2016-02-12 23:45:29 +0000 | [diff] [blame] | 5342 | } | 
|  | 5343 | } | 
|  | 5344 |  | 
| Matt Arsenault | ba6aae7 | 2015-09-28 20:54:57 +0000 | [diff] [blame] | 5345 | const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( | 
|  | 5346 | const MachineInstr &Inst) const { | 
|  | 5347 | const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); | 
|  | 5348 |  | 
|  | 5349 | switch (Inst.getOpcode()) { | 
|  | 5350 | // For target instructions, getOpRegClass just returns the virtual register | 
|  | 5351 | // class associated with the operand, so we need to find an equivalent VGPR | 
|  | 5352 | // register class in order to move the instruction to the VALU. | 
|  | 5353 | case AMDGPU::COPY: | 
|  | 5354 | case AMDGPU::PHI: | 
|  | 5355 | case AMDGPU::REG_SEQUENCE: | 
|  | 5356 | case AMDGPU::INSERT_SUBREG: | 
| Connor Abbott | 8c217d0 | 2017-08-04 18:36:49 +0000 | [diff] [blame] | 5357 | case AMDGPU::WQM: | 
| Connor Abbott | 92638ab | 2017-08-04 18:36:52 +0000 | [diff] [blame] | 5358 | case AMDGPU::WWM: | 
| Alexander Timofeev | 37bd9bd | 2019-06-06 21:13:02 +0000 | [diff] [blame] | 5359 | if (RI.hasVGPRs(NewDstRC)) | 
| Matt Arsenault | ba6aae7 | 2015-09-28 20:54:57 +0000 | [diff] [blame] | 5360 | return nullptr; | 
|  | 5361 |  | 
|  | 5362 | NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); | 
|  | 5363 | if (!NewDstRC) | 
|  | 5364 | return nullptr; | 
|  | 5365 | return NewDstRC; | 
|  | 5366 | default: | 
|  | 5367 | return NewDstRC; | 
|  | 5368 | } | 
|  | 5369 | } | 
|  | 5370 |  | 
| Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 5371 | // Find the one SGPR operand we are allowed to use. | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5372 | unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI, | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5373 | int OpIndices[3]) const { | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5374 | const MCInstrDesc &Desc = MI.getDesc(); | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5375 |  | 
|  | 5376 | // Find the one SGPR operand we are allowed to use. | 
| Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 5377 | // | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5378 | // First we need to consider the instruction's operand requirements before | 
|  | 5379 | // legalizing. Some operands are required to be SGPRs, such as implicit uses | 
|  | 5380 | // of VCC, but we are still bound by the constant bus requirement to only use | 
|  | 5381 | // one. | 
|  | 5382 | // | 
|  | 5383 | // If the operand's class is an SGPR, we can never move it. | 
|  | 5384 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5385 | unsigned SGPRReg = findImplicitSGPRRead(MI); | 
| Matt Arsenault | e223ceb | 2015-10-21 21:15:01 +0000 | [diff] [blame] | 5386 | if (SGPRReg != AMDGPU::NoRegister) | 
|  | 5387 | return SGPRReg; | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5388 |  | 
|  | 5389 | unsigned UsedSGPRs[3] = { AMDGPU::NoRegister }; | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5390 | const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5391 |  | 
|  | 5392 | for (unsigned i = 0; i < 3; ++i) { | 
|  | 5393 | int Idx = OpIndices[i]; | 
|  | 5394 | if (Idx == -1) | 
|  | 5395 | break; | 
|  | 5396 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5397 | const MachineOperand &MO = MI.getOperand(Idx); | 
| Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 5398 | if (!MO.isReg()) | 
|  | 5399 | continue; | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5400 |  | 
| Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 5401 | // Is this operand statically required to be an SGPR based on the operand | 
|  | 5402 | // constraints? | 
|  | 5403 | const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); | 
|  | 5404 | bool IsRequiredSGPR = RI.isSGPRClass(OpRC); | 
|  | 5405 | if (IsRequiredSGPR) | 
|  | 5406 | return MO.getReg(); | 
|  | 5407 |  | 
|  | 5408 | // If this could be a VGPR or an SGPR, check the dynamic register class. | 
|  | 5409 | unsigned Reg = MO.getReg(); | 
|  | 5410 | const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); | 
|  | 5411 | if (RI.isSGPRClass(RegRC)) | 
|  | 5412 | UsedSGPRs[i] = Reg; | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5413 | } | 
|  | 5414 |  | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5415 | // We don't have a required SGPR operand, so we have a bit more freedom in | 
|  | 5416 | // selecting operands to move. | 
|  | 5417 |  | 
|  | 5418 | // Try to select the most used SGPR. If an SGPR is equal to one of the | 
|  | 5419 | // others, we choose that. | 
|  | 5420 | // | 
|  | 5421 | // e.g. | 
|  | 5422 | // V_FMA_F32 v0, s0, s0, s0 -> No moves | 
|  | 5423 | // V_FMA_F32 v0, s0, s1, s0 -> Move s1 | 
|  | 5424 |  | 
| Matt Arsenault | 6c06741 | 2015-11-03 22:30:15 +0000 | [diff] [blame] | 5425 | // TODO: If some of the operands are 64-bit SGPRs and some 32, we should | 
|  | 5426 | // prefer those. | 
|  | 5427 |  | 
| Matt Arsenault | ee522bf | 2014-09-26 17:55:06 +0000 | [diff] [blame] | 5428 | if (UsedSGPRs[0] != AMDGPU::NoRegister) { | 
|  | 5429 | if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) | 
|  | 5430 | SGPRReg = UsedSGPRs[0]; | 
|  | 5431 | } | 
|  | 5432 |  | 
|  | 5433 | if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { | 
|  | 5434 | if (UsedSGPRs[1] == UsedSGPRs[2]) | 
|  | 5435 | SGPRReg = UsedSGPRs[1]; | 
|  | 5436 | } | 
|  | 5437 |  | 
|  | 5438 | return SGPRReg; | 
|  | 5439 | } | 
|  | 5440 |  | 
| Tom Stellard | 6407e1e | 2014-08-01 00:32:33 +0000 | [diff] [blame] | 5441 | MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, | 
| Matt Arsenault | ace5b76 | 2014-10-17 18:00:43 +0000 | [diff] [blame] | 5442 | unsigned OperandName) const { | 
| Tom Stellard | 1aaad69 | 2014-07-21 16:55:33 +0000 | [diff] [blame] | 5443 | int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); | 
|  | 5444 | if (Idx == -1) | 
|  | 5445 | return nullptr; | 
|  | 5446 |  | 
|  | 5447 | return &MI.getOperand(Idx); | 
|  | 5448 | } | 
| Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 5449 |  | 
|  | 5450 | uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 5451 | if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { | 
|  | 5452 | return (16ULL << 44) | // IMG_FORMAT_32_FLOAT | 
|  | 5453 | (1ULL << 56) | // RESOURCE_LEVEL = 1 | 
|  | 5454 | (3ULL << 60); // OOB_SELECT = 3 | 
|  | 5455 | } | 
|  | 5456 |  | 
| Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 5457 | uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; | 
| Tom Stellard | 4694ed0 | 2015-06-26 21:58:42 +0000 | [diff] [blame] | 5458 | if (ST.isAmdHsaOS()) { | 
| Marek Olsak | 5c7a61d | 2017-03-21 17:00:39 +0000 | [diff] [blame] | 5459 | // Set ATC = 1. GFX9 doesn't have this bit. | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 5460 | if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) | 
| Marek Olsak | 5c7a61d | 2017-03-21 17:00:39 +0000 | [diff] [blame] | 5461 | RsrcDataFormat |= (1ULL << 56); | 
| Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 5462 |  | 
| Marek Olsak | 5c7a61d | 2017-03-21 17:00:39 +0000 | [diff] [blame] | 5463 | // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. | 
|  | 5464 | // BTW, it disables TC L2 and therefore decreases performance. | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 5465 | if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) | 
| Michel Danzer | beb79ce | 2016-03-16 09:10:35 +0000 | [diff] [blame] | 5466 | RsrcDataFormat |= (2ULL << 59); | 
| Tom Stellard | 4694ed0 | 2015-06-26 21:58:42 +0000 | [diff] [blame] | 5467 | } | 
|  | 5468 |  | 
| Tom Stellard | 794c8c0 | 2014-12-02 17:05:41 +0000 | [diff] [blame] | 5469 | return RsrcDataFormat; | 
|  | 5470 | } | 
| Marek Olsak | d1a69a2 | 2015-09-29 23:37:32 +0000 | [diff] [blame] | 5471 |  | 
|  | 5472 | uint64_t SIInstrInfo::getScratchRsrcWords23() const { | 
|  | 5473 | uint64_t Rsrc23 = getDefaultRsrcDataFormat() | | 
|  | 5474 | AMDGPU::RSRC_TID_ENABLE | | 
|  | 5475 | 0xffffffff; // Size; | 
|  | 5476 |  | 
| Marek Olsak | 5c7a61d | 2017-03-21 17:00:39 +0000 | [diff] [blame] | 5477 | // GFX9 doesn't have ELEMENT_SIZE. | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 5478 | if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | 
| Marek Olsak | 5c7a61d | 2017-03-21 17:00:39 +0000 | [diff] [blame] | 5479 | uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; | 
|  | 5480 | Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; | 
|  | 5481 | } | 
| Matt Arsenault | 24ee078 | 2016-02-12 02:40:47 +0000 | [diff] [blame] | 5482 |  | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 5483 | // IndexStride = 64 on GFX9 and earlier, 32 on GFX10 (field values 3 and 2). | 
|  | 5484 | uint64_t IndexStride = ST.getGeneration() <= AMDGPUSubtarget::GFX9 ? 3 : 2; | 
|  | 5485 | Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; | 
| Matt Arsenault | 24ee078 | 2016-02-12 02:40:47 +0000 | [diff] [blame] | 5486 |  | 
| Marek Olsak | d1a69a2 | 2015-09-29 23:37:32 +0000 | [diff] [blame] | 5487 | // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. | 
|  | 5488 | // Clear them unless we want a huge stride. | 
| Stanislav Mekhanoshin | 28a1936 | 2019-05-04 04:20:37 +0000 | [diff] [blame] | 5489 | if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && | 
|  | 5490 | ST.getGeneration() <= AMDGPUSubtarget::GFX9) | 
| Marek Olsak | d1a69a2 | 2015-09-29 23:37:32 +0000 | [diff] [blame] | 5491 | Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; | 
|  | 5492 |  | 
|  | 5493 | return Rsrc23; | 
|  | 5494 | } | 
| Nicolai Haehnle | 02c3291 | 2016-01-13 16:10:10 +0000 | [diff] [blame] | 5495 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5496 | bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { | 
|  | 5497 | unsigned Opc = MI.getOpcode(); | 
| Nicolai Haehnle | 02c3291 | 2016-01-13 16:10:10 +0000 | [diff] [blame] | 5498 |  | 
|  | 5499 | return isSMRD(Opc); | 
|  | 5500 | } | 
|  | 5501 |  | 
| Duncan P. N. Exon Smith | 9cfc75c | 2016-06-30 00:01:54 +0000 | [diff] [blame] | 5502 | bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const { | 
|  | 5503 | unsigned Opc = MI.getOpcode(); | 
| Nicolai Haehnle | 02c3291 | 2016-01-13 16:10:10 +0000 | [diff] [blame] | 5504 |  | 
|  | 5505 | return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc); | 
|  | 5506 | } | 
| Tom Stellard | 2ff7262 | 2016-01-28 16:04:37 +0000 | [diff] [blame] | 5507 |  | 
| Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame] | 5508 | unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, | 
|  | 5509 | int &FrameIndex) const { | 
|  | 5510 | const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); | 
|  | 5511 | if (!Addr || !Addr->isFI()) | 
|  | 5512 | return AMDGPU::NoRegister; | 
|  | 5513 |  | 
|  | 5514 | assert(!MI.memoperands_empty() && | 
| Matt Arsenault | 0da6350 | 2018-08-31 05:49:54 +0000 | [diff] [blame] | 5515 | (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); | 
| Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame] | 5516 |  | 
|  | 5517 | FrameIndex = Addr->getIndex(); | 
|  | 5518 | return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); | 
|  | 5519 | } | 
|  | 5520 |  | 
|  | 5521 | unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, | 
|  | 5522 | int &FrameIndex) const { | 
|  | 5523 | const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); | 
|  | 5524 | assert(Addr && Addr->isFI()); | 
|  | 5525 | FrameIndex = Addr->getIndex(); | 
|  | 5526 | return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); | 
|  | 5527 | } | 
|  | 5528 |  | 
|  | 5529 | unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, | 
|  | 5530 | int &FrameIndex) const { | 
| Matt Arsenault | 3354f42 | 2016-09-10 01:20:33 +0000 | [diff] [blame] | 5531 | if (!MI.mayLoad()) | 
|  | 5532 | return AMDGPU::NoRegister; | 
|  | 5533 |  | 
|  | 5534 | if (isMUBUF(MI) || isVGPRSpill(MI)) | 
|  | 5535 | return isStackAccess(MI, FrameIndex); | 
|  | 5536 |  | 
|  | 5537 | if (isSGPRSpill(MI)) | 
|  | 5538 | return isSGPRStackAccess(MI, FrameIndex); | 
|  | 5539 |  | 
|  | 5540 | return AMDGPU::NoRegister; | 
|  | 5541 | } | 
|  | 5542 |  | 
|  | 5543 | unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, | 
|  | 5544 | int &FrameIndex) const { | 
|  | 5545 | if (!MI.mayStore()) | 
|  | 5546 | return AMDGPU::NoRegister; | 
|  | 5547 |  | 
|  | 5548 | if (isMUBUF(MI) || isVGPRSpill(MI)) | 
|  | 5549 | return isStackAccess(MI, FrameIndex); | 
|  | 5550 |  | 
|  | 5551 | if (isSGPRSpill(MI)) | 
|  | 5552 | return isSGPRStackAccess(MI, FrameIndex); | 
|  | 5553 |  | 
|  | 5554 | return AMDGPU::NoRegister; | 
|  | 5555 | } | 
|  | 5556 |  | 
| Matt Arsenault | 9ab1fa6 | 2017-10-04 22:59:12 +0000 | [diff] [blame] | 5557 | unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { | 
|  | 5558 | unsigned Size = 0; | 
|  | 5559 | MachineBasicBlock::const_instr_iterator I = MI.getIterator(); | 
|  | 5560 | MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); | 
|  | 5561 | while (++I != E && I->isInsideBundle()) { | 
|  | 5562 | assert(!I->isBundle() && "No nested bundle!"); | 
|  | 5563 | Size += getInstSizeInBytes(*I); | 
|  | 5564 | } | 
|  | 5565 |  | 
|  | 5566 | return Size; | 
|  | 5567 | } | 
|  | 5568 |  | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5569 | unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { | 
|  | 5570 | unsigned Opc = MI.getOpcode(); | 
|  | 5571 | const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); | 
|  | 5572 | unsigned DescSize = Desc.getSize(); | 
|  | 5573 |  | 
|  | 5574 | // If we have a definitive size, we can use it. Otherwise we need to inspect | 
|  | 5575 | // the operands to know the size. | 
| Matt Arsenault | 0183c56 | 2018-07-27 09:15:03 +0000 | [diff] [blame] | 5576 | if (isFixedSize(MI)) | 
|  | 5577 | return DescSize; | 
|  | 5578 |  | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5579 | // 4-byte instructions may have a 32-bit literal encoded after them. Check | 
|  | 5580 | // operands that could ever be literals. | 
|  | 5581 | if (isVALU(MI) || isSALU(MI)) { | 
|  | 5582 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); | 
|  | 5583 | if (Src0Idx == -1) | 
| Nicolai Haehnle | 283b995 | 2018-08-29 07:46:09 +0000 | [diff] [blame] | 5584 | return DescSize; // No operands. | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5585 |  | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 5586 | if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx])) | 
| Stanislav Mekhanoshin | 692560d | 2019-05-01 16:32:58 +0000 | [diff] [blame] | 5587 | return isVOP3(MI) ? 12 : (DescSize + 4); | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5588 |  | 
|  | 5589 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); | 
|  | 5590 | if (Src1Idx == -1) | 
| Nicolai Haehnle | 283b995 | 2018-08-29 07:46:09 +0000 | [diff] [blame] | 5591 | return DescSize; | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5592 |  | 
| Matt Arsenault | 4bd7236 | 2016-12-10 00:39:12 +0000 | [diff] [blame] | 5593 | if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx])) | 
| Stanislav Mekhanoshin | 692560d | 2019-05-01 16:32:58 +0000 | [diff] [blame] | 5594 | return isVOP3(MI) ? 12 : (DescSize + 4); | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5595 |  | 
| Nicolai Haehnle | 283b995 | 2018-08-29 07:46:09 +0000 | [diff] [blame] | 5596 | int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); | 
|  | 5597 | if (Src2Idx == -1) | 
|  | 5598 | return DescSize; | 
|  | 5599 |  | 
|  | 5600 | if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx])) | 
| Stanislav Mekhanoshin | 692560d | 2019-05-01 16:32:58 +0000 | [diff] [blame] | 5601 | return isVOP3(MI) ? 12 : (DescSize + 4); | 
| Nicolai Haehnle | 283b995 | 2018-08-29 07:46:09 +0000 | [diff] [blame] | 5602 |  | 
|  | 5603 | return DescSize; | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5604 | } | 
|  | 5605 |  | 
| Stanislav Mekhanoshin | 692560d | 2019-05-01 16:32:58 +0000 | [diff] [blame] | 5606 | // Check whether we have extra NSA words. | 
|  | 5607 | if (isMIMG(MI)) { | 
|  | 5608 | int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); | 
|  | 5609 | if (VAddr0Idx < 0) | 
|  | 5610 | return 8; | 
|  | 5611 |  | 
|  | 5612 | int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); | 
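|  |  | // Operands from vaddr0 up to srsrc are the address VGPRs; the base encoding | 
|  |  | // is 8 bytes and each additional NSA dword carries up to four more addresses. | 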
|  | 5613 | return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); | 
|  | 5614 | } | 
|  | 5615 |  | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5616 | switch (Opc) { | 
|  | 5617 | case TargetOpcode::IMPLICIT_DEF: | 
|  | 5618 | case TargetOpcode::KILL: | 
|  | 5619 | case TargetOpcode::DBG_VALUE: | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5620 | case TargetOpcode::EH_LABEL: | 
|  | 5621 | return 0; | 
| Matt Arsenault | 9ab1fa6 | 2017-10-04 22:59:12 +0000 | [diff] [blame] | 5622 | case TargetOpcode::BUNDLE: | 
|  | 5623 | return getInstBundleSize(MI); | 
| Craig Topper | 784929d | 2019-02-08 20:48:56 +0000 | [diff] [blame] | 5624 | case TargetOpcode::INLINEASM: | 
|  | 5625 | case TargetOpcode::INLINEASM_BR: { | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5626 | const MachineFunction *MF = MI.getParent()->getParent(); | 
|  | 5627 | const char *AsmStr = MI.getOperand(0).getSymbolName(); | 
| Matt Arsenault | ca64ef2 | 2019-05-22 16:28:41 +0000 | [diff] [blame] | 5628 | return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), | 
|  | 5629 | &MF->getSubtarget()); | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5630 | } | 
|  | 5631 | default: | 
| Nicolai Haehnle | 283b995 | 2018-08-29 07:46:09 +0000 | [diff] [blame] | 5632 | return DescSize; | 
| Matt Arsenault | 02458c2 | 2016-06-06 20:10:33 +0000 | [diff] [blame] | 5633 | } | 
|  | 5634 | } | 
|  | 5635 |  | 
| Tom Stellard | 6695ba0 | 2016-10-28 23:53:48 +0000 | [diff] [blame] | 5636 | bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { | 
|  | 5637 | if (!isFLAT(MI)) | 
|  | 5638 | return false; | 
|  | 5639 |  | 
|  | 5640 | if (MI.memoperands_empty()) | 
|  | 5641 | return true; | 
|  | 5642 |  | 
|  | 5643 | for (const MachineMemOperand *MMO : MI.memoperands()) { | 
| Matt Arsenault | 0da6350 | 2018-08-31 05:49:54 +0000 | [diff] [blame] | 5644 | if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) | 
| Tom Stellard | 6695ba0 | 2016-10-28 23:53:48 +0000 | [diff] [blame] | 5645 | return true; | 
|  | 5646 | } | 
|  | 5647 | return false; | 
|  | 5648 | } | 
|  | 5649 |  | 
| Jan Sjodin | a06bfe0 | 2017-05-15 20:18:37 +0000 | [diff] [blame] | 5650 | bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { | 
|  | 5651 | return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; | 
|  | 5652 | } | 
|  | 5653 |  | 
|  | 5654 | void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, | 
|  | 5655 | MachineBasicBlock *IfEnd) const { | 
|  | 5656 | MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); | 
|  | 5657 | assert(TI != IfEntry->end()); | 
|  | 5658 |  | 
|  | 5659 | MachineInstr *Branch = &(*TI); | 
|  | 5660 | MachineFunction *MF = IfEntry->getParent(); | 
|  | 5661 | MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); | 
|  | 5662 |  | 
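|  |  | // Rewrite the pseudo branch as an SI_IF at the end of the entry block with a | 
|  |  | // matching SI_END_CF at the start of the merge block. | 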
|  | 5663 | if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { | 
|  | 5664 | unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 5665 | MachineInstr *SIIF = | 
|  | 5666 | BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) | 
|  | 5667 | .add(Branch->getOperand(0)) | 
|  | 5668 | .add(Branch->getOperand(1)); | 
|  | 5669 | MachineInstr *SIEND = | 
|  | 5670 | BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) | 
|  | 5671 | .addReg(DstReg); | 
|  | 5672 |  | 
|  | 5673 | IfEntry->erase(TI); | 
|  | 5674 | IfEntry->insert(IfEntry->end(), SIIF); | 
|  | 5675 | IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); | 
|  | 5676 | } | 
|  | 5677 | } | 
|  | 5678 |  | 
|  | 5679 | void SIInstrInfo::convertNonUniformLoopRegion( | 
|  | 5680 | MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { | 
|  | 5681 | MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); | 
|  | 5682 | // We expect 2 terminators, one conditional and one unconditional. | 
|  | 5683 | assert(TI != LoopEnd->end()); | 
|  | 5684 |  | 
|  | 5685 | MachineInstr *Branch = &(*TI); | 
|  | 5686 | MachineFunction *MF = LoopEnd->getParent(); | 
|  | 5687 | MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); | 
|  | 5688 |  | 
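|  |  | // Build a break-mask PHI in the loop header: zero on edges entering the | 
|  |  | // loop, BackEdgeReg along the back edge. The latch then updates the mask | 
|  |  | // with SI_IF_BREAK and branches back via SI_LOOP. | 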
|  | 5689 | if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { | 
|  | 5690 |  | 
|  | 5691 | unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 5692 | unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 5693 | MachineInstrBuilder HeaderPHIBuilder = | 
|  | 5694 | BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); | 
|  | 5695 | for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), | 
|  | 5696 | E = LoopEntry->pred_end(); | 
|  | 5697 | PI != E; ++PI) { | 
|  | 5698 | if (*PI == LoopEnd) { | 
|  | 5699 | HeaderPHIBuilder.addReg(BackEdgeReg); | 
|  | 5700 | } else { | 
|  | 5701 | MachineBasicBlock *PMBB = *PI; | 
|  | 5702 | unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
|  | 5703 | materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), | 
|  | 5704 | ZeroReg, 0); | 
|  | 5705 | HeaderPHIBuilder.addReg(ZeroReg); | 
|  | 5706 | } | 
|  | 5707 | HeaderPHIBuilder.addMBB(*PI); | 
|  | 5708 | } | 
|  | 5709 | MachineInstr *HeaderPhi = HeaderPHIBuilder; | 
|  | 5710 | MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), | 
|  | 5711 | get(AMDGPU::SI_IF_BREAK), BackEdgeReg) | 
|  | 5712 | .addReg(DstReg) | 
|  | 5713 | .add(Branch->getOperand(0)); | 
|  | 5714 | MachineInstr *SILOOP = | 
|  | 5715 | BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) | 
|  | 5716 | .addReg(BackEdgeReg) | 
|  | 5717 | .addMBB(LoopEntry); | 
|  | 5718 |  | 
|  | 5719 | LoopEntry->insert(LoopEntry->begin(), HeaderPhi); | 
|  | 5720 | LoopEnd->erase(TI); | 
|  | 5721 | LoopEnd->insert(LoopEnd->end(), SIIFBREAK); | 
|  | 5722 | LoopEnd->insert(LoopEnd->end(), SILOOP); | 
|  | 5723 | } | 
|  | 5724 | } | 
|  | 5725 |  | 
| Tom Stellard | 2ff7262 | 2016-01-28 16:04:37 +0000 | [diff] [blame] | 5726 | ArrayRef<std::pair<int, const char *>> | 
|  | 5727 | SIInstrInfo::getSerializableTargetIndices() const { | 
|  | 5728 | static const std::pair<int, const char *> TargetIndices[] = { | 
|  | 5729 | {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, | 
|  | 5730 | {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, | 
|  | 5731 | {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, | 
|  | 5732 | {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, | 
|  | 5733 | {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; | 
|  | 5734 | return makeArrayRef(TargetIndices); | 
|  | 5735 | } | 
| Tom Stellard | cb6ba62 | 2016-04-30 00:23:06 +0000 | [diff] [blame] | 5736 |  | 
|  | 5737 | /// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The | 
|  | 5738 | /// post-RA version of misched uses CreateTargetMIHazardRecognizer. | 
|  | 5739 | ScheduleHazardRecognizer * | 
|  | 5740 | SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, | 
|  | 5741 | const ScheduleDAG *DAG) const { | 
|  | 5742 | return new GCNHazardRecognizer(DAG->MF); | 
|  | 5743 | } | 
|  | 5744 |  | 
|  | 5745 | /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer | 
|  | 5746 | /// pass. | 
|  | 5747 | ScheduleHazardRecognizer * | 
|  | 5748 | SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { | 
|  | 5749 | return new GCNHazardRecognizer(MF); | 
|  | 5750 | } | 
| Stanislav Mekhanoshin | 6ec3e3a | 2017-01-20 00:44:31 +0000 | [diff] [blame] | 5751 |  | 
| Matt Arsenault | 3f031e7 | 2017-07-02 23:21:48 +0000 | [diff] [blame] | 5752 | std::pair<unsigned, unsigned> | 
|  | 5753 | SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { | 
|  | 5754 | return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); | 
|  | 5755 | } | 
|  | 5756 |  | 
|  | 5757 | ArrayRef<std::pair<unsigned, const char *>> | 
|  | 5758 | SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { | 
|  | 5759 | static const std::pair<unsigned, const char *> TargetFlags[] = { | 
|  | 5760 | { MO_GOTPCREL, "amdgpu-gotprel" }, | 
|  | 5761 | { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, | 
|  | 5762 | { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, | 
|  | 5763 | { MO_REL32_LO, "amdgpu-rel32-lo" }, | 
|  | 5764 | { MO_REL32_HI, "amdgpu-rel32-hi" } | 
|  | 5765 | }; | 
|  | 5766 |  | 
|  | 5767 | return makeArrayRef(TargetFlags); | 
|  | 5768 | } | 
|  | 5769 |  | 
| Stanislav Mekhanoshin | 6ec3e3a | 2017-01-20 00:44:31 +0000 | [diff] [blame] | 5770 | bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { | 
|  | 5771 | return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && | 
|  | 5772 | MI.modifiesRegister(AMDGPU::EXEC, &RI); | 
|  | 5773 | } | 
| Stanislav Mekhanoshin | 86b0a54 | 2017-04-14 00:33:44 +0000 | [diff] [blame] | 5774 |  | 
|  | 5775 | MachineInstrBuilder | 
|  | 5776 | SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, | 
|  | 5777 | MachineBasicBlock::iterator I, | 
|  | 5778 | const DebugLoc &DL, | 
|  | 5779 | unsigned DestReg) const { | 
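|  |  | // With no-carry adds available, use V_ADD_U32_e64 directly. Otherwise fall | 
|  |  | // back to V_ADD_I32_e64 with a dead carry-out, hinted to VCC so it does not | 
|  |  | // tie up another SGPR pair. | 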
| Matt Arsenault | 686d5c7 | 2017-11-30 23:42:30 +0000 | [diff] [blame] | 5780 | if (ST.hasAddNoCarry()) | 
|  | 5781 | return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); | 
| Stanislav Mekhanoshin | 86b0a54 | 2017-04-14 00:33:44 +0000 | [diff] [blame] | 5782 |  | 
| Matt Arsenault | 686d5c7 | 2017-11-30 23:42:30 +0000 | [diff] [blame] | 5783 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); | 
| Stanislav Mekhanoshin | 86b0a54 | 2017-04-14 00:33:44 +0000 | [diff] [blame] | 5784 | unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); | 
| Matt Arsenault | 686d5c7 | 2017-11-30 23:42:30 +0000 | [diff] [blame] | 5785 | MRI.setRegAllocationHint(UnusedCarry, 0, AMDGPU::VCC); | 
| Stanislav Mekhanoshin | 86b0a54 | 2017-04-14 00:33:44 +0000 | [diff] [blame] | 5786 |  | 
|  | 5787 | return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) | 
|  | 5788 | .addReg(UnusedCarry, RegState::Define | RegState::Dead); | 
|  | 5789 | } | 
| Marek Olsak | ce76ea0 | 2017-10-24 10:27:13 +0000 | [diff] [blame] | 5790 |  | 
|  | 5791 | bool SIInstrInfo::isKillTerminator(unsigned Opcode) { | 
|  | 5792 | switch (Opcode) { | 
|  | 5793 | case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: | 
|  | 5794 | case AMDGPU::SI_KILL_I1_TERMINATOR: | 
|  | 5795 | return true; | 
|  | 5796 | default: | 
|  | 5797 | return false; | 
|  | 5798 | } | 
|  | 5799 | } | 
|  | 5800 |  | 
|  | 5801 | const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { | 
|  | 5802 | switch (Opcode) { | 
|  | 5803 | case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: | 
|  | 5804 | return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); | 
|  | 5805 | case AMDGPU::SI_KILL_I1_PSEUDO: | 
|  | 5806 | return get(AMDGPU::SI_KILL_I1_TERMINATOR); | 
|  | 5807 | default: | 
|  | 5808 | llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); | 
|  | 5809 | } | 
|  | 5810 | } | 
| Tom Stellard | 44b30b4 | 2018-05-22 02:03:23 +0000 | [diff] [blame] | 5811 |  | 
|  | 5812 | bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { | 
|  | 5813 | if (!isSMRD(MI)) | 
|  | 5814 | return false; | 
|  | 5815 |  | 
|  | 5816 | // Check that it is using a buffer resource. | 
|  | 5817 | int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); | 
|  | 5818 | if (Idx == -1) // e.g. s_memtime | 
|  | 5819 | return false; | 
|  | 5820 |  | 
|  | 5821 | const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; | 
|  | 5822 | return RCID == AMDGPU::SReg_128RegClassID; | 
|  | 5823 | } | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5824 |  | 
|  | 5825 | // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td | 
|  | 5826 | enum SIEncodingFamily { | 
|  | 5827 | SI = 0, | 
|  | 5828 | VI = 1, | 
|  | 5829 | SDWA = 2, | 
|  | 5830 | SDWA9 = 3, | 
|  | 5831 | GFX80 = 4, | 
| Stanislav Mekhanoshin | cee607e | 2019-04-24 17:03:15 +0000 | [diff] [blame] | 5832 | GFX9 = 5, | 
|  | 5833 | GFX10 = 6, | 
|  | 5834 | SDWA10 = 7 | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5835 | }; | 
|  | 5836 |  | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 5837 | static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5838 | switch (ST.getGeneration()) { | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 5839 | default: | 
|  | 5840 | break; | 
|  | 5841 | case AMDGPUSubtarget::SOUTHERN_ISLANDS: | 
|  | 5842 | case AMDGPUSubtarget::SEA_ISLANDS: | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5843 | return SIEncodingFamily::SI; | 
| Tom Stellard | 5bfbae5 | 2018-07-11 20:59:01 +0000 | [diff] [blame] | 5844 | case AMDGPUSubtarget::VOLCANIC_ISLANDS: | 
|  | 5845 | case AMDGPUSubtarget::GFX9: | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5846 | return SIEncodingFamily::VI; | 
| Stanislav Mekhanoshin | cee607e | 2019-04-24 17:03:15 +0000 | [diff] [blame] | 5847 | case AMDGPUSubtarget::GFX10: | 
|  | 5848 | return SIEncodingFamily::GFX10; | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5849 | } | 
|  | 5850 | llvm_unreachable("Unknown subtarget generation!"); | 
|  | 5851 | } | 
|  | 5852 |  | 
|  | 5853 | int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { | 
|  | 5854 | SIEncodingFamily Gen = subtargetEncodingFamily(ST); | 
|  | 5855 |  | 
|  | 5856 | if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 5857 | ST.getGeneration() == AMDGPUSubtarget::GFX9) | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5858 | Gen = SIEncodingFamily::GFX9; | 
|  | 5859 |  | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5860 | // Adjust the encoding family to GFX80 for D16 buffer instructions when the | 
|  | 5861 | // subtarget has UnpackedD16VMem feature. | 
|  | 5862 | // TODO: remove this when we discard GFX80 encoding. | 
|  | 5863 | if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) | 
|  | 5864 | Gen = SIEncodingFamily::GFX80; | 
|  | 5865 |  | 
| Stanislav Mekhanoshin | 5cf8167 | 2019-05-02 04:01:39 +0000 | [diff] [blame] | 5866 | if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { | 
|  | 5867 | switch (ST.getGeneration()) { | 
|  | 5868 | default: | 
|  | 5869 | Gen = SIEncodingFamily::SDWA; | 
|  | 5870 | break; | 
|  | 5871 | case AMDGPUSubtarget::GFX9: | 
|  | 5872 | Gen = SIEncodingFamily::SDWA9; | 
|  | 5873 | break; | 
|  | 5874 | case AMDGPUSubtarget::GFX10: | 
|  | 5875 | Gen = SIEncodingFamily::SDWA10; | 
|  | 5876 | break; | 
|  | 5877 | } | 
|  | 5878 | } | 
|  | 5879 |  | 
| Tom Stellard | c5a154d | 2018-06-28 23:47:12 +0000 | [diff] [blame] | 5880 | int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); | 
|  | 5881 |  | 
|  | 5882 | // -1 means that Opcode is already a native instruction. | 
|  | 5883 | if (MCOp == -1) | 
|  | 5884 | return Opcode; | 
|  | 5885 |  | 
|  | 5886 | // (uint16_t)-1 means that Opcode is a pseudo instruction that has | 
|  | 5887 | // no encoding in the given subtarget generation. | 
|  | 5888 | if (MCOp == (uint16_t)-1) | 
|  | 5889 | return -1; | 
|  | 5890 |  | 
|  | 5891 | return MCOp; | 
|  | 5892 | } | 
| Valery Pykhtin | 3d9afa2 | 2018-11-30 14:21:56 +0000 | [diff] [blame] | 5893 |  | 
|  | 5894 | static | 
|  | 5895 | TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) { | 
|  | 5896 | assert(RegOpnd.isReg()); | 
|  | 5897 | return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() : | 
|  | 5898 | getRegSubRegPair(RegOpnd); | 
|  | 5899 | } | 
|  | 5900 |  | 
|  | 5901 | TargetInstrInfo::RegSubRegPair | 
|  | 5902 | llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) { | 
|  | 5903 | assert(MI.isRegSequence()); | 
|  | 5904 | for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I) | 
|  | 5905 | if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) { | 
|  | 5906 | auto &RegOp = MI.getOperand(1 + 2 * I); | 
|  | 5907 | return getRegOrUndef(RegOp); | 
|  | 5908 | } | 
|  | 5909 | return TargetInstrInfo::RegSubRegPair(); | 
|  | 5910 | } | 
|  | 5911 |  | 
|  | 5912 | // Try to find the definition of reg:subreg in subreg-manipulation pseudos | 
|  | 5913 | // Following a subreg of reg:subreg isn't supported | 
|  | 5914 | static bool followSubRegDef(MachineInstr &MI, | 
|  | 5915 | TargetInstrInfo::RegSubRegPair &RSR) { | 
|  | 5916 | if (!RSR.SubReg) | 
|  | 5917 | return false; | 
|  | 5918 | switch (MI.getOpcode()) { | 
|  | 5919 | default: break; | 
|  | 5920 | case AMDGPU::REG_SEQUENCE: | 
|  | 5921 | RSR = getRegSequenceSubReg(MI, RSR.SubReg); | 
|  | 5922 | return true; | 
|  | 5923 | // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg | 
|  | 5924 | case AMDGPU::INSERT_SUBREG: | 
|  | 5925 | if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm()) | 
|  | 5926 | // inserted the subreg we're looking for | 
|  | 5927 | RSR = getRegOrUndef(MI.getOperand(2)); | 
|  | 5928 | else { // the subreg in the rest of the reg | 
|  | 5929 | auto R1 = getRegOrUndef(MI.getOperand(1)); | 
|  | 5930 | if (R1.SubReg) // subreg of subreg isn't supported | 
|  | 5931 | return false; | 
|  | 5932 | RSR.Reg = R1.Reg; | 
|  | 5933 | } | 
|  | 5934 | return true; | 
|  | 5935 | } | 
|  | 5936 | return false; | 
|  | 5937 | } | 
|  | 5938 |  | 
|  | 5939 | MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, | 
|  | 5940 | MachineRegisterInfo &MRI) { | 
|  | 5941 | assert(MRI.isSSA()); | 
|  | 5942 | if (!TargetRegisterInfo::isVirtualRegister(P.Reg)) | 
|  | 5943 | return nullptr; | 
|  | 5944 |  | 
|  | 5945 | auto RSR = P; | 
|  | 5946 | auto *DefInst = MRI.getVRegDef(RSR.Reg); | 
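|  |  | // Follow the def chain through plain copies and the subreg-forming pseudos | 
|  |  | // handled by followSubRegDef; return the first def we cannot look through. | 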
|  | 5947 | while (auto *MI = DefInst) { | 
|  | 5948 | DefInst = nullptr; | 
|  | 5949 | switch (MI->getOpcode()) { | 
|  | 5950 | case AMDGPU::COPY: | 
|  | 5951 | case AMDGPU::V_MOV_B32_e32: { | 
|  | 5952 | auto &Op1 = MI->getOperand(1); | 
|  | 5953 | if (Op1.isReg() && | 
|  | 5954 | TargetRegisterInfo::isVirtualRegister(Op1.getReg())) { | 
|  | 5955 | if (Op1.isUndef()) | 
|  | 5956 | return nullptr; | 
|  | 5957 | RSR = getRegSubRegPair(Op1); | 
|  | 5958 | DefInst = MRI.getVRegDef(RSR.Reg); | 
|  | 5959 | } | 
|  | 5960 | break; | 
|  | 5961 | } | 
|  | 5962 | default: | 
|  | 5963 | if (followSubRegDef(*MI, RSR)) { | 
|  | 5964 | if (!RSR.Reg) | 
|  | 5965 | return nullptr; | 
|  | 5966 | DefInst = MRI.getVRegDef(RSR.Reg); | 
|  | 5967 | } | 
|  | 5968 | } | 
|  | 5969 | if (!DefInst) | 
|  | 5970 | return MI; | 
|  | 5971 | } | 
|  | 5972 | return nullptr; | 
|  | 5973 | } | 
| Valery Pykhtin | 7fe97f8 | 2019-02-08 11:59:48 +0000 | [diff] [blame] | 5974 |  | 
|  | 5975 | bool llvm::isEXECMaskConstantBetweenDefAndUses(unsigned VReg, | 
|  | 5976 | MachineRegisterInfo &MRI) { | 
|  | 5977 | assert(MRI.isSSA() && "Must be run on SSA"); | 
|  | 5978 | auto *TRI = MRI.getTargetRegisterInfo(); | 
|  | 5979 |  | 
|  | 5980 | auto *DefI = MRI.getVRegDef(VReg); | 
|  | 5981 | auto *BB = DefI->getParent(); | 
|  | 5982 |  | 
|  | 5983 | DenseSet<MachineInstr*> Uses; | 
|  | 5984 | for (auto &Use : MRI.use_nodbg_operands(VReg)) { | 
|  | 5985 | auto *I = Use.getParent(); | 
|  | 5986 | if (I->getParent() != BB) | 
|  | 5987 | return false; | 
|  | 5988 | Uses.insert(I); | 
|  | 5989 | } | 
|  | 5990 |  | 
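|  |  | // Walk forward from the def, retiring uses as they are reached. If EXEC is | 
|  |  | // clobbered before the last use, at least one use is still pending and the | 
|  |  | // function returns false. | 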
|  | 5991 | auto E = BB->end(); | 
|  | 5992 | for (auto I = std::next(DefI->getIterator()); I != E; ++I) { | 
|  | 5993 | Uses.erase(&*I); | 
|  | 5994 | // don't check the last use | 
|  | 5995 | if (Uses.empty() || I->modifiesRegister(AMDGPU::EXEC, TRI)) | 
|  | 5996 | break; | 
|  | 5997 | } | 
|  | 5998 | return Uses.empty(); | 
|  | 5999 | } |