//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);

static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  //setOperationAction(ISD::ADDC, MVT::i64, Expand);
  //setOperationAction(ISD::SUBC, MVT::i64, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }
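
  // The net effect of the loop above: for these wide vector types, anything
  // not listed (e.g. ISD::ADD on v8i32) is marked Expand and is split or
  // scalarized into 32-bit pieces by the legalizer, CONCAT_VECTORS is
  // custom-lowered, and the remaining listed ops keep their default
  // handling.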

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }
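
  // Concretely, after this promotion a BUILD_VECTOR of v2i64 is legalized as
  // a BUILD_VECTOR of v4i32 with bitcasts around it, matching the 32-bit
  // lanes the register file actually provides; only loads and stores see the
  // 64-bit element type directly.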

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling
  // and output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value;
  // let LLVM add the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
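
  // Rough sketch of the marshalling mentioned above: the custom lowering
  // bundles the compare value and the new value into one wide data operand
  // for the cmpswap machine instruction, which returns only the old value;
  // the WITH_SUCCESS variant is expanded so generic legalization rebuilds
  // the success flag by comparing the returned old value with the expected
  // one.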

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it
    // (and allows matching fneg (fabs x) patterns).
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);
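
    // A note on what this enables: fabs of a v2f16 value can then be
    // selected as a single 32-bit AND with 0x7fff7fff, and fneg (fabs x)
    // folds to an OR with 0x80008000, which only works if the legalizer
    // leaves the vector FABS node whole instead of unrolling it per element.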

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
  } else {
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsicByIntr(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<SISubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<SISubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }

  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}
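
// A worked example of the two paths above: without flat instruction offsets,
// only a plain register address is legal, so every base + imm access needs an
// explicit add. With them, an immediate of 4092 is accepted (it fits in 12
// unsigned bits), while -16 or 4096 is not and must be materialized
// separately.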

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffer < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or 2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}
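
// Worked example for the Scale cases above: "2 * r" is accepted because it
// can be re-associated as "r + r" (and "2 * r + i" as "r + r + i"), both of
// which the addr64 form can encode, whereas "3 * r" has no such decomposition
// and falls into the rejecting default case.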

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}
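
// A quick arithmetic check of the constant address space cases above: a load
// at base + 1020 has a dword offset of 255 and fits the 8-bit SMRD field on
// SI; base + 1024 (dword offset 256) does not, but is accepted via the 32-bit
// literal form on CI and the 20-bit byte offset on VI.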

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword values must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}
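
// To make the LDS case above concrete: an 8-byte LDS access with only 4-byte
// alignment can still be a single ds_read2_b32/ds_write2_b32 using two
// adjacent dword offsets, so it is reported as both allowed and fast; with
// less than dword alignment, AlignedBy4 is false and the access is rejected.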

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}
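
// Restating the policy above: a memcpy of 16 or more bytes to a 4-byte
// aligned destination is emitted through v4i32 (16-byte accesses), an 8-byte
// one through v2i32, and everything else returns MVT::Other to defer to the
// generic heuristic.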

static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *InputPtrReg;
  const TargetRegisterClass *RC;

  std::tie(InputPtrReg, RC)
    = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
    MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);

  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}
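
// Sketch of the DAG built above: with the kernarg segment pointer preloaded
// into an SGPR pair, an argument at byte offset 36 becomes
// (CopyFromReg kernarg_ptr) + 36 in the 64-bit constant address space, an
// addressing form the SMRD/SMEM rules in isLegalAddressingMode accept.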
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000997
Matt Arsenault9166ce82017-07-28 15:52:08 +0000998SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
999 const SDLoc &SL) const {
1000 auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
1001 uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
1002 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1003}
1004
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001005SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1006 const SDLoc &SL, SDValue Val,
1007 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001008 const ISD::InputArg *Arg) const {
Matt Arsenault6dca5422017-01-09 18:52:39 +00001009 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1010 VT.bitsLT(MemVT)) {
1011 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1012 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1013 }
1014
Tom Stellardbc6c5232016-10-17 16:21:45 +00001015 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001016 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001017 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001018 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001019 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001020 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001021
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001022 return Val;
1023}
1024
1025SDValue SITargetLowering::lowerKernargMemParameter(
1026 SelectionDAG &DAG, EVT VT, EVT MemVT,
1027 const SDLoc &SL, SDValue Chain,
1028 uint64_t Offset, bool Signed,
1029 const ISD::InputArg *Arg) const {
1030 const DataLayout &DL = DAG.getDataLayout();
1031 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1032 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
1033 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1034
1035 unsigned Align = DL.getABITypeAlignment(Ty);
1036
1037 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1038 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001039 MachineMemOperand::MODereferenceable |
1040 MachineMemOperand::MOInvariant);
1041
1042 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001043 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001044}
1045
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001046SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1047 const SDLoc &SL, SDValue Chain,
1048 const ISD::InputArg &Arg) const {
1049 MachineFunction &MF = DAG.getMachineFunction();
1050 MachineFrameInfo &MFI = MF.getFrameInfo();
1051
1052 if (Arg.Flags.isByVal()) {
1053 unsigned Size = Arg.Flags.getByValSize();
1054 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1055 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1056 }
1057
1058 unsigned ArgOffset = VA.getLocMemOffset();
1059 unsigned ArgSize = VA.getValVT().getStoreSize();
1060
1061 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1062
1063 // Create load nodes to retrieve arguments from the stack.
1064 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1065 SDValue ArgValue;
1066
1067 // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
1068 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1069 MVT MemVT = VA.getValVT();
1070
1071 switch (VA.getLocInfo()) {
1072 default:
1073 break;
1074 case CCValAssign::BCvt:
1075 MemVT = VA.getLocVT();
1076 break;
1077 case CCValAssign::SExt:
1078 ExtType = ISD::SEXTLOAD;
1079 break;
1080 case CCValAssign::ZExt:
1081 ExtType = ISD::ZEXTLOAD;
1082 break;
1083 case CCValAssign::AExt:
1084 ExtType = ISD::EXTLOAD;
1085 break;
1086 }
1087
1088 ArgValue = DAG.getExtLoad(
1089 ExtType, SL, VA.getLocVT(), Chain, FIN,
1090 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1091 MemVT);
1092 return ArgValue;
1093}
1094
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001095SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1096 const SIMachineFunctionInfo &MFI,
1097 EVT VT,
1098 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1099 const ArgDescriptor *Reg;
1100 const TargetRegisterClass *RC;
1101
1102 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1103 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1104}
1105
static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First, check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second, split vectors into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

// Allocate special inputs passed in VGPRs.
static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                           MachineFunction &MF,
                                           const SIRegisterInfo &TRI,
                                           SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = AMDGPU::VGPR0;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = AMDGPU::VGPR1;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = AMDGPU::VGPR2;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
  }
}

// Try to allocate a VGPR at the end of the argument list, or if no argument
// VGPRs are left, allocate a stack slot instead.
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
  ArrayRef<MCPhysReg> ArgVGPRs
    = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
  if (RegIdx == ArgVGPRs.size()) {
    // Spill to stack required.
    int64_t Offset = CCInfo.AllocateStack(4, 4);

    return ArgDescriptor::createStack(Offset);
  }

  unsigned Reg = ArgVGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
  return ArgDescriptor::createRegister(Reg);
}

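// Allocate the next unallocated register from the first NumArgRegs registers
// of the given SGPR register class. There is no stack fallback for SGPR
// arguments, so running out is a fatal error.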
static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
                                             const TargetRegisterClass *RC,
                                             unsigned NumArgRegs) {
  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), NumArgRegs);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
  if (RegIdx == ArgSGPRs.size())
    report_fatal_error("ran out of SGPRs for arguments");

  unsigned Reg = ArgSGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, RC);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
}

static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
}

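// Allocate the special workitem ID inputs for a callable function; these land
// in the VGPRs after the last user argument VGPR, or on the stack when no
// argument VGPRs remain.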
static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX())
    Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDY())
    Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDZ())
    Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
}

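// Allocate SGPRs for the special inputs a callable function may need, after
// the user argument SGPRs have been assigned.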
static void allocateSpecialInputSGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  auto &ArgInfo = Info.getArgInfo();

  // TODO: Unify handling with private memory pointers.

  if (Info.hasDispatchPtr())
    ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasQueuePtr())
    ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);

  if (Info.hasKernargSegmentPtr())
    ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasDispatchID())
    ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);

  // flat_scratch_init is not applicable for non-kernel functions.

  if (Info.hasWorkGroupIDX())
    ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDY())
    ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDZ())
    ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);

  if (Info.hasImplicitArgPtr())
    ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

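// Choose the registers that will hold the scratch resource descriptor and the
// scratch wave byte offset. When the preloaded argument registers can be used
// directly they are reserved here; otherwise placeholder registers are
// reserved and replaced after register allocation.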
static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  // For now assume stack access is needed in any callee functions, so we need
  // the scratch registers to pass in.
  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (RequiresStackAccess) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
        AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      if (MFI.hasCalls()) {
        // If we have calls, we need to keep the frame register in a register
        // that won't be clobbered by a call, so ensure it is copied somewhere.

        // This is not a problem for the scratch wave offset, because the same
        // registers are reserved in all functions.

        // FIXME: Nothing is really ensuring this is a call preserved register,
        // it's just selected from the end so it happens to be.
        unsigned ReservedOffsetReg
          = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
        Info.setScratchWaveOffsetReg(ReservedOffsetReg);
      } else {
        unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
          AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
        Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
      }
    } else {
      unsigned ReservedBufferReg
        = TRI.reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info.setScratchRSrcReg(ReservedBufferReg);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info.setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects && !MFI.hasCalls()) {
      unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
        AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }
}

bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  return !Info->isEntryFunction();
}

void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {

}

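// For split-CSR functions, copy each callee-saved register the function uses
// into a virtual register in the entry block, and copy it back to the
// physical register before every return.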
void SITargetLowering::insertCopiesSplitCSR(
  MachineBasicBlock *Entry,
  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (AMDGPU::SReg_64RegClass.contains(*I))
      RC = &AMDGPU::SGPR_64RegClass;
    else if (AMDGPU::SReg_32RegClass.contains(*I))
      RC = &AMDGPU::SGPR_32RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
      .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
  }
}

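// Lower the incoming arguments for all three kinds of functions: kernels load
// theirs from the kernarg segment, shaders take them in registers (with
// unused PS inputs skipped and vectors split per element), and callable
// functions use a conventional register-plus-stack convention. The special
// input registers are allocated around the user arguments.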
SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction().getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function &Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting the debugger prologue if
  // the "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsShader = AMDGPU::isShader(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (!IsEntryFunc) {
    // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
    // this when allocating argument fixed offsets.
    CCInfo.AllocateStack(4, 4);
  }

  if (IsShader) {
    processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
    if (CallConv == CallingConv::AMDGPU_PS) {
      if ((Info->getPSInputAddr() & 0x7F) == 0 ||
          ((Info->getPSInputAddr() & 0xF) == 0 &&
           Info->isPSInputAllocated(11))) {
        CCInfo.AllocateReg(AMDGPU::VGPR0);
        CCInfo.AllocateReg(AMDGPU::VGPR1);
        Info->markPSInputAllocated(0);
        Info->markPSInputEnabled(0);
      }
      if (Subtarget->isAmdPalOS()) {
        // For isAmdPalOS, the user does not enable some bits after compilation
        // based on run-time states; the register values being generated here
        // are the final ones set in hardware. Therefore we need to apply the
        // workaround to PSInputAddr and PSInputEnable together. (The case
        // where a bit is set in PSInputAddr but not PSInputEnable is where the
        // frontend set up an input arg for a particular interpolation mode,
        // but nothing uses that input arg. Really we should have an earlier
        // pass that removes such an arg.)
        unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
        if ((PsInputBits & 0x7F) == 0 ||
            ((PsInputBits & 0xF) == 0 &&
             (PsInputBits >> 11 & 1)))
          Info->markPSInputEnabled(
              countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
      }
    }

    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
        VA.getLocMemOffset();
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());

      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = lowerKernargMemParameter(
        DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    EVT ValVT = VA.getValVT();

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
      // The return object should be reasonably addressable.

      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
      // extra copy is inserted in SelectionDAGBuilder which obscures this.
      unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
        DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
    }

    // If this is an 8 or 16-bit value, it is really passed promoted
    // to 32 bits. Insert an assert[sz]ext to capture this, then
    // truncate to the right size.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    if (IsShader && Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  auto &ArgUsageInfo =
    DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
  ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo());

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  Info->setBytesInStackArgArea(StackArgSize);

  return Chains.empty() ? Chain :
    DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
  CallingConv::ID CallConv,
  MachineFunction &MF, bool IsVarArg,
  const SmallVectorImpl<ISD::OutputArg> &Outs,
  LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

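// Lower outgoing return values. Kernels take the generic AMDGPU path; shaders
// split vector returns into scalar pieces; callable functions additionally
// pass the return address and callee-saved register copies through the return
// node.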
SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.size() == 0);
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (IsShader && Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    // FIXME: Should be able to use a vreg here, but need a way to prevent it
    // from being allocated to a CSR.

    SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
                                                MVT::i64);

    Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(PhysReturnAddrReg);
  }

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // TODO: Partially return in registers if return values don't fit.

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // FIXME: Does sret work properly?
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI
      = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
    const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
    if (I) {
      for (; *I; ++I) {
        if (AMDGPU::SReg_64RegClass.contains(*I))
          RetOps.push_back(DAG.getRegister(*I, MVT::i64));
        else if (AMDGPU::SReg_32RegClass.contains(*I))
          RetOps.push_back(DAG.getRegister(*I, MVT::i32));
        else
          llvm_unreachable("Unexpected register class in CSRsViaCopy!");
      }
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = AMDGPUISD::ENDPGM;
  if (!IsWaveEnd)
    Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

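// Copy the values returned by a call out of the physical registers assigned
// by the calling convention, undoing any promotion that was applied.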
SDValue SITargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
    SDValue ThisVal) const {
  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];
    SDValue Val;

    if (VA.isRegLoc()) {
      Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    } else if (VA.isMemLoc()) {
      report_fatal_error("TODO: return values in memory");
    } else
      llvm_unreachable("unknown argument location type");

    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    InVals.push_back(Val);
  }

  return Chain;
}

// Add code to pass special inputs required depending on used features,
// separate from the explicit user arguments present in the IR.
void SITargetLowering::passSpecialInputs(
    CallLoweringInfo &CLI,
    const SIMachineFunctionInfo &Info,
    SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
    SmallVectorImpl<SDValue> &MemOpChains,
    SDValue Chain,
    SDValue StackPtr) const {
  // If we don't have a call site, this was a call inserted by
  // legalization. These can never use special inputs.
  if (!CLI.CS)
    return;

  const Function *CalleeFunc = CLI.CS.getCalledFunction();
  assert(CalleeFunc);

  SelectionDAG &DAG = CLI.DAG;
  const SDLoc &DL = CLI.DL;

  const SISubtarget *ST = getSubtarget();
  const SIRegisterInfo *TRI = ST->getRegisterInfo();

  auto &ArgUsageInfo =
    DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
  const AMDGPUFunctionArgInfo &CalleeArgInfo
    = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);

  const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
    AMDGPUFunctionArgInfo::DISPATCH_PTR,
    AMDGPUFunctionArgInfo::QUEUE_PTR,
    AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
    AMDGPUFunctionArgInfo::DISPATCH_ID,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
    AMDGPUFunctionArgInfo::WORKITEM_ID_X,
    AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
    AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
  };

  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;

    std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC)
      = CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    // All special arguments are ints for now.
    EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
    SDValue InputReg;

    if (IncomingArg) {
      InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
    } else {
      // The implicit arg ptr is special because it doesn't have a corresponding
      // input for kernels, and is computed from the kernarg segment pointer.
      assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
      InputReg = getImplicitArgPtr(DAG, DL);
    }

    if (OutgoingArg->isRegister()) {
      RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
    } else {
      SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
                                              InputReg,
                                              OutgoingArg->getStackOffset());
      MemOpChains.push_back(ArgStore);
    }
  }
}

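// Tail-call optimization can only be guaranteed (rather than opportunistic)
// for the fast calling convention.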
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

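// Decide whether a call can be lowered as a tail call: the calling
// conventions, callee-saved registers, result passing, and the caller's stack
// argument area must all be compatible between caller and callee.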
bool SITargetLowering::isEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);

  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
  if (!CallerPreserved)
    return false;

  bool CCMatch = CallerCC == CalleeCC;

  if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // TODO: Can we handle var args?
  if (IsVarArg)
    return false;

  for (const Argument &Arg : CallerF.args()) {
    if (Arg.hasByValAttr())
      return false;
  }

  LLVMContext &Ctx = *DAG.getContext();

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
                                  CCAssignFnForCall(CalleeCC, IsVarArg),
                                  CCAssignFnForCall(CallerCC, IsVarArg)))
    return false;

  // The callee has to preserve all registers the caller needs to preserve.
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Nothing more to check if the callee is taking no arguments.
  if (Outs.empty())
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));

  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  // If the stack arguments for this call do not fit into our own save area
  // then the call cannot be made a tail call.
  // TODO: Is this really necessary?
  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
    return false;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
}

bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!CI->isTailCall())
    return false;

  const Function *ParentFn = CI->getParent()->getParent();
  if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
    return false;

  auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
  return (Attr.getValueAsString() != "true");
}

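// Lower an outgoing call to a callable (non-entry) function: pass the scratch
// resource registers and any required special inputs alongside the user
// arguments, and emit a sibling/tail call when eligible.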
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002167// The wave scratch offset register is used as the global base pointer.
2168SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2169 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002170 SelectionDAG &DAG = CLI.DAG;
2171 const SDLoc &DL = CLI.DL;
2172 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2173 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2174 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2175 SDValue Chain = CLI.Chain;
2176 SDValue Callee = CLI.Callee;
2177 bool &IsTailCall = CLI.IsTailCall;
2178 CallingConv::ID CallConv = CLI.CallConv;
2179 bool IsVarArg = CLI.IsVarArg;
2180 bool IsSibCall = false;
2181 bool IsThisReturn = false;
2182 MachineFunction &MF = DAG.getMachineFunction();
2183
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002184 if (IsVarArg) {
2185 return lowerUnhandledCall(CLI, InVals,
2186 "unsupported call to variadic function ");
2187 }
2188
2189 if (!CLI.CS.getCalledFunction()) {
2190 return lowerUnhandledCall(CLI, InVals,
2191 "unsupported indirect call to function ");
2192 }
2193
2194 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2195 return lowerUnhandledCall(CLI, InVals,
2196 "unsupported required tail call to function ");
2197 }
2198
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002199 // The first 4 bytes are reserved for the callee's emergency stack slot.
2200 const unsigned CalleeUsableStackOffset = 4;
2201
2202 if (IsTailCall) {
2203 IsTailCall = isEligibleForTailCallOptimization(
2204 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2205 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2206 report_fatal_error("failed to perform tail call elimination on a call "
2207 "site marked musttail");
2208 }
2209
2210 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2211
2212 // A sibling call is one where we're under the usual C ABI and not planning
2213 // to change that but can still do a tail call:
2214 if (!TailCallOpt && IsTailCall)
2215 IsSibCall = true;
2216
2217 if (IsTailCall)
2218 ++NumTailCalls;
2219 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002220
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002221 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
Yaxun Liu1ac16612017-11-06 13:01:33 +00002222 // FIXME: Remove this hack for function pointer types after removing
2223 // support of old address space mapping. In the new address space
2224 // mapping the pointer in default address space is 64 bit, therefore
2225 // does not need this hack.
2226 if (Callee.getValueType() == MVT::i32) {
2227 const GlobalValue *GV = GA->getGlobal();
2228 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2229 GA->getTargetFlags());
2230 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002231 }
Yaxun Liu1ac16612017-11-06 13:01:33 +00002232 assert(Callee.getValueType() == MVT::i64);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002233
2234 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2235
2236 // Analyze operands of the call, assigning locations to each operand.
2237 SmallVector<CCValAssign, 16> ArgLocs;
2238 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2239 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2240 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2241
2242 // Get a count of how many bytes are to be pushed on the stack.
2243 unsigned NumBytes = CCInfo.getNextStackOffset();
2244
2245 if (IsSibCall) {
2246 // Since we're not changing the ABI to make this a tail call, the memory
2247 // operands are already available in the caller's incoming argument space.
2248 NumBytes = 0;
2249 }
2250
2251 // FPDiff is the byte offset of the call's argument area from the callee's.
2252 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2253 // by this amount for a tail call. In a sibling call it must be 0 because the
2254 // caller will deallocate the entire stack and the callee still expects its
2255 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002256 int32_t FPDiff = 0;
2257 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002258 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2259
Matt Arsenault6efd0822017-09-14 17:14:57 +00002260 SDValue CallerSavedFP;
2261
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002262 // Adjust the stack pointer for the new arguments...
2263 // These operations are automatically eliminated by the prolog/epilog pass
2264 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002265 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002266
2267 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2268
2269 // In the HSA case, this should be an identity copy.
2270 SDValue ScratchRSrcReg
2271 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2272 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2273
2274 // TODO: Don't hardcode these registers and get from the callee function.
2275 SDValue ScratchWaveOffsetReg
2276 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2277 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault6efd0822017-09-14 17:14:57 +00002278
2279 if (!Info->isEntryFunction()) {
2280 // Avoid clobbering this function's FP value. In the current convention
2281 // callee will overwrite this, so do save/restore around the call site.
2282 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2283 Info->getFrameOffsetReg(), MVT::i32);
2284 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002285 }
2286
2287 // Stack pointer relative accesses are done by changing the offset SGPR. This
2288 // is just the VGPR offset component.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002289 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002290
2291 SmallVector<SDValue, 8> MemOpChains;
2292 MVT PtrVT = MVT::i32;
2293
2294 // Walk the register/memloc assignments, inserting copies/loads.
2295 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2296 ++i, ++realArgIdx) {
2297 CCValAssign &VA = ArgLocs[i];
2298 SDValue Arg = OutVals[realArgIdx];
2299
2300 // Promote the value if needed.
2301 switch (VA.getLocInfo()) {
2302 case CCValAssign::Full:
2303 break;
2304 case CCValAssign::BCvt:
2305 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2306 break;
2307 case CCValAssign::ZExt:
2308 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2309 break;
2310 case CCValAssign::SExt:
2311 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2312 break;
2313 case CCValAssign::AExt:
2314 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2315 break;
2316 case CCValAssign::FPExt:
2317 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2318 break;
2319 default:
2320 llvm_unreachable("Unknown loc info!");
2321 }
2322
2323 if (VA.isRegLoc()) {
2324 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2325 } else {
2326 assert(VA.isMemLoc());
2327
2328 SDValue DstAddr;
2329 MachinePointerInfo DstInfo;
2330
2331 unsigned LocMemOffset = VA.getLocMemOffset();
2332 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002333
2334 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002335
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002336 if (IsTailCall) {
2337 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2338 unsigned OpSize = Flags.isByVal() ?
2339 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002340
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002341 Offset = Offset + FPDiff;
2342 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2343
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002344 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2345 StackPtr);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002346 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2347
2348 // Make sure any stack arguments overlapping with where we're storing
2349 // are loaded before this eventual operation. Otherwise they'll be
2350 // clobbered.
2351
2352 // FIXME: Why is this really necessary? This seems to just result in a
2353 // lot of code to copy the stack and write them back to the same
2354 // locations, which are supposed to be immutable?
2355 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2356 } else {
2357 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002358 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2359 }
2360
2361 if (Outs[i].Flags.isByVal()) {
2362 SDValue SizeNode =
2363 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2364 SDValue Cpy = DAG.getMemcpy(
2365 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2366 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002367 /*isTailCall = */ false, DstInfo,
2368 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2369 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002370
2371 MemOpChains.push_back(Cpy);
2372 } else {
2373 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2374 MemOpChains.push_back(Store);
2375 }
2376 }
2377 }
2378
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002379 // Copy special input registers after user input arguments.
2380 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2381
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002382 if (!MemOpChains.empty())
2383 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2384
2385 // Build a sequence of copy-to-reg nodes chained together with token chain
2386 // and flag operands which copy the outgoing args into the appropriate regs.
2387 SDValue InFlag;
2388 for (auto &RegToPass : RegsToPass) {
2389 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2390 RegToPass.second, InFlag);
2391 InFlag = Chain.getValue(1);
2392 }
2393
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002394
2395 SDValue PhysReturnAddrReg;
2396 if (IsTailCall) {
2397 // Since the return is being combined with the call, we need to pass on the
2398 // return address.
2399
2400 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2401 SDValue ReturnAddrReg = CreateLiveInRegister(
2402 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2403
2404 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2405 MVT::i64);
2406 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2407 InFlag = Chain.getValue(1);
2408 }
2409
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002410  // We don't usually want to end the call-sequence here because we would tidy
2411  // the frame up *after* the call. However, in the ABI-changing tail-call
2412  // case we've carefully laid out the parameters so that when sp is reset
2413  // they'll be in the correct location.
2414 if (IsTailCall && !IsSibCall) {
2415 Chain = DAG.getCALLSEQ_END(Chain,
2416 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2417 DAG.getTargetConstant(0, DL, MVT::i32),
2418 InFlag, DL);
2419 InFlag = Chain.getValue(1);
2420 }
2421
2422 std::vector<SDValue> Ops;
2423 Ops.push_back(Chain);
2424 Ops.push_back(Callee);
2425
2426 if (IsTailCall) {
2427 // Each tail call may have to adjust the stack by a different amount, so
2428 // this information must travel along with the operation for eventual
2429 // consumption by emitEpilogue.
2430 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002431
2432 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002433 }
2434
2435 // Add argument registers to the end of the list so that they are known live
2436 // into the call.
2437 for (auto &RegToPass : RegsToPass) {
2438 Ops.push_back(DAG.getRegister(RegToPass.first,
2439 RegToPass.second.getValueType()));
2440 }
2441
2442 // Add a register mask operand representing the call-preserved registers.
2443
2444 const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
2445 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2446 assert(Mask && "Missing call preserved mask for calling convention");
2447 Ops.push_back(DAG.getRegisterMask(Mask));
2448
2449 if (InFlag.getNode())
2450 Ops.push_back(InFlag);
2451
2452 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2453
2454   // If we're doing a tail call, use a TC_RETURN here rather than an
2455 // actual call instruction.
2456 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002457 MFI.setHasTailCall();
2458 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002459 }
2460
2461 // Returns a chain and a flag for retval copy to use.
2462 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2463 Chain = Call.getValue(0);
2464 InFlag = Call.getValue(1);
2465
Matt Arsenault6efd0822017-09-14 17:14:57 +00002466 if (CallerSavedFP) {
2467 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2468 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2469 InFlag = Chain.getValue(1);
2470 }
2471
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002472 uint64_t CalleePopBytes = NumBytes;
2473 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002474 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2475 InFlag, DL);
2476 if (!Ins.empty())
2477 InFlag = Chain.getValue(1);
2478
2479 // Handle result values, copying them out of physregs into vregs that we
2480 // return.
2481 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2482 InVals, IsThisReturn,
2483 IsThisReturn ? OutVals[0] : SDValue());
2484}
2485
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002486unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2487 SelectionDAG &DAG) const {
2488 unsigned Reg = StringSwitch<unsigned>(RegName)
2489 .Case("m0", AMDGPU::M0)
2490 .Case("exec", AMDGPU::EXEC)
2491 .Case("exec_lo", AMDGPU::EXEC_LO)
2492 .Case("exec_hi", AMDGPU::EXEC_HI)
2493 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2494 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2495 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2496 .Default(AMDGPU::NoRegister);
2497
2498 if (Reg == AMDGPU::NoRegister) {
2499 report_fatal_error(Twine("invalid register name \""
2500 + StringRef(RegName) + "\"."));
2501
2502 }
2503
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002504 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002505 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2506 report_fatal_error(Twine("invalid register \""
2507 + StringRef(RegName) + "\" for subtarget."));
2508 }
2509
2510 switch (Reg) {
2511 case AMDGPU::M0:
2512 case AMDGPU::EXEC_LO:
2513 case AMDGPU::EXEC_HI:
2514 case AMDGPU::FLAT_SCR_LO:
2515 case AMDGPU::FLAT_SCR_HI:
2516 if (VT.getSizeInBits() == 32)
2517 return Reg;
2518 break;
2519 case AMDGPU::EXEC:
2520 case AMDGPU::FLAT_SCR:
2521 if (VT.getSizeInBits() == 64)
2522 return Reg;
2523 break;
2524 default:
2525 llvm_unreachable("missing register type checking");
2526 }
2527
2528 report_fatal_error(Twine("invalid type for register \""
2529 + StringRef(RegName) + "\"."));
2530}
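// For illustration: this hook backs the @llvm.read_register and
// @llvm.write_register intrinsics, so IR along the lines of (hypothetical
// example, not from this file):
//   %exec = call i64 @llvm.read_register.i64(metadata !0)  ; !0 = !{!"exec"}
// resolves to AMDGPU::EXEC here, while names or types outside the table
// above produce a fatal error.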
2531
Matt Arsenault786724a2016-07-12 21:41:32 +00002532// If kill is not the last instruction, split the block so kill is always a
2533// proper terminator.
2534MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2535 MachineBasicBlock *BB) const {
2536 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2537
2538 MachineBasicBlock::iterator SplitPoint(&MI);
2539 ++SplitPoint;
2540
2541 if (SplitPoint == BB->end()) {
2542 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002543 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002544 return BB;
2545 }
2546
2547 MachineFunction *MF = BB->getParent();
2548 MachineBasicBlock *SplitBB
2549 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2550
Matt Arsenault786724a2016-07-12 21:41:32 +00002551 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2552 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2553
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002554 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002555 BB->addSuccessor(SplitBB);
2556
Marek Olsakce76ea02017-10-24 10:27:13 +00002557 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002558 return SplitBB;
2559}
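// A rough sketch of the split (block and instruction names illustrative):
//
//   bb:                               bb:
//     ...                               ...
//     SI_KILL_* (pseudo)        =>      SI_KILL_*_TERMINATOR
//     other_inst                      split_bb:
//     ...                               other_inst
//                                       ...
//
// so that the kill is always the last instruction of its block.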
2560
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002561// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2562// wavefront. If the value is uniform and just happens to be in a VGPR, this
2563// will only do one iteration. In the worst case, this will loop 64 times.
2564//
2565// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002566static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2567 const SIInstrInfo *TII,
2568 MachineRegisterInfo &MRI,
2569 MachineBasicBlock &OrigBB,
2570 MachineBasicBlock &LoopBB,
2571 const DebugLoc &DL,
2572 const MachineOperand &IdxReg,
2573 unsigned InitReg,
2574 unsigned ResultReg,
2575 unsigned PhiReg,
2576 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002577 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002578 bool UseGPRIdxMode,
2579 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002580 MachineBasicBlock::iterator I = LoopBB.begin();
2581
2582 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2583 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2584 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2585 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2586
2587 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2588 .addReg(InitReg)
2589 .addMBB(&OrigBB)
2590 .addReg(ResultReg)
2591 .addMBB(&LoopBB);
2592
2593 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2594 .addReg(InitSaveExecReg)
2595 .addMBB(&OrigBB)
2596 .addReg(NewExec)
2597 .addMBB(&LoopBB);
2598
2599  // Read the next variant; this is also the loop target.
2600 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2601 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2602
2603 // Compare the just read M0 value to all possible Idx values.
2604  // Compare the just-read index value to all possible Idx values.
2605 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002606 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002607
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002608  // Update EXEC; the original EXEC value is saved to NewExec.
2609 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2610 .addReg(CondReg, RegState::Kill);
2611
2612 MRI.setSimpleHint(NewExec, CondReg);
2613
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002614 if (UseGPRIdxMode) {
2615 unsigned IdxReg;
2616 if (Offset == 0) {
2617 IdxReg = CurrentIdxReg;
2618 } else {
2619 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2620 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2621 .addReg(CurrentIdxReg, RegState::Kill)
2622 .addImm(Offset);
2623 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002624 unsigned IdxMode = IsIndirectSrc ?
2625 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2626 MachineInstr *SetOn =
2627 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2628 .addReg(IdxReg, RegState::Kill)
2629 .addImm(IdxMode);
2630 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002631 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002632    // Move the index value into M0.
2633 if (Offset == 0) {
2634 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2635 .addReg(CurrentIdxReg, RegState::Kill);
2636 } else {
2637 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2638 .addReg(CurrentIdxReg, RegState::Kill)
2639 .addImm(Offset);
2640 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002641 }
2642
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002643 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002644 MachineInstr *InsertPt =
2645 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002646 .addReg(AMDGPU::EXEC)
2647 .addReg(NewExec);
2648
2649 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2650 // s_cbranch_scc0?
2651
2652 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2653 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2654 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002655
2656 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002657}
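// Roughly, the loop emitted above looks like this for the M0 path with
// Offset == 0 (registers illustrative; the PHIs are omitted):
//
//   loop:
//     v_readfirstlane_b32 s_idx, v_idx
//     v_cmp_eq_u32_e64    s[c:c+1], s_idx, v_idx
//     s_and_saveexec_b64  s[n:n+1], s[c:c+1]
//     s_mov_b32           m0, s_idx
//     ;; the caller inserts the real use of m0 at the returned iterator
//     s_xor_b64           exec, exec, s[n:n+1]
//     s_cbranch_execnz    loop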
2658
2659// This has slightly sub-optimal regalloc when the source vector is killed by
2660// the read. The register allocator does not understand that the kill is
2661// per-workitem, so the vector is kept alive for the whole loop and we end up
2662// not re-using a subregister from it, using 1 more VGPR than necessary. This
2663// extra VGPR was saved when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002664static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2665 MachineBasicBlock &MBB,
2666 MachineInstr &MI,
2667 unsigned InitResultReg,
2668 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002669 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002670 bool UseGPRIdxMode,
2671 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002672 MachineFunction *MF = MBB.getParent();
2673 MachineRegisterInfo &MRI = MF->getRegInfo();
2674 const DebugLoc &DL = MI.getDebugLoc();
2675 MachineBasicBlock::iterator I(&MI);
2676
2677 unsigned DstReg = MI.getOperand(0).getReg();
Matt Arsenault301162c2017-11-15 21:51:43 +00002678 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2679 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002680
2681 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2682
2683 // Save the EXEC mask
2684 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2685 .addReg(AMDGPU::EXEC);
2686
2687 // To insert the loop we need to split the block. Move everything after this
2688 // point to a new block, and insert a new empty block between the two.
2689 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2690 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2691 MachineFunction::iterator MBBI(MBB);
2692 ++MBBI;
2693
2694 MF->insert(MBBI, LoopBB);
2695 MF->insert(MBBI, RemainderBB);
2696
2697 LoopBB->addSuccessor(LoopBB);
2698 LoopBB->addSuccessor(RemainderBB);
2699
2700 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002701 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002702 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2703
2704 MBB.addSuccessor(LoopBB);
2705
2706 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2707
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002708 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2709 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002710 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002711
2712 MachineBasicBlock::iterator First = RemainderBB->begin();
2713 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2714 .addReg(SaveExec);
2715
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002716 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002717}
2718
2719// Returns subreg index, offset
2720static std::pair<unsigned, int>
2721computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2722 const TargetRegisterClass *SuperRC,
2723 unsigned VecReg,
2724 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002725 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002726
2727 // Skip out of bounds offsets, or else we would end up using an undefined
2728 // register.
2729 if (Offset >= NumElts || Offset < 0)
2730 return std::make_pair(AMDGPU::sub0, Offset);
2731
2732 return std::make_pair(AMDGPU::sub0 + Offset, 0);
2733}
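// For example (illustrative): with a 128-bit super-register class (four
// 32-bit elements), Offset 2 yields (AMDGPU::sub2, 0), while the
// out-of-bounds Offset 5 is passed through as (AMDGPU::sub0, 5).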
2734
2735// Return true if the index is an SGPR and was set.
2736static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2737 MachineRegisterInfo &MRI,
2738 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002739 int Offset,
2740 bool UseGPRIdxMode,
2741 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002742 MachineBasicBlock *MBB = MI.getParent();
2743 const DebugLoc &DL = MI.getDebugLoc();
2744 MachineBasicBlock::iterator I(&MI);
2745
2746 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2747 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2748
2749 assert(Idx->getReg() != AMDGPU::NoRegister);
2750
2751 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2752 return false;
2753
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002754 if (UseGPRIdxMode) {
2755 unsigned IdxMode = IsIndirectSrc ?
2756 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2757 if (Offset == 0) {
2758 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00002759 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2760 .add(*Idx)
2761 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002762
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002763 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002764 } else {
2765 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2766 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00002767 .add(*Idx)
2768 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002769 MachineInstr *SetOn =
2770 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2771 .addReg(Tmp, RegState::Kill)
2772 .addImm(IdxMode);
2773
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002774 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002775 }
2776
2777 return true;
2778 }
2779
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002780 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002781 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2782 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002783 } else {
2784 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002785 .add(*Idx)
2786 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002787 }
2788
2789 return true;
2790}
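// As a sketch, for an SGPR index s0 with a nonzero Offset this emits either
//   s_add_i32 m0, s0, Offset                    ; M0 path
// or
//   s_add_i32 s_tmp, s0, Offset                 ; gpr-idx mode path
//   s_set_gpr_idx_on s_tmp, <SRC0 or DST>
// (registers illustrative), and returns false for a VGPR index so that the
// waterfall loop handles it instead.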
2791
2792// Control flow needs to be inserted if indexing with a VGPR.
2793static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2794 MachineBasicBlock &MBB,
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002795 const SISubtarget &ST) {
2796 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002797 const SIRegisterInfo &TRI = TII->getRegisterInfo();
2798 MachineFunction *MF = MBB.getParent();
2799 MachineRegisterInfo &MRI = MF->getRegInfo();
2800
2801 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002802 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002803 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2804
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002805 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002806
2807 unsigned SubReg;
2808 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002809 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002810
Marek Olsake22fdb92017-03-21 17:00:32 +00002811 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002812
2813 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002814 MachineBasicBlock::iterator I(&MI);
2815 const DebugLoc &DL = MI.getDebugLoc();
2816
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002817 if (UseGPRIdxMode) {
2818 // TODO: Look at the uses to avoid the copy. This may require rescheduling
2819 // to avoid interfering with other uses, so probably requires a new
2820 // optimization pass.
2821 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002822 .addReg(SrcReg, RegState::Undef, SubReg)
2823 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002824 .addReg(AMDGPU::M0, RegState::Implicit);
2825 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2826 } else {
2827 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002828 .addReg(SrcReg, RegState::Undef, SubReg)
2829 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002830 }
2831
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002832 MI.eraseFromParent();
2833
2834 return &MBB;
2835 }
2836
2837 const DebugLoc &DL = MI.getDebugLoc();
2838 MachineBasicBlock::iterator I(&MI);
2839
2840 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2841 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2842
2843 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
2844
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002845 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
2846 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002847 MachineBasicBlock *LoopBB = InsPt->getParent();
2848
2849 if (UseGPRIdxMode) {
2850 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002851 .addReg(SrcReg, RegState::Undef, SubReg)
2852 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002853 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002854 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002855 } else {
2856 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002857 .addReg(SrcReg, RegState::Undef, SubReg)
2858 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002859 }
2860
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002861 MI.eraseFromParent();
2862
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002863 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002864}
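// E.g. with a uniform index already in an SGPR, the extract above is just
// (illustrative registers):
//   s_mov_b32     m0, s0
//   v_movrels_b32 v0, v1    ; reads v[1 + M0]
// Only a divergent VGPR index needs the waterfall loop.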
2865
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002866static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
2867 const TargetRegisterClass *VecRC) {
2868 switch (TRI.getRegSizeInBits(*VecRC)) {
2869 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002870 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002871 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002872 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002873 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002874 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002875 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002876 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002877 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002878 return AMDGPU::V_MOVRELD_B32_V16;
2879 default:
2880 llvm_unreachable("unsupported size for MOVRELD pseudos");
2881 }
2882}
2883
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002884static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
2885 MachineBasicBlock &MBB,
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002886 const SISubtarget &ST) {
2887 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002888 const SIRegisterInfo &TRI = TII->getRegisterInfo();
2889 MachineFunction *MF = MBB.getParent();
2890 MachineRegisterInfo &MRI = MF->getRegInfo();
2891
2892 unsigned Dst = MI.getOperand(0).getReg();
2893 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
2894 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2895 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
2896 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2897 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
2898
2899 // This can be an immediate, but will be folded later.
2900 assert(Val->getReg());
2901
2902 unsigned SubReg;
2903 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
2904 SrcVec->getReg(),
2905 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00002906 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002907
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002908 if (Idx->getReg() == AMDGPU::NoRegister) {
2909 MachineBasicBlock::iterator I(&MI);
2910 const DebugLoc &DL = MI.getDebugLoc();
2911
2912 assert(Offset == 0);
2913
2914 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00002915 .add(*SrcVec)
2916 .add(*Val)
2917 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002918
2919 MI.eraseFromParent();
2920 return &MBB;
2921 }
2922
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002923 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002924 MachineBasicBlock::iterator I(&MI);
2925 const DebugLoc &DL = MI.getDebugLoc();
2926
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002927 if (UseGPRIdxMode) {
2928 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00002929 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
2930 .add(*Val)
2931 .addReg(Dst, RegState::ImplicitDefine)
2932 .addReg(SrcVec->getReg(), RegState::Implicit)
2933 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002934
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002935 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2936 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002937 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002938
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002939 BuildMI(MBB, I, DL, MovRelDesc)
2940 .addReg(Dst, RegState::Define)
2941 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00002942 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002943 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002944 }
2945
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002946 MI.eraseFromParent();
2947 return &MBB;
2948 }
2949
2950 if (Val->isReg())
2951 MRI.clearKillFlags(Val->getReg());
2952
2953 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002954
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002955 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
2956
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002957 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002958 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002959 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002960
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002961 if (UseGPRIdxMode) {
2962 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00002963 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
2964 .add(*Val) // src0
2965 .addReg(Dst, RegState::ImplicitDefine)
2966 .addReg(PhiReg, RegState::Implicit)
2967 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002968 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002969 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002970 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002971
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002972 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
2973 .addReg(Dst, RegState::Define)
2974 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00002975 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002976 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002977 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002978
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002979 MI.eraseFromParent();
2980
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002981 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002982}
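// E.g. with an SGPR index on the M0 path, the insert above becomes roughly
// (illustrative registers):
//   s_mov_b32     m0, s0
//   v_movreld_b32 v1, v0    ; writes v[1 + M0]
// once the V_MOVRELD_B32_V* pseudo is expanded.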
2983
Matt Arsenault786724a2016-07-12 21:41:32 +00002984MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
2985 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00002986
2987 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2988 MachineFunction *MF = BB->getParent();
2989 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
2990
2991 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00002992 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
2993 report_fatal_error("missing mem operand from MIMG instruction");
2994 }
Tom Stellard244891d2016-12-20 15:52:17 +00002995    // MIMG instructions need a memoperand so that they aren't assumed to
2996    // be ordered memory instructions.
2997
Tom Stellard244891d2016-12-20 15:52:17 +00002998 return BB;
2999 }
3000
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003001 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003002 case AMDGPU::S_ADD_U64_PSEUDO:
3003 case AMDGPU::S_SUB_U64_PSEUDO: {
3004 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3005 const DebugLoc &DL = MI.getDebugLoc();
3006
3007 MachineOperand &Dest = MI.getOperand(0);
3008 MachineOperand &Src0 = MI.getOperand(1);
3009 MachineOperand &Src1 = MI.getOperand(2);
3010
3011 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3012 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3013
3014 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3015 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3016 &AMDGPU::SReg_32_XM0RegClass);
3017 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3018 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3019 &AMDGPU::SReg_32_XM0RegClass);
3020
3021 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3022 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3023 &AMDGPU::SReg_32_XM0RegClass);
3024 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3025 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3026 &AMDGPU::SReg_32_XM0RegClass);
3027
3028 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3029
3030 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3031 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3032 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3033 .add(Src0Sub0)
3034 .add(Src1Sub0);
3035 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3036 .add(Src0Sub1)
3037 .add(Src1Sub1);
3038 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3039 .addReg(DestSub0)
3040 .addImm(AMDGPU::sub0)
3041 .addReg(DestSub1)
3042 .addImm(AMDGPU::sub1);
3043 MI.eraseFromParent();
3044 return BB;
3045 }
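  // For reference, the 64-bit scalar add above expands to roughly
  // (illustrative registers):
  //   s_add_u32  s_lo, s_a_lo, s_b_lo
  //   s_addc_u32 s_hi, s_a_hi, s_b_hi   ; consumes the carry from SCC
  // with the halves recombined by the REG_SEQUENCE.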
3046 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003047 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003048 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003049 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003050 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003051 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003052 }
Marek Olsak2d825902017-04-28 20:21:58 +00003053 case AMDGPU::SI_INIT_EXEC:
3054 // This should be before all vector instructions.
3055 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3056 AMDGPU::EXEC)
3057 .addImm(MI.getOperand(0).getImm());
3058 MI.eraseFromParent();
3059 return BB;
3060
3061 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3062 // Extract the thread count from an SGPR input and set EXEC accordingly.
3063 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3064 //
3065 // S_BFE_U32 count, input, {shift, 7}
3066 // S_BFM_B64 exec, count, 0
3067 // S_CMP_EQ_U32 count, 64
3068 // S_CMOV_B64 exec, -1
3069 MachineInstr *FirstMI = &*BB->begin();
3070 MachineRegisterInfo &MRI = MF->getRegInfo();
3071 unsigned InputReg = MI.getOperand(0).getReg();
3072 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3073 bool Found = false;
3074
3075 // Move the COPY of the input reg to the beginning, so that we can use it.
3076 for (auto I = BB->begin(); I != &MI; I++) {
3077 if (I->getOpcode() != TargetOpcode::COPY ||
3078 I->getOperand(0).getReg() != InputReg)
3079 continue;
3080
3081 if (I == FirstMI) {
3082 FirstMI = &*++BB->begin();
3083 } else {
3084 I->removeFromParent();
3085 BB->insert(FirstMI, &*I);
3086 }
3087 Found = true;
3088 break;
3089 }
3090 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003091 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003092
3093 // This should be before all vector instructions.
3094 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3095 .addReg(InputReg)
3096 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3097 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3098 AMDGPU::EXEC)
3099 .addReg(CountReg)
3100 .addImm(0);
3101 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3102 .addReg(CountReg, RegState::Kill)
3103 .addImm(64);
3104 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3105 AMDGPU::EXEC)
3106 .addImm(-1);
3107 MI.eraseFromParent();
3108 return BB;
3109 }
3110
Changpeng Fang01f60622016-03-15 17:28:44 +00003111 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003112 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003113 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003114 .add(MI.getOperand(0))
3115 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003116 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003117 return BB;
3118 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003119 case AMDGPU::SI_INDIRECT_SRC_V1:
3120 case AMDGPU::SI_INDIRECT_SRC_V2:
3121 case AMDGPU::SI_INDIRECT_SRC_V4:
3122 case AMDGPU::SI_INDIRECT_SRC_V8:
3123 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003124 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003125 case AMDGPU::SI_INDIRECT_DST_V1:
3126 case AMDGPU::SI_INDIRECT_DST_V2:
3127 case AMDGPU::SI_INDIRECT_DST_V4:
3128 case AMDGPU::SI_INDIRECT_DST_V8:
3129 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003130 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003131 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3132 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003133 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003134 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3135 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003136
3137 unsigned Dst = MI.getOperand(0).getReg();
3138 unsigned Src0 = MI.getOperand(1).getReg();
3139 unsigned Src1 = MI.getOperand(2).getReg();
3140 const DebugLoc &DL = MI.getDebugLoc();
3141 unsigned SrcCond = MI.getOperand(3).getReg();
3142
3143 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3144 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003145 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenault22e41792016-08-27 01:00:37 +00003146
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003147 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3148 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003149 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3150 .addReg(Src0, 0, AMDGPU::sub0)
3151 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003152 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003153 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3154 .addReg(Src0, 0, AMDGPU::sub1)
3155 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003156 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003157
3158 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3159 .addReg(DstLo)
3160 .addImm(AMDGPU::sub0)
3161 .addReg(DstHi)
3162 .addImm(AMDGPU::sub1);
3163 MI.eraseFromParent();
3164 return BB;
3165 }
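  // As a sketch, the 64-bit select above is split into two 32-bit selects on
  // the sub0/sub1 halves (illustrative registers), since there is no 64-bit
  // VALU cndmask:
  //   v_cndmask_b32_e64 v_lo, v_a_lo, v_b_lo, s[c:c+1]
  //   v_cndmask_b32_e64 v_hi, v_a_hi, v_b_hi, s[c:c+1]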
Matt Arsenault327188a2016-12-15 21:57:11 +00003166 case AMDGPU::SI_BR_UNDEF: {
3167 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3168 const DebugLoc &DL = MI.getDebugLoc();
3169 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003170 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003171 Br->getOperand(1).setIsUndef(true); // read undef SCC
3172 MI.eraseFromParent();
3173 return BB;
3174 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003175 case AMDGPU::ADJCALLSTACKUP:
3176 case AMDGPU::ADJCALLSTACKDOWN: {
3177 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3178 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003179
3180    // Add an implicit use of the frame offset reg to prevent the restore copy
3181    // inserted after the call from being reordered after stack operations in
3182    // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003183 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003184 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3185 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003186 return BB;
3187 }
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003188 case AMDGPU::SI_CALL_ISEL:
3189 case AMDGPU::SI_TCRETURN_ISEL: {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003190 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3191 const DebugLoc &DL = MI.getDebugLoc();
3192 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003193
3194 MachineRegisterInfo &MRI = MF->getRegInfo();
3195 unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3196 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3197 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3198
3199 const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3200
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003201 MachineInstrBuilder MIB;
3202 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3203 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3204 .add(MI.getOperand(0))
3205 .addGlobalAddress(G);
3206 } else {
3207 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3208 .add(MI.getOperand(0))
3209 .addGlobalAddress(G);
3210
3211 // There is an additional imm operand for tcreturn, but it should be in the
3212 // right place already.
3213 }
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003214
3215 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003216 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003217
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003218 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003219 MI.eraseFromParent();
3220 return BB;
3221 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003222 default:
3223 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003224 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003225}
3226
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003227bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3228 return isTypeLegal(VT.getScalarType());
3229}
3230
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003231bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3232 // This currently forces unfolding various combinations of fsub into fma with
3233 // free fneg'd operands. As long as we have fast FMA (controlled by
3234 // isFMAFasterThanFMulAndFAdd), we should perform these.
3235
3236 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3237 // most of these combines appear to be cycle neutral but save on instruction
3238 // count / code size.
3239 return true;
3240}
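// For example, with aggressive fusion the combiner may rewrite
// (fsub (fmul a, b), c) into (fma a, b, (fneg c)), which with fast FMA is at
// worst cycle neutral and saves an instruction.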
3241
Mehdi Amini44ede332015-07-09 02:09:04 +00003242EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3243 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003244 if (!VT.isVector()) {
3245 return MVT::i1;
3246 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003247 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003248}
3249
Matt Arsenault94163282016-12-22 16:36:25 +00003250MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3251 // TODO: Should i16 be used always if legal? For now it would force VALU
3252 // shifts.
3253 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003254}
3255
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003256// Answering this is somewhat tricky and depends on the specific device, since
3257// different devices have different rates for fma and for f64 operations.
3258//
3259// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3260// regardless of which device (although the number of cycles differs between
3261// devices), so it is always profitable for f64.
3262//
3263// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3264// only on full rate devices. Normally, we should prefer selecting v_mad_f32,
3265// which we can always select even without fused FP ops, since it returns the
3266// same result as the separate operations and is always full rate. Therefore,
3267// we lie and report that fma is not faster for f32. v_mad_f32, however, does
3268// not support denormals, so we do report fma as faster if we have a fast fma
3269// device and denormals are required.
3270//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003271bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3272 VT = VT.getScalarType();
3273
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003274 switch (VT.getSimpleVT().SimpleTy) {
3275 case MVT::f32:
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003276    // This is as fast on some subtargets. However, we always have full rate f32
3277    // mad available, which returns the same result as the separate operations,
Matt Arsenault8d630032015-02-20 22:10:41 +00003278    // so we should prefer it over fma. We can't use mad if we want to support
3279    // denormals, so only report fma as faster in that case.
3280 return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003281 case MVT::f64:
3282 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003283 case MVT::f16:
3284 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003285 default:
3286 break;
3287 }
3288
3289 return false;
3290}
3291
Tom Stellard75aadc22012-12-11 21:25:42 +00003292//===----------------------------------------------------------------------===//
3293// Custom DAG Lowering Operations
3294//===----------------------------------------------------------------------===//
3295
3296SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3297 switch (Op.getOpcode()) {
3298 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003299 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003300 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003301 SDValue Result = LowerLOAD(Op, DAG);
3302 assert((!Result.getNode() ||
3303 Result.getNode()->getNumValues() == 2) &&
3304 "Load should return a value and a chain");
3305 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003306 }
Tom Stellardaf775432013-10-23 00:44:32 +00003307
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003308 case ISD::FSIN:
3309 case ISD::FCOS:
3310 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003311 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003312 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003313 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003314 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003315 case ISD::GlobalAddress: {
3316 MachineFunction &MF = DAG.getMachineFunction();
3317 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3318 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003319 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003320 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003321 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003322 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003323 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003324 case ISD::INSERT_VECTOR_ELT:
3325 return lowerINSERT_VECTOR_ELT(Op, DAG);
3326 case ISD::EXTRACT_VECTOR_ELT:
3327 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003328 case ISD::FP_ROUND:
3329 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003330 case ISD::TRAP:
3331 case ISD::DEBUGTRAP:
3332 return lowerTRAP(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003333 }
3334 return SDValue();
3335}
3336
Changpeng Fang4737e892018-01-18 22:08:53 +00003337static unsigned getImageOpcode(unsigned IID) {
3338 switch (IID) {
3339 case Intrinsic::amdgcn_image_load:
3340 return AMDGPUISD::IMAGE_LOAD;
3341 case Intrinsic::amdgcn_image_load_mip:
3342 return AMDGPUISD::IMAGE_LOAD_MIP;
3343
3344 // Basic sample.
3345 case Intrinsic::amdgcn_image_sample:
3346 return AMDGPUISD::IMAGE_SAMPLE;
3347 case Intrinsic::amdgcn_image_sample_cl:
3348 return AMDGPUISD::IMAGE_SAMPLE_CL;
3349 case Intrinsic::amdgcn_image_sample_d:
3350 return AMDGPUISD::IMAGE_SAMPLE_D;
3351 case Intrinsic::amdgcn_image_sample_d_cl:
3352 return AMDGPUISD::IMAGE_SAMPLE_D_CL;
3353 case Intrinsic::amdgcn_image_sample_l:
3354 return AMDGPUISD::IMAGE_SAMPLE_L;
3355 case Intrinsic::amdgcn_image_sample_b:
3356 return AMDGPUISD::IMAGE_SAMPLE_B;
3357 case Intrinsic::amdgcn_image_sample_b_cl:
3358 return AMDGPUISD::IMAGE_SAMPLE_B_CL;
3359 case Intrinsic::amdgcn_image_sample_lz:
3360 return AMDGPUISD::IMAGE_SAMPLE_LZ;
3361 case Intrinsic::amdgcn_image_sample_cd:
3362 return AMDGPUISD::IMAGE_SAMPLE_CD;
3363 case Intrinsic::amdgcn_image_sample_cd_cl:
3364 return AMDGPUISD::IMAGE_SAMPLE_CD_CL;
3365
3366 // Sample with comparison.
3367 case Intrinsic::amdgcn_image_sample_c:
3368 return AMDGPUISD::IMAGE_SAMPLE_C;
3369 case Intrinsic::amdgcn_image_sample_c_cl:
3370 return AMDGPUISD::IMAGE_SAMPLE_C_CL;
3371 case Intrinsic::amdgcn_image_sample_c_d:
3372 return AMDGPUISD::IMAGE_SAMPLE_C_D;
3373 case Intrinsic::amdgcn_image_sample_c_d_cl:
3374 return AMDGPUISD::IMAGE_SAMPLE_C_D_CL;
3375 case Intrinsic::amdgcn_image_sample_c_l:
3376 return AMDGPUISD::IMAGE_SAMPLE_C_L;
3377 case Intrinsic::amdgcn_image_sample_c_b:
3378 return AMDGPUISD::IMAGE_SAMPLE_C_B;
3379 case Intrinsic::amdgcn_image_sample_c_b_cl:
3380 return AMDGPUISD::IMAGE_SAMPLE_C_B_CL;
3381 case Intrinsic::amdgcn_image_sample_c_lz:
3382 return AMDGPUISD::IMAGE_SAMPLE_C_LZ;
3383 case Intrinsic::amdgcn_image_sample_c_cd:
3384 return AMDGPUISD::IMAGE_SAMPLE_C_CD;
3385 case Intrinsic::amdgcn_image_sample_c_cd_cl:
3386 return AMDGPUISD::IMAGE_SAMPLE_C_CD_CL;
3387
3388 // Sample with offsets.
3389 case Intrinsic::amdgcn_image_sample_o:
3390 return AMDGPUISD::IMAGE_SAMPLE_O;
3391 case Intrinsic::amdgcn_image_sample_cl_o:
3392 return AMDGPUISD::IMAGE_SAMPLE_CL_O;
3393 case Intrinsic::amdgcn_image_sample_d_o:
3394 return AMDGPUISD::IMAGE_SAMPLE_D_O;
3395 case Intrinsic::amdgcn_image_sample_d_cl_o:
3396 return AMDGPUISD::IMAGE_SAMPLE_D_CL_O;
3397 case Intrinsic::amdgcn_image_sample_l_o:
3398 return AMDGPUISD::IMAGE_SAMPLE_L_O;
3399 case Intrinsic::amdgcn_image_sample_b_o:
3400 return AMDGPUISD::IMAGE_SAMPLE_B_O;
3401 case Intrinsic::amdgcn_image_sample_b_cl_o:
3402 return AMDGPUISD::IMAGE_SAMPLE_B_CL_O;
3403 case Intrinsic::amdgcn_image_sample_lz_o:
3404 return AMDGPUISD::IMAGE_SAMPLE_LZ_O;
3405 case Intrinsic::amdgcn_image_sample_cd_o:
3406 return AMDGPUISD::IMAGE_SAMPLE_CD_O;
3407 case Intrinsic::amdgcn_image_sample_cd_cl_o:
3408 return AMDGPUISD::IMAGE_SAMPLE_CD_CL_O;
3409
3410 // Sample with comparison and offsets.
3411 case Intrinsic::amdgcn_image_sample_c_o:
3412 return AMDGPUISD::IMAGE_SAMPLE_C_O;
3413 case Intrinsic::amdgcn_image_sample_c_cl_o:
3414 return AMDGPUISD::IMAGE_SAMPLE_C_CL_O;
3415 case Intrinsic::amdgcn_image_sample_c_d_o:
3416 return AMDGPUISD::IMAGE_SAMPLE_C_D_O;
3417 case Intrinsic::amdgcn_image_sample_c_d_cl_o:
3418 return AMDGPUISD::IMAGE_SAMPLE_C_D_CL_O;
3419 case Intrinsic::amdgcn_image_sample_c_l_o:
3420 return AMDGPUISD::IMAGE_SAMPLE_C_L_O;
3421 case Intrinsic::amdgcn_image_sample_c_b_o:
3422 return AMDGPUISD::IMAGE_SAMPLE_C_B_O;
3423 case Intrinsic::amdgcn_image_sample_c_b_cl_o:
3424 return AMDGPUISD::IMAGE_SAMPLE_C_B_CL_O;
3425 case Intrinsic::amdgcn_image_sample_c_lz_o:
3426 return AMDGPUISD::IMAGE_SAMPLE_C_LZ_O;
3427 case Intrinsic::amdgcn_image_sample_c_cd_o:
3428 return AMDGPUISD::IMAGE_SAMPLE_C_CD_O;
3429 case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
3430 return AMDGPUISD::IMAGE_SAMPLE_C_CD_CL_O;
3431
3432 // Basic gather4.
3433 case Intrinsic::amdgcn_image_gather4:
3434 return AMDGPUISD::IMAGE_GATHER4;
3435 case Intrinsic::amdgcn_image_gather4_cl:
3436 return AMDGPUISD::IMAGE_GATHER4_CL;
3437 case Intrinsic::amdgcn_image_gather4_l:
3438 return AMDGPUISD::IMAGE_GATHER4_L;
3439 case Intrinsic::amdgcn_image_gather4_b:
3440 return AMDGPUISD::IMAGE_GATHER4_B;
3441 case Intrinsic::amdgcn_image_gather4_b_cl:
3442 return AMDGPUISD::IMAGE_GATHER4_B_CL;
3443 case Intrinsic::amdgcn_image_gather4_lz:
3444 return AMDGPUISD::IMAGE_GATHER4_LZ;
3445
3446 // Gather4 with comparison.
3447 case Intrinsic::amdgcn_image_gather4_c:
3448 return AMDGPUISD::IMAGE_GATHER4_C;
3449 case Intrinsic::amdgcn_image_gather4_c_cl:
3450 return AMDGPUISD::IMAGE_GATHER4_C_CL;
3451 case Intrinsic::amdgcn_image_gather4_c_l:
3452 return AMDGPUISD::IMAGE_GATHER4_C_L;
3453 case Intrinsic::amdgcn_image_gather4_c_b:
3454 return AMDGPUISD::IMAGE_GATHER4_C_B;
3455 case Intrinsic::amdgcn_image_gather4_c_b_cl:
3456 return AMDGPUISD::IMAGE_GATHER4_C_B_CL;
3457 case Intrinsic::amdgcn_image_gather4_c_lz:
3458 return AMDGPUISD::IMAGE_GATHER4_C_LZ;
3459
3460 // Gather4 with offsets.
3461 case Intrinsic::amdgcn_image_gather4_o:
3462 return AMDGPUISD::IMAGE_GATHER4_O;
3463 case Intrinsic::amdgcn_image_gather4_cl_o:
3464 return AMDGPUISD::IMAGE_GATHER4_CL_O;
3465 case Intrinsic::amdgcn_image_gather4_l_o:
3466 return AMDGPUISD::IMAGE_GATHER4_L_O;
3467 case Intrinsic::amdgcn_image_gather4_b_o:
3468 return AMDGPUISD::IMAGE_GATHER4_B_O;
3469 case Intrinsic::amdgcn_image_gather4_b_cl_o:
3470 return AMDGPUISD::IMAGE_GATHER4_B_CL_O;
3471 case Intrinsic::amdgcn_image_gather4_lz_o:
3472 return AMDGPUISD::IMAGE_GATHER4_LZ_O;
3473
3474 // Gather4 with comparison and offsets.
3475 case Intrinsic::amdgcn_image_gather4_c_o:
3476 return AMDGPUISD::IMAGE_GATHER4_C_O;
3477 case Intrinsic::amdgcn_image_gather4_c_cl_o:
3478 return AMDGPUISD::IMAGE_GATHER4_C_CL_O;
3479 case Intrinsic::amdgcn_image_gather4_c_l_o:
3480 return AMDGPUISD::IMAGE_GATHER4_C_L_O;
3481 case Intrinsic::amdgcn_image_gather4_c_b_o:
3482 return AMDGPUISD::IMAGE_GATHER4_C_B_O;
3483 case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
3484 return AMDGPUISD::IMAGE_GATHER4_C_B_CL_O;
3485 case Intrinsic::amdgcn_image_gather4_c_lz_o:
3486 return AMDGPUISD::IMAGE_GATHER4_C_LZ_O;
3487
3488 default:
3489 break;
3490 }
3491 return 0;
3492}
3493
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003494static SDValue adjustLoadValueType(SDValue Result, EVT LoadVT, SDLoc DL,
3495 SelectionDAG &DAG, bool Unpacked) {
3496 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3497 // Truncate to v2i16/v4i16.
3498 EVT IntLoadVT = LoadVT.changeTypeToInteger();
3499 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, IntLoadVT, Result);
3500 // Bitcast to original type (v2f16/v4f16).
3501 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Trunc);
3502 }
3503 // Cast back to the original packed type.
3504 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3505}
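// E.g. on an unpacked-d16 subtarget a v2f16 value arrives as the v2i32
// {0x3C00, 0x4000}; the truncate gives the v2i16 {0x3C00, 0x4000} and the
// bitcast reinterprets it as the v2f16 {1.0, 2.0}.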
3506
3507// This is to lower INTRINSIC_W_CHAIN with illegal result types.
3508SDValue SITargetLowering::lowerIntrinsicWChain_IllegalReturnType(SDValue Op,
3509 SDValue &Chain, SelectionDAG &DAG) const {
3510 EVT LoadVT = Op.getValueType();
3511 // TODO: handle v3f16.
3512 if (LoadVT != MVT::v2f16 && LoadVT != MVT::v4f16)
3513 return SDValue();
3514
3515 bool Unpacked = Subtarget->hasUnpackedD16VMem();
3516 EVT UnpackedLoadVT = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
3517 EVT EquivLoadVT = Unpacked ? UnpackedLoadVT :
3518 getEquivalentMemType(*DAG.getContext(), LoadVT);
3519 // Change from v4f16/v2f16 to EquivLoadVT.
  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);

  SDValue Res;
  SDLoc DL(Op);
  MemSDNode *M = cast<MemSDNode>(Op);
  unsigned IID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (IID) {
  case Intrinsic::amdgcn_tbuffer_load: {
    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Op.getOperand(4),  // voffset
      Op.getOperand(5),  // soffset
      Op.getOperand(6),  // offset
      Op.getOperand(7),  // dfmt
      Op.getOperand(8),  // nfmt
      Op.getOperand(9),  // glc
      Op.getOperand(10)  // slc
    };
    Res = DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, DL,
                                  VTList, Ops, M->getMemoryVT(),
                                  M->getMemOperand());
    Chain = Res.getValue(1);
    return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
  }
  case Intrinsic::amdgcn_buffer_load_format: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // offset
      Op.getOperand(5), // glc
      Op.getOperand(6)  // slc
    };
    Res = DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                  DL, VTList, Ops, M->getMemoryVT(),
                                  M->getMemOperand());
    Chain = Res.getValue(1);
    return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
  }
  case Intrinsic::amdgcn_image_load:
  case Intrinsic::amdgcn_image_load_mip: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vaddr
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // dmask
      Op.getOperand(5), // glc
      Op.getOperand(6), // slc
      Op.getOperand(7), // lwe
      Op.getOperand(8)  // da
    };
    unsigned Opc = getImageOpcode(IID);
    Res = DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, M->getMemoryVT(),
                                  M->getMemOperand());
    Chain = Res.getValue(1);
    return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
  }
  // Basic sample.
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

  // Basic gather4.
  case Intrinsic::amdgcn_image_gather4:
  case Intrinsic::amdgcn_image_gather4_cl:
  case Intrinsic::amdgcn_image_gather4_l:
  case Intrinsic::amdgcn_image_gather4_b:
  case Intrinsic::amdgcn_image_gather4_b_cl:
  case Intrinsic::amdgcn_image_gather4_lz:

  // Gather4 with comparison.
  case Intrinsic::amdgcn_image_gather4_c:
  case Intrinsic::amdgcn_image_gather4_c_cl:
  case Intrinsic::amdgcn_image_gather4_c_l:
  case Intrinsic::amdgcn_image_gather4_c_b:
  case Intrinsic::amdgcn_image_gather4_c_b_cl:
  case Intrinsic::amdgcn_image_gather4_c_lz:

  // Gather4 with offsets.
  case Intrinsic::amdgcn_image_gather4_o:
  case Intrinsic::amdgcn_image_gather4_cl_o:
  case Intrinsic::amdgcn_image_gather4_l_o:
  case Intrinsic::amdgcn_image_gather4_b_o:
  case Intrinsic::amdgcn_image_gather4_b_cl_o:
  case Intrinsic::amdgcn_image_gather4_lz_o:

  // Gather4 with comparison and offsets.
  case Intrinsic::amdgcn_image_gather4_c_o:
  case Intrinsic::amdgcn_image_gather4_c_cl_o:
  case Intrinsic::amdgcn_image_gather4_c_l_o:
  case Intrinsic::amdgcn_image_gather4_c_b_o:
  case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
  case Intrinsic::amdgcn_image_gather4_c_lz_o: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vaddr
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // sampler
      Op.getOperand(5), // dmask
      Op.getOperand(6), // unorm
      Op.getOperand(7), // glc
      Op.getOperand(8), // slc
      Op.getOperand(9), // lwe
      Op.getOperand(10) // da
    };
    unsigned Opc = getImageOpcode(IID);
    Res = DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, M->getMemoryVT(),
                                  M->getMemOperand());
    Chain = Res.getValue(1);
    return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
  }
  default: {
    const AMDGPU::D16ImageDimIntrinsic *D16ImageDimIntr =
        AMDGPU::lookupD16ImageDimIntrinsicByIntr(IID);
    if (D16ImageDimIntr) {
      SmallVector<SDValue, 20> Ops;
      for (auto Value : Op.getNode()->op_values())
        Ops.push_back(Value);
      Ops[1] = DAG.getConstant(D16ImageDimIntr->D16HelperIntr, DL, MVT::i32);
      Res = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTList, Ops,
                                    M->getMemoryVT(), M->getMemOperand());
      Chain = Res.getValue(1);
      return adjustLoadValueType(Res, LoadVT, DL, DAG, Unpacked);
    }

    return SDValue();
  }
  }
}

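/// Hook invoked when a node's result type is illegal for the target; pushes
/// equivalent, legally-typed replacement values into \p Results, one per
/// result of \p N.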
void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::INSERT_VECTOR_ELT: {
    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_cvt_pkrtz: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
                                Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
      return;
    }
    case Intrinsic::amdgcn_cvt_pknorm_i16:
    case Intrinsic::amdgcn_cvt_pknorm_u16:
    case Intrinsic::amdgcn_cvt_pk_i16:
    case Intrinsic::amdgcn_cvt_pk_u16: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      unsigned Opcode;

      if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
        Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
        Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
        Opcode = AMDGPUISD::CVT_PK_I16_I32;
      else
        Opcode = AMDGPUISD::CVT_PK_U16_U32;

      SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SDValue Chain;
    if (SDValue Res = lowerIntrinsicWChain_IllegalReturnType(SDValue(N, 0),
                                                             Chain, DAG)) {
      Results.push_back(Res);
      Results.push_back(Chain);
      return;
    }
    break;
  }
  case ISD::SELECT: {
    SDLoc SL(N);
    EVT VT = N->getValueType(0);
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
    SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));

    EVT SelectVT = NewVT;
    if (NewVT.bitsLT(MVT::i32)) {
      LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
      SelectVT = MVT::i32;
    }

    SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
                                    N->getOperand(0), LHS, RHS);

    if (NewVT != SelectVT)
      NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
    return;
  }
  default:
    break;
  }
}

/// \brief Helper function for LowerBRCOND: return the first user of \p Value
/// whose opcode is \p Opcode, or null if there is none.
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}

void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting debugger prologue.
  //
  // Debugger prologue writes work group IDs and work item IDs to scratch
  // memory at a fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create fixed stack object for work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create fixed stack object for work item ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
    Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
  }
}

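// A global address can be reached in one of three ways: via an absolute
// fixup (when constants are emitted into the text section), via a
// PC-relative relocation, or indirectly through a GOT entry. The predicates
// below choose among the three for LowerGlobalAddress.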
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}

/// This transforms the control flow intrinsics to get the branch destination
/// as their last parameter, and also switches the branch target with BR if
/// the need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop
  //
  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3,
  //               BasicBlock:ch<bb1 0x7fee5286d088>
  unsigned CFNode = isCFIntrinsic(Intr);
  if (CFNode == 0) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
         (SetCC->getConstantOperandVal(1) == 1 &&
          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
        Chain, DL,
        CopyToReg->getOperand(1),
        SDValue(Result, i - 1),
        SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
      SDValue(Intr, Intr->getNumValues() - 1),
      Intr->getOperand(0));

  return Chain;
}

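/// Return \p Op extended or rounded to \p VT. Narrowing must use FP_ROUND,
/// the precision conversion; FTRUNC is round-to-integral and would compute a
/// different value entirely.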
SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
}

SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}

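/// Lower ISD::TRAP and ISD::DEBUGTRAP. When the HSA trap handler ABI is in
/// use and enabled, the queue pointer is copied into s[0:1] and an
/// AMDGPUISD::TRAP node is emitted; otherwise a trap degrades to ENDPGM and
/// a debugtrap to a diagnostic warning.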
SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);

  unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
      SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;

  if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
      Subtarget->isTrapHandlerEnabled()) {
    SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    unsigned UserSGPR = Info->getQueuePtrUserSGPR();
    assert(UserSGPR != AMDGPU::NoRegister);

    SDValue QueuePtr = CreateLiveInRegister(
        DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

    SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);

    SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                     QueuePtr, SDValue());

    SDValue Ops[] = {
      ToReg,
      DAG.getTargetConstant(TrapID, SL, MVT::i16),
      SGPR01,
      ToReg.getValue(1)
    };

    return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
  }

  switch (TrapID) {
  case SISubtarget::TrapIDLLVMTrap:
    return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
  case SISubtarget::TrapIDLLVMDebugTrap: {
    DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction().getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }
  default:
    llvm_unreachable("unsupported trap handler type!");
  }

  return Chain;
}

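/// Return the high 32 bits of the flat address aperture for the given address
/// space (LOCAL or PRIVATE): read from the aperture hardware registers via
/// s_getreg when available, otherwise loaded from the amd_queue_t descriptor
/// reached through the queue pointer.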
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
    SDValue ApertureReg = SDValue(
        DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
    SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
    return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUASI.CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  const AMDGPUTargetMachine &TM =
      static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
        DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned SrcAS = ASC->getSrcAddressSpace();

    if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
        SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
      MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

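/// Dynamic insertion into a v2i16/v2f16 vector is lowered with bit arithmetic
/// rather than a stack slot: the vector is viewed as an i32, a 16-bit mask is
/// shifted to the selected element's bit position, and the masked merge below
/// is selectable as v_bfi_b32.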
SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDValue Idx = Op.getOperand(2);
  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  // Avoid stack access for dynamic indexing.
  SDLoc SL(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));

  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 4)), (shl val, (shl idx, 4)), vec

  // Convert vector index to bit-index (multiply by the 16-bit element size,
  // i.e. shift left by 4, not by 16).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(4, SL, MVT::i32));

  // Zero-extend the value and shift it into the selected element's position.
  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);
  SDValue ShlVal = DAG.getNode(ISD::SHL, SL, MVT::i32, ExtVal, ScaledIdx);

  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
                            DAG.getConstant(0xffff, SL, MVT::i32),
                            ScaledIdx);

  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ShlVal);
  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
                            DAG.getNOT(SL, BFM, MVT::i32), BCVec);

  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
}

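/// Dynamic extraction mirrors the insertion above: view the vector as an i32,
/// shift right by the element's bit offset, and truncate to 16 bits.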
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc SL(Op);

  EVT ResultVT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);

  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);

  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring them with bit operations.

  // XXX - Why doesn't this get called when vector_shuffle is expanded?
  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
    return Combined;

  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
    SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

    if (CIdx->getZExtValue() == 1) {
      Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
                           DAG.getConstant(16, SL, MVT::i32));
    } else {
      assert(CIdx->getZExtValue() == 0);
    }

    if (ResultVT.bitsLT(MVT::i32))
      Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
  }

  SDValue Four = DAG.getConstant(4, SL, MVT::i32);

  // Convert vector index to bit-index (multiply by the 16-bit element size).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Four);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);

  SDValue Result = Elt;
  if (ResultVT.bitsLT(MVT::i32))
    Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
}

bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
          GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}

static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                        const SDLoc &DL, unsigned Offset, EVT PtrVT,
                        unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode
  // is lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with the lower 32 bits and higher 32 bits of a literal
  //   constant, which is a 64-bit pc-relative offset from the encoding of the
  //   $symbol operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                                 GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}

SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSD->getGlobal();

  if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
      GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
      // FIXME: It isn't correct to rely on the type of the pointer. This should
      // be removed when address space 0 is 64-bit.
      !GV->getType()->getElementType()->isFunctionTy())
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.

  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}

SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
                                           DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                        EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                         EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

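/// Lower intrinsic calls with no side effects: values preloaded into ABI
/// registers or kernel argument memory are materialized here, and the simple
/// arithmetic intrinsics map directly onto AMDGPUISD nodes.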
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    if (getSubtarget()->isAmdCodeObjectV2(MF))
      return emitNonHSAIntrinsicError(DAG, DL, VT);
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2(MF)) {
      DiagnosticInfoUnsupported BadIntrin(
          MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
        AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
    return getPreloadedValue(DAG, *MFI, VT, RegID);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    if (MFI->isEntryFunction())
      return getImplicitArgPtr(DAG, DL);
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDX);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDY);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDZ);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo(),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  case Intrinsic::amdgcn_interp_mov: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
        MF.getFunction(), "intrinsic not supported on subtarget",
        DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
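  // amdgcn_icmp and amdgcn_fcmp take the comparison predicate as an
  // immediate; a non-constant or out-of-range predicate operand folds the
  // whole call to undef instead of reaching instruction selection.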
  case Intrinsic::amdgcn_icmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
      return DAG.getUNDEF(VT);

    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fcmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
      return DAG.getUNDEF(VT);

    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz:
  case Intrinsic::amdgcn_cvt_pknorm_i16:
  case Intrinsic::amdgcn_cvt_pknorm_u16:
  case Intrinsic::amdgcn_cvt_pk_i16:
  case Intrinsic::amdgcn_cvt_pk_u16: {
    // FIXME: Stop adding cast if v2f16/v2i16 are legal.
    EVT VT = Op.getValueType();
    unsigned Opcode;

    if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
      Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
      Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
      Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
      Opcode = AMDGPUISD::CVT_PK_I16_I32;
    else
      Opcode = AMDGPUISD::CVT_PK_U16_U32;

    SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  case Intrinsic::amdgcn_wqm: {
    SDValue Src = Op.getOperand(1);
    return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
                   0);
  }
  case Intrinsic::amdgcn_wwm: {
    SDValue Src = Op.getOperand(1);
    return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
                   0);
  }
  case Intrinsic::amdgcn_image_getlod:
  case Intrinsic::amdgcn_image_getresinfo: {
    unsigned Idx = (IntrinsicID == Intrinsic::amdgcn_image_getresinfo) ? 3 : 4;

    // If the dmask has every channel disabled, replace the result with undef.
    const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(Idx));
    if (!DMask || DMask->isNullValue())
      return DAG.getUNDEF(Op.getValueType());
    return SDValue();
  }
  default:
    return Op;
  }
}

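/// Lower intrinsic calls that carry a chain (they may read memory). Each
/// becomes a target memory-intrinsic node that reuses the memory operand of
/// the original INTRINSIC_W_CHAIN node.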
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00004705SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4706 SelectionDAG &DAG) const {
4707 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00004708 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00004709
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00004710 switch (IntrID) {
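  // DS atomic intrinsics carry a MachineMemOperand, so they are lowered to
  // target-specific memory intrinsic nodes rather than plain ISD nodes; this
  // keeps the aliasing information through instruction selection.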
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc;
    switch (IntrID) {
    case Intrinsic::amdgcn_atomic_inc:
      Opc = AMDGPUISD::ATOMIC_INC;
      break;
    case Intrinsic::amdgcn_atomic_dec:
      Opc = AMDGPUISD::ATOMIC_DEC;
      break;
    case Intrinsic::amdgcn_ds_fadd:
      Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
      break;
    case Intrinsic::amdgcn_ds_fmin:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
      break;
    case Intrinsic::amdgcn_ds_fmax:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
      break;
    default:
      llvm_unreachable("Unknown intrinsic!");
    }
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3)  // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
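  // Buffer loads pass the resource descriptor, index, offset, and cache
  // policy bits (glc/slc) through as explicit operands on the target node.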
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // offset
      Op.getOperand(5), // glc
      Op.getOperand(6)  // slc
    };

    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
        AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();

    auto *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Op.getOperand(4),  // voffset
      Op.getOperand(5),  // soffset
      Op.getOperand(6),  // offset
      Op.getOperand(7),  // dfmt
      Op.getOperand(8),  // nfmt
      Op.getOperand(9),  // glc
      Op.getOperand(10)  // slc
    };

    EVT VT = Op.getValueType();

    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, M->getMemOperand());
  }
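  // Buffer atomics: each intrinsic maps 1:1 onto an AMDGPUISD::BUFFER_ATOMIC_*
  // node; the memory operand from the original intrinsic is preserved.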
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Op.getOperand(5), // offset
      Op.getOperand(6)  // slc
    };
    EVT VT = Op.getValueType();

    auto *M = cast<MemSDNode>(Op);
    unsigned Opcode = 0;

    switch (IntrID) {
    case Intrinsic::amdgcn_buffer_atomic_swap:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
      break;
    case Intrinsic::amdgcn_buffer_atomic_add:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
      break;
    case Intrinsic::amdgcn_buffer_atomic_sub:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
      break;
    case Intrinsic::amdgcn_buffer_atomic_smin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
      break;
    case Intrinsic::amdgcn_buffer_atomic_umin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
      break;
    case Intrinsic::amdgcn_buffer_atomic_smax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
      break;
    case Intrinsic::amdgcn_buffer_atomic_umax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
      break;
    case Intrinsic::amdgcn_buffer_atomic_and:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
      break;
    case Intrinsic::amdgcn_buffer_atomic_or:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
      break;
    case Intrinsic::amdgcn_buffer_atomic_xor:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
      break;
    default:
      llvm_unreachable("unhandled atomic opcode");
    }

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }

  case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // src
      Op.getOperand(3), // cmp
      Op.getOperand(4), // rsrc
      Op.getOperand(5), // vindex
      Op.getOperand(6), // offset
      Op.getOperand(7)  // slc
    };
    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT, M->getMemOperand());
  }

  // Basic sample.
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o: {
    // If the dmask has every channel disabled, replace the result with undef.
    const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
    if (!DMask || DMask->isNullValue()) {
      SDValue Undef = DAG.getUNDEF(Op.getValueType());
      return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
    }

    return SDValue();
  }
  default:
    return SDValue();
  }
}

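// Repack or widen 16-bit (D16) store data into the layout the selected memory
// instruction expects: a bitcast to an equivalent legal type on packed-D16
// subtargets, or one 16-bit element zero-extended per 32-bit lane on
// unpacked-D16 subtargets.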
SDValue SITargetLowering::handleD16VData(SDValue VData,
                                         SelectionDAG &DAG) const {
  EVT StoreVT = VData.getValueType();
  SDLoc DL(VData);

  if (StoreVT.isVector()) {
    assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
    if (!Subtarget->hasUnpackedD16VMem()) {
      if (!isTypeLegal(StoreVT)) {
        // If the target supports packed VMEM, we just need to work around
        // the illegal type by casting to an equivalent one.
        EVT EquivStoreVT = getEquivalentMemType(*DAG.getContext(), StoreVT);
        return DAG.getNode(ISD::BITCAST, DL, EquivStoreVT, VData);
      }
    } else { // We need to unpack the packed data to store.
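      // Unpacked-D16 VMEM instructions expect one 16-bit element per 32-bit
      // lane, so zero-extend each element into its own 32-bit lane.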
      EVT IntStoreVT = StoreVT.changeTypeToInteger();
      SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
      EVT EquivStoreVT = (StoreVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
      return DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
    }
  }
  // No change for f16 and legal vector D16 types.
  return VData;
}

SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));

    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      Op.getOperand(4), // src0
      Op.getOperand(5), // src1
      Op.getOperand(6), // src2
      Op.getOperand(7), // src3
      DAG.getTargetConstant(0, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
        AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    SDValue Src0 = Op.getOperand(4);
    SDValue Src1 = Op.getOperand(5);
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));

    SDValue Undef = DAG.getUNDEF(MVT::f32);
    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
      Undef, // src2
      Undef, // src3
      DAG.getTargetConstant(1, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
        AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_s_sendmsg:
  case Intrinsic::amdgcn_s_sendmsghalt: {
    unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
        AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
    SDValue Glue = Chain.getValue(1);
    return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
                       Op.getOperand(2), Glue);
  }
  case Intrinsic::amdgcn_init_exec: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
                       Op.getOperand(2));
  }
  case Intrinsic::amdgcn_init_exec_from_input: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
                       Op.getOperand(2), Op.getOperand(3));
  }
  case AMDGPUIntrinsic::AMDGPU_kill: {
    SDValue Src = Op.getOperand(2);
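    // If the kill condition is a compile-time constant, either drop the kill
    // entirely (non-negative operand) or make it unconditional (negative).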
    if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
      if (!K->isNegative())
        return Chain;

      SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
      return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
    }

    SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
    return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
  }
  case Intrinsic::amdgcn_s_barrier: {
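    // A wave executes in lockstep, so if the whole workgroup fits in one wave
    // the s_barrier is redundant; only a scheduling barrier is kept so code
    // motion cannot break the intended ordering.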
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
      const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
      unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
      if (WGSize <= ST.getWavefrontSize())
        return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
                                          Op.getOperand(0)), 0);
    }
    return SDValue();
  }
  case AMDGPUIntrinsic::SI_tbuffer_store: {
    // Extract vindex and voffset from vaddr as appropriate.
    const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
    const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
    SDValue VAddr = Op.getOperand(5);

    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);

    assert(!(OffEn->isOne() && IdxEn->isOne()) &&
           "Legacy intrinsic doesn't support both offset and index - use new version");

    SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
    SDValue VOffset = OffEn->isOne() ? VAddr : Zero;

    // Deal with the vec-3 case.
    const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
    auto Opcode = NumChannels->getZExtValue() == 3 ?
        AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;

    SDValue Ops[] = {
      Chain,
      Op.getOperand(3),  // vdata
      Op.getOperand(2),  // rsrc
      VIndex,
      VOffset,
      Op.getOperand(6),  // soffset
      Op.getOperand(7),  // inst_offset
      Op.getOperand(8),  // dfmt
      Op.getOperand(9),  // nfmt
      Op.getOperand(12), // glc
      Op.getOperand(13), // slc
    };

    assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
           "Value of tfe other than zero is unsupported");

    EVT VT = Op.getOperand(3).getValueType();
    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(Opcode, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }

  case Intrinsic::amdgcn_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    SDValue Ops[] = {
      Chain,
      VData,             // vdata
      Op.getOperand(3),  // rsrc
      Op.getOperand(4),  // vindex
      Op.getOperand(5),  // voffset
      Op.getOperand(6),  // soffset
      Op.getOperand(7),  // offset
      Op.getOperand(8),  // dfmt
      Op.getOperand(9),  // nfmt
      Op.getOperand(10), // glc
      Op.getOperand(11)  // slc
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_buffer_store:
  case Intrinsic::amdgcn_buffer_store_format: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    SDValue Ops[] = {
      Chain,
      VData,            // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Op.getOperand(5), // offset
      Op.getOperand(6), // glc
      Op.getOperand(7)  // slc
    };
    unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
        AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
    Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_image_store:
  case Intrinsic::amdgcn_image_store_mip: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    SDValue Ops[] = {
      Chain,            // Chain
      VData,            // vdata
      Op.getOperand(3), // vaddr
      Op.getOperand(4), // rsrc
      Op.getOperand(5), // dmask
      Op.getOperand(6), // glc
      Op.getOperand(7), // slc
      Op.getOperand(8), // lwe
      Op.getOperand(9)  // da
    };
    unsigned Opc = (IntrinsicID == Intrinsic::amdgcn_image_store) ?
        AMDGPUISD::IMAGE_STORE : AMDGPUISD::IMAGE_STORE_MIP;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  default: {
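    // D16 image stores whose data type is illegal are retargeted to an
    // equivalent helper intrinsic (found via a lookup table) that takes the
    // repacked data produced by handleD16VData.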
    const AMDGPU::D16ImageDimIntrinsic *D16ImageDimIntr =
        AMDGPU::lookupD16ImageDimIntrinsicByIntr(IntrinsicID);
    if (D16ImageDimIntr) {
      SDValue VData = Op.getOperand(2);
      EVT StoreVT = VData.getValueType();
      if ((StoreVT == MVT::v2f16 && !isTypeLegal(StoreVT)) ||
          StoreVT == MVT::v4f16) {
        VData = handleD16VData(VData, DAG);

        SmallVector<SDValue, 12> Ops;
        for (auto Value : Op.getNode()->op_values())
          Ops.push_back(Value);
        Ops[1] = DAG.getConstant(D16ImageDimIntr->D16HelperIntr, DL, MVT::i32);
        Ops[2] = VData;

        MemSDNode *M = cast<MemSDNode>(Op);
        return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Op->getVTList(),
                                       Ops, M->getMemoryVT(),
                                       M->getMemOperand());
      }
    }

    return Op;
  }
  }
}

SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
      return SDValue();

    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, RealMemVT, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned Alignment = Load->getAlignment();
  unsigned AS = Load->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          AS, Alignment)) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;

  unsigned NumElements = MemVT.getVectorNumElements();

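  // Uniform, sufficiently aligned constant-address loads are already legal
  // and can be selected to scalar (SMEM) loads; divergent ones fall through
  // to the same vector-splitting rules as global and private loads.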
  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
    if (!Op->isDivergent() && Alignment >= 4)
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }

  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUASI.GLOBAL_ADDRESS) {
    if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
        !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
        Alignment >= 4)
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }
  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUASI.GLOBAL_ADDRESS ||
      AS == AMDGPUASI.FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorLoad(Op, DAG);
    // v4 loads are supported for private and global memory.
    return SDValue();
  }
  if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    // Depending on the setting of the private_element_size field in the
    // resource descriptor, we can only make private accesses up to a certain
    // size.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorLoad(Load, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    case 16:
      // Same as global/flat.
      if (NumElements > 4)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    // Use ds_read_b128 if possible.
    if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
        MemVT.getStoreSize() == 16)
      return SDValue();

    if (NumElements > 2)
      return SplitVectorLoad(Op, DAG);
  }
  return SDValue();
}

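// Lower a 64-bit select by splitting it into two 32-bit selects on the low
// and high halves, which avoids needing a 64-bit conditional move.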
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() != MVT::i64)
    return SDValue();

  SDLoc DL(Op);
  SDValue Cond = Op.getOperand(0);

  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue One = DAG.getConstant(1, DL, MVT::i32);

  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));

  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);

  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);

  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);

  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);

  SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
  return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
}

// Catch division cases where we can use shortcuts with rcp and rsq
// instructions.
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();
  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
                Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();

  if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
    return SDValue();

  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
    if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
      if (CLHS->isExactlyValue(1.0)) {
        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation have a worst-case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use them as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // 1.0 / sqrt(x) -> rsq(x)

        // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
        // error seems really high at 2^29 ULP.
        if (RHS.getOpcode() == ISD::FSQRT)
          return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));

        // 1.0 / x -> rcp(x)
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        // -1.0 / x -> rcp (fneg x)
        SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
      }
    }
  }

  if (Unsafe) {
    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
  }

  return SDValue();
}

static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                          EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMUL:
    Opcode = AMDGPUISD::FMUL_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
                     GlueChain.getValue(2));
}

static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                           EVT VT, SDValue A, SDValue B, SDValue C,
                           SDValue GlueChain) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B, C);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMA:
    Opcode = AMDGPUISD::FMA_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
                     GlueChain.getValue(2));
}

SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue Src0 = Op.getOperand(0);
  SDValue Src1 = Op.getOperand(1);

  SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
  SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);

  SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
  SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);

  SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
  SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}

// Faster 2.5 ULP division that does not support denormals.
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

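  // K0 = 2^96 and K1 = 2^-32. If |RHS| exceeds K0, pre-scale the denominator
  // by K1 so the reciprocal cannot underflow; the final multiply by r3
  // applies the same factor again to restore the quotient.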
  const APFloat K0Val(BitsToFloat(0x6f800000));
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  const APFloat K1Val(BitsToFloat(0x2f800000));
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // TODO: Should this propagate fast-math-flags?
  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  // rcp does not support denormals.
  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}

SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);

  SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                          RHS, RHS, LHS);
  SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                        LHS, RHS, LHS);

  // Denominator is scaled to not be denormal, so using rcp is ok.
  SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
                                  DenominatorScaled);
  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
                                     DenominatorScaled);

  const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
                               (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                               (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

  const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);

  if (!Subtarget->hasFP32Denormals()) {
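    // FP32 denormals are flushed in the default mode; temporarily switch the
    // MODE register to keep denormals during the refinement sequence, and
    // restore flush-to-zero once it completes.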
    SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
                                                      SL, MVT::i32);
    SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
                                       DAG.getEntryNode(),
                                       EnableDenormValue, BitField);
    SDValue Ops[3] = {
      NegDivScale0,
      EnableDenorm.getValue(0),
      EnableDenorm.getValue(1)
    };

    NegDivScale0 = DAG.getMergeValues(Ops, SL);
  }

  SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
                             ApproxRcp, One, NegDivScale0);

  SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
                             ApproxRcp, Fma0);

  SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
                           Fma1, Fma1);

  SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
                             NumeratorScaled, Mul);

  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);

  SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
                             NumeratorScaled, Fma3);

  if (!Subtarget->hasFP32Denormals()) {
    const SDValue DisableDenormValue =
        DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
    SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
                                        Fma4.getValue(1),
                                        DisableDenormValue,
                                        BitField,
                                        Fma4.getValue(2));

    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      DisableDenorm, DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             Fma4, Fma1, Fma3, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
}

SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getTarget().Options.UnsafeFPMath)
    return lowerFastUnsafeFDIV(Op, DAG);

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

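  // The sequence below is a Newton-Raphson refinement of rcp(y) applied to
  // the div_scale results; div_fmas and div_fixup then fold the scaling back
  // into the final quotient.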
  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
    // Work around a hardware bug on SI where the condition output from
    // div_scale is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out which scale to use for div_fmas.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}

SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}

SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    return DAG.getTruncStore(Store->getChain(), DL,
                             DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
                             Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() == MVT::i32);

  unsigned AS = Store->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                          AS, Store->getAlignment())) {
    return expandUnalignedStore(Store, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;

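  // As with loads, vector stores are split or scalarized based on the address
  // space and, for private memory, the subtarget's private element size.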
  unsigned NumElements = VT.getVectorNumElements();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
      AS == AMDGPUASI.FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);
    return SDValue();
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    // Use ds_write_b128 if possible.
    if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
        VT.getStoreSize() == 16)
      return SDValue();

    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);
    return SDValue();
  } else {
    llvm_unreachable("unhandled address space");
  }
}

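// The hardware sin/cos instructions consume an angle pre-scaled by 1/(2*pi)
// (i.e. in fractions of a full turn), so scale the argument and take the
// fractional part to reduce it into the supported range.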
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  // TODO: Should this propagate fast-math-flags?
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
                                  DAG.getNode(ISD::FMUL, DL, VT, Arg,
                                              DAG.getConstantFP(0.5 / M_PI, DL,
                                                                VT)));

  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}

SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                               SelectionDAG &DAG) const {
  AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
  assert(AtomicNode->isCompareAndSwap());
  unsigned AS = AtomicNode->getAddressSpace();

  // No custom lowering required for local address space.
  if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
    return Op;

  // Non-local address spaces require custom lowering for atomic compare and
  // swap: the compare and swap values are packed into a v2i32 (or v2i64 for
  // the _X2 variants).
  SDLoc DL(Op);
  SDValue ChainIn = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  SDValue Old = Op.getOperand(2);
  SDValue New = Op.getOperand(3);
  EVT VT = Op.getValueType();
  MVT SimpleVT = VT.getSimpleVT();
  MVT VecType = MVT::getVectorVT(SimpleVT, 2);

  SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
  SDValue Ops[] = { ChainIn, Addr, NewOld };

  return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
                                 Ops, VT, AtomicNode->getMemOperand());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
      DCI.AddToWorklist(Cvt.getNode());
      return Cvt;
    }
  }

  return SDValue();
}

// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)

// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of the new constant offset. This eliminates one of the
// uses, and may allow the remaining use to also be simplified.
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               EVT MemVT,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We only do this to handle cases where it's profitable when there are
  // multiple uses of the add, so defer to the standard combine.
  if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
      N0->hasOneUse())
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());

  AddrMode AM;
  AM.HasBaseReg = true;
  AM.BaseOffs = Offset.getSExtValue();
  if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);

  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
                          (N0.getOpcode() == ISD::OR ||
                           N0->getFlags().hasNoUnsignedWrap()));

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
}

SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SDValue Ptr = N->getBasePtr();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // TODO: We could also do this for multiplies.
  if (Ptr.getOpcode() == ISD::SHL) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
                                          N->getMemoryVT(), DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up a 64-bit bitwise operation with a constant into two 32-bit
// operations (and/or/xor). This will typically happen anyway for a VALU
// 64-bit and. This exposes other 32-bit integer combine opportunities since
// most 64-bit operations are decomposed this way. TODO: We won't want this
// for SALU especially if it is an inline immediate.
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}

// Returns true if the argument is a boolean value which is not serialized
// into memory or an argument and does not require v_cndmask_b32 to be
// deserialized.
static bool isBoolSGPR(SDValue V) {
  if (V.getValueType() != MVT::i1)
    return false;
  switch (V.getOpcode()) {
  default: break;
  case ISD::SETCC:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case AMDGPUISD::FP_CLASS:
    return true;
  }
  return false;
}

SDValue SITargetLowering::performAndCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (VT == MVT::i64 && CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
      return Split;
  }

  if (CRHS && VT == MVT::i32) {
5944 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
5945 // nb = number of trailing zeroes in mask
5946 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
5947 // given that we are selecting 8 or 16 bit fields starting at byte boundary.
5948 uint64_t Mask = CRHS->getZExtValue();
5949 unsigned Bits = countPopulation(Mask);
5950 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
5951 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
5952 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
5953 unsigned Shift = CShift->getZExtValue();
5954 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
5955 unsigned Offset = NB + Shift;
5956 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
5957 SDLoc SL(N);
5958 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
5959 LHS->getOperand(0),
5960 DAG.getConstant(Offset, SL, MVT::i32),
5961 DAG.getConstant(Bits, SL, MVT::i32));
5962 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
5963 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
5964 DAG.getValueType(NarrowVT));
5965 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
5966 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
5967 return Shl;
5968 }
5969 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00005970 }
5971 }
5972
5973 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
5974 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
5975 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00005976 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5977 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
5978
5979 SDValue X = LHS.getOperand(0);
5980 SDValue Y = RHS.getOperand(0);
5981 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
5982 return SDValue();
5983
5984 if (LCC == ISD::SETO) {
5985 if (X != LHS.getOperand(1))
5986 return SDValue();
5987
5988 if (RCC == ISD::SETUNE) {
5989 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
5990 if (!C1 || !C1->isInfinity() || C1->isNegative())
5991 return SDValue();
5992
5993 const uint32_t Mask = SIInstrFlags::N_NORMAL |
5994 SIInstrFlags::N_SUBNORMAL |
5995 SIInstrFlags::N_ZERO |
5996 SIInstrFlags::P_ZERO |
5997 SIInstrFlags::P_SUBNORMAL |
5998 SIInstrFlags::P_NORMAL;
5999
6000 static_assert(((~(SIInstrFlags::S_NAN |
6001 SIInstrFlags::Q_NAN |
6002 SIInstrFlags::N_INFINITY |
6003 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
6004 "mask not equal");
6005
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006006 SDLoc DL(N);
6007 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6008 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006009 }
6010 }
6011 }
6012
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006013 if (VT == MVT::i32 &&
6014 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
6015 // and x, (sext cc from i1) => select cc, x, 0
6016 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
6017 std::swap(LHS, RHS);
6018 if (isBoolSGPR(RHS.getOperand(0)))
6019 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
6020 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
6021 }
6022
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006023 return SDValue();
6024}
6025
SDValue SITargetLowering::performOrCombine(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  EVT VT = N->getValueType(0);
  if (VT == MVT::i1) {
    // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
    if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
        RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
      SDValue Src = LHS.getOperand(0);
      if (Src != RHS.getOperand(0))
        return SDValue();

      const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
      const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
      if (!CLHS || !CRHS)
        return SDValue();

      // Only 10 bits are used.
      static const uint32_t MaxMask = 0x3ff;

      uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                         Src, DAG.getConstant(NewMask, DL, MVT::i32));
    }

    return SDValue();
  }

  if (VT != MVT::i64)
    return SDValue();

  // TODO: This could be a generic combine with a predicate for extracting the
  // high half of an integer being free.

  // (or i64:x, (zero_extend i32:y)) ->
  //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
  if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
      RHS.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(LHS, RHS);

  if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
    SDValue ExtSrc = RHS.getOperand(0);
    EVT SrcVT = ExtSrc.getValueType();
    if (SrcVT == MVT::i32) {
      SDLoc SL(N);
      SDValue LowLHS, HiBits;
      std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
      SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);

      DCI.AddToWorklist(LowOr.getNode());
      DCI.AddToWorklist(HiBits.getNode());

      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                LowOr, HiBits);
      return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
    }
  }

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

SDValue SITargetLowering::performXorCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

// Instructions that will be lowered with a final instruction that zeros the
// high result bits.
// XXX - probably only need to list legal operations.
static bool fp16SrcZerosHighBits(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FCANONICALIZE:
  case ISD::FP_ROUND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::LDEXP:
    return true;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}

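// For example, (i32 zext (i16 bitcast (f16 fadd a, b))) below becomes
// (fp16_zext (fadd a, b)): the f16 add already wrote zeros to the high 16
// bits of its 32-bit destination (modulo the gfx9 caveat in the FIXME below).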
SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (!Subtarget->has16BitInsts() ||
      DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();

  SDValue Src = N->getOperand(0);
  if (Src.getValueType() != MVT::i16)
    return SDValue();

  // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
  // FIXME: It is not universally true that the high bits are zeroed on gfx9.
  if (Src.getOpcode() == ISD::BITCAST) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::f16 &&
        fp16SrcZerosHighBits(BCSrc.getOpcode()))
      return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
  }

  return SDValue();
}

SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  if (N->getOperand(0).isUndef())
    return DAG.getUNDEF(MVT::i1);

  return SDValue();
}

static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
  if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
    return true;

  return DAG.isKnownNeverNaN(Op);
}

static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
                            const SISubtarget *ST, unsigned MaxDepth = 5) {
  // If source is a result of another standard FP operation it is already in
  // canonical form.

  switch (Op.getOpcode()) {
  default:
    break;

  // These will flush denorms if required.
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FSQRT:
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FMA:
  case ISD::FMAD:

  case ISD::FCANONICALIZE:
    return true;

  case ISD::FP_ROUND:
    return Op.getValueType().getScalarType() != MVT::f16 ||
           ST->hasFP16Denormals();

  case ISD::FP_EXTEND:
    return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
           ST->hasFP16Denormals();

  case ISD::FP16_TO_FP:
  case ISD::FP_TO_FP16:
    return ST->hasFP16Denormals();

  // It can/will be lowered or combined as a bit operation.
  // Need to check their input recursively to handle.
  case ISD::FNEG:
  case ISD::FABS:
    return (MaxDepth > 0) &&
           isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);

  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FSINCOS:
    return Op.getValueType().getScalarType() != MVT::f16;

  // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
  // For such targets we need to check their inputs recursively.
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNAN:
  case ISD::FMAXNAN:

    if (ST->supportsMinMaxDenormModes() &&
        DAG.isKnownNeverNaN(Op.getOperand(0)) &&
        DAG.isKnownNeverNaN(Op.getOperand(1)))
      return true;

    return (MaxDepth > 0) &&
           isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
           isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);

  case ISD::ConstantFP: {
    auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
    return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
  }
  }
  return false;
}

// Constant fold canonicalize.
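// For example, an f32 signaling-NaN constant folds to the canonical quiet
// NaN, a denormal constant folds to +0.0 when f32 denormals are disabled, and
// a non-constant source that is already canonical (see isCanonicalized) just
// drops the fcanonicalize node.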
SDValue SITargetLowering::performFCanonicalizeCombine(
  SDNode *N,
  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));

  if (!CFP) {
    SDValue N0 = N->getOperand(0);
    EVT VT = N0.getValueType().getScalarType();
    auto ST = getSubtarget();

    if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
         (VT == MVT::f64 && ST->hasFP64Denormals()) ||
         (VT == MVT::f16 && ST->hasFP16Denormals())) &&
        DAG.isKnownNeverNaN(N0))
      return N0;

    bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());

    if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
        isCanonicalized(DAG, N0, ST))
      return N0;

    return SDValue();
  }

  const APFloat &C = CFP->getValueAPF();

  // Flush denormals to 0 if not enabled.
  if (C.isDenormal()) {
    EVT VT = N->getValueType(0);
    EVT SVT = VT.getScalarType();
    if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);
  }

  if (C.isNaN()) {
    EVT VT = N->getValueType(0);
    APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
    if (C.isSignaling()) {
      // Quiet a signaling NaN.
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
    }

    // Make sure it is the canonical NaN bitpattern.
    //
    // TODO: Can we use -1 as the canonical NaN value since it's an inline
    // immediate?
    if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
  }

  return N->getOperand(0);
}

static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}

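// Try to form med3 from min(max(x, K0), K1) with constant operands. For
// instance, the common byte-clamp idiom (smin (smax x, 0), 255) becomes
// (smed3 x, 0, 255); the K0 < K1 check below guarantees the med3 actually
// selects x when it is in range.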
SDValue SITargetLowering::performIntMed3ImmCombine(
  SelectionDAG &DAG, const SDLoc &SL,
  SDValue Op0, SDValue Op1, bool Signed) const {
  ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  if (Signed) {
    if (K0->getAPIntValue().sge(K1->getAPIntValue()))
      return SDValue();
  } else {
    if (K0->getAPIntValue().uge(K1->getAPIntValue()))
      return SDValue();
  }

  EVT VT = K0->getValueType(0);
  unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
  if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
    return DAG.getNode(Med3Opc, SL, VT,
                       Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
  }

  // If there isn't a 16-bit med3 operation, convert to 32-bit.
  MVT NVT = MVT::i32;
  unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
  SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
  SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);

  SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}

static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return C;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
    if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
      return C;
  }

  return nullptr;
}

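// Floating-point counterpart of the med3 combine. For example,
// (fminnum (fmaxnum x, 0.0), 1.0) becomes (clamp x) when dx10_clamp is
// enabled, and otherwise (fmed3 x, 0.0, 1.0) provided x is known not to be a
// signaling NaN.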
SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
                                                  const SDLoc &SL,
                                                  SDValue Op0,
                                                  SDValue Op1) const {
  ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
  if (!K1)
    return SDValue();

  ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // Ordered >= (although NaN inputs should have folded away by now).
  APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
  if (Cmp == APFloat::cmpGreaterThan)
    return SDValue();

  // TODO: Check IEEE bit enabled?
  EVT VT = Op0.getValueType();
  if (Subtarget->enableDX10Clamp()) {
    // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
    // hardware fmed3 behavior converting to a min.
    // FIXME: Should this be allowing -0.0?
    if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
  }

  // med3 for f16 is only available on gfx9+, and not available for v2f16.
  if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
    // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
    // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
    // then give the other result, which is different from med3 with a NaN
    // input.
    SDValue Var = Op0.getOperand(0);
    if (!isKnownNeverSNan(DAG, Var))
      return SDValue();

    return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
                       Var, SDValue(K0, 0), SDValue(K1, 0));
  }

  return SDValue();
}

SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.

  if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
      !VT.isVector() && VT != MVT::f64 &&
      ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
    // max(max(a, b), c) -> max3(a, b, c)
    // min(min(a, b), c) -> min3(a, b, c)
    if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0.getOperand(0),
                         Op0.getOperand(1),
                         Op1);
    }

    // Try commuted.
    // max(a, max(b, c)) -> max3(a, b, c)
    // min(a, min(b, c)) -> min3(a, b, c)
    if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0,
                         Op1.getOperand(0),
                         Op1.getOperand(1));
    }
  }

  // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
  if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
      return Med3;
  }

  if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
      return Med3;
  }

  // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
  if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
       (Opc == AMDGPUISD::FMIN_LEGACY &&
        Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
      (VT == MVT::f32 || VT == MVT::f64 ||
       (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
       (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
      Op0.hasOneUse()) {
    if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
      return Res;
  }

  return SDValue();
}

static bool isClampZeroToOne(SDValue A, SDValue B) {
  if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
    if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
      // FIXME: Should this be allowing -0.0?
      return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
             (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
    }
  }

  return false;
}

// FIXME: Should only worry about snans for version with chain.
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
  // NaNs. With a NaN input, the order of the operands may change the result.

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);

  if (isClampZeroToOne(Src0, Src1)) {
    // const_a, const_b, x -> clamp is safe in all cases including signaling
    // nans.
    // FIXME: Should this be allowing -0.0?
    return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
  }

  // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
  // handling no dx10-clamp?
  if (Subtarget->enableDX10Clamp()) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
      std::swap(Src1, Src2);

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isClampZeroToOne(Src1, Src2))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
  }

  return SDValue();
}

SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  if (Src0.isUndef() && Src1.isUndef())
    return DCI.DAG.getUNDEF(N->getValueType(0));
  return SDValue();
}

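// Push fneg/fabs through extract_vector_elt when every use can absorb a
// source modifier: (extract_vector_elt (fneg v), i) becomes
// (fneg (extract_vector_elt v, i)), so the vector-wide negate folds into
// per-use modifiers instead.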
SDValue SITargetLowering::performExtractVectorEltCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDValue Vec = N->getOperand(0);

  SelectionDAG &DAG = DCI.DAG;
  if ((Vec.getOpcode() == ISD::FNEG ||
       Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Vec.getOperand(0), Idx);
    return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
  }

  return SDValue();
}

static bool convertBuildVectorCastElt(SelectionDAG &DAG,
                                      SDValue &Lo, SDValue &Hi) {
  if (Hi.getOpcode() == ISD::BITCAST &&
      Hi.getOperand(0).getValueType() == MVT::f16 &&
      (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
    Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
    Hi = Hi.getOperand(0);
    return true;
  }

  return false;
}

SDValue SITargetLowering::performBuildVectorCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDLoc SL(N);

  if (!isTypeLegal(MVT::v2i16))
    return SDValue();
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT == MVT::v2i16) {
    SDValue Lo = N->getOperand(0);
    SDValue Hi = N->getOperand(1);

    // v2i16 build_vector (const|undef), (bitcast f16:$x)
    // -> bitcast (v2f16 build_vector const|undef, $x)
    if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
      SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi });
      return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
    }

    if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
      SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo });
      return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
    }
  }

  return SDValue();
}

unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does not
  // support denormals ever.
  if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
      (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
       (N0->getFlags().hasUnsafeAlgebra() &&
        N1->getFlags().hasUnsafeAlgebra())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}

static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
                           EVT VT,
                           SDValue N0, SDValue N1, SDValue N2,
                           bool Signed) {
  unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
  SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
}

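// For example, (add i64 (mul i64 a, b), c) where a and b are known to fit in
// 32 bits becomes (mad_u64_u32 a, b, c) on subtargets with v_mad_u64_u32, and
// (add i32 x, (zext i1 cc)) becomes (addcarry x, 0, cc).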
SDValue SITargetLowering::performAddCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
      && Subtarget->hasMad64_32() &&
      !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
      VT.getScalarSizeInBits() <= 64) {
    if (LHS.getOpcode() != ISD::MUL)
      std::swap(LHS, RHS);

    SDValue MulLHS = LHS.getOperand(0);
    SDValue MulRHS = LHS.getOperand(1);
    SDValue AddRHS = RHS;

    // TODO: Maybe restrict if SGPR inputs.
    if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
        numBitsUnsigned(MulRHS, DAG) <= 32) {
      MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
      MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
      AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
    }

    if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
      MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
      MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
      AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
    }

    return SDValue();
  }

  if (VT != MVT::i32)
    return SDValue();

  // add x, zext (setcc) => addcarry x, 0, setcc
  // add x, sext (setcc) => subcarry x, 0, setcc
  unsigned Opc = LHS.getOpcode();
  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
      Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
    std::swap(RHS, LHS);

  Opc = RHS.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    auto Cond = RHS.getOperand(0);
    if (!isBoolSGPR(Cond))
      break;
    SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
    SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
    Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
    return DAG.getNode(Opc, SL, VTList, Args);
  }
  case ISD::ADDCARRY: {
    // add x, (addcarry y, 0, cc) => addcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
    if (!C || C->getZExtValue() != 0) break;
    SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
    return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
  }
  }
  return SDValue();
}

SDValue SITargetLowering::performSubCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT != MVT::i32)
    return SDValue();

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opc = LHS.getOpcode();
  if (Opc != ISD::SUBCARRY)
    std::swap(RHS, LHS);

  if (LHS.getOpcode() == ISD::SUBCARRY) {
    // sub (subcarry x, 0, cc), y => subcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
    if (!C || C->getZExtValue() != 0)
      return SDValue();
    SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
    return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
  }
  return SDValue();
}

SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
                                                         DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C || C->getZExtValue() != 0)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);

  // addcarry (add x, y), 0, cc => addcarry x, y, cc
  // subcarry (sub x, y), 0, cc => subcarry x, y, cc
  unsigned LHSOpc = LHS.getOpcode();
  unsigned Opc = N->getOpcode();
  if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
      (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
    SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
    return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
  }
  return SDValue();
}

SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.

  // fadd (fadd (a, a), b) -> mad 2.0, a, b
  if (LHS.getOpcode() == ISD::FADD) {
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
      }
    }
  }

  // fadd (b, fadd (a, a)) -> mad 2.0, a, b
  if (RHS.getOpcode() == ISD::FADD) {
    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performFSubCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  // Try to get the fneg to fold into the source modifier. This undoes generic
  // DAG combines and folds them into the mad.
  //
  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() == ISD::FADD) {
    // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

        return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
      }
    }
  }

  if (RHS.getOpcode() == ISD::FADD) {
    // (fsub c, (fadd a, a)) -> mad -2.0, a, c

    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
      }
    }
  }

  return SDValue();
}

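// For example, (setcc (sext i1 cc to i32), -1, setne) becomes (xor cc, -1),
// and (setcc (sext i1 cc to i32), -1, seteq) is just cc; the isinf pattern
// (fcmp oeq (fabs x), +inf) becomes (fp_class x, P_INFINITY | N_INFINITY).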
SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();

  auto CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (!CRHS) {
    CRHS = dyn_cast<ConstantSDNode>(LHS);
    if (CRHS) {
      std::swap(LHS, RHS);
      CC = getSetCCSwappedOperands(CC);
    }
  }

  if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
      isBoolSGPR(LHS.getOperand(0))) {
    // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
    // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
    // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
    // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
    if ((CRHS->isAllOnesValue() &&
         (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
        (CRHS->isNullValue() &&
         (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
      return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(-1, SL, MVT::i1));
    if ((CRHS->isAllOnesValue() &&
         (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
        (CRHS->isNullValue() &&
         (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
      return LHS.getOperand(0);
  }

  if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
                                           VT != MVT::f16))
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x

    if (const ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }

  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::ADD:
    return performAddCombine(N, DCI);
  case ISD::SUB:
    return performSubCombine(N, DCI);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return performAddCarrySubCarryCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC:
  case AMDGPUISD::ATOMIC_LOAD_FADD:
  case AMDGPUISD::ATOMIC_LOAD_FMIN:
  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case ISD::ZERO_EXTEND:
    return performZeroExtendCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  case AMDGPUISD::FMED3:
    return performFMed3Combine(N, DCI);
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
    return performCvtPkRTZCombine(N, DCI);
  case ISD::SCALAR_TO_VECTOR: {
    SelectionDAG &DAG = DCI.DAG;
    EVT VT = N->getValueType(0);

    // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
    if (VT == MVT::v2i16 || VT == MVT::v2f16) {
      SDLoc SL(N);
      SDValue Src = N->getOperand(0);
      EVT EltVT = Src.getValueType();
      if (EltVT == MVT::f16)
        Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);

      SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
      return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
    }

    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    return performExtractVectorEltCombine(N, DCI);
  case ISD::BUILD_VECTOR:
    return performBuildVectorCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
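/// For example, if only the x and z components of a v4f32 image_sample with
/// dmask 0xf are actually extracted, the dmask shrinks to 0x5 and the result
/// type narrows to v2f32, freeing two VGPRs.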
SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                          SelectionDAG &DAG) const {
  SDNode *Users[4] = { nullptr };
  unsigned Lane = 0;
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;
  bool HasChain = Node->getNumValues() > 1;

  if (OldDmask == 0) {
    // These are folded out, but on the chance it happens don't assert.
    return Node;
  }

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return Node;

    // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return Node;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return Node;

  unsigned BitsSet = countPopulation(NewDmask);

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  int NewOpcode = AMDGPU::getMaskedMIMGOp(*TII,
                                          Node->getMachineOpcode(), BitsSet);
  assert(NewOpcode != -1 &&
         NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
         "failed to find equivalent MIMG op");

  // Adjust the writemask in the node
  SmallVector<SDValue, 12> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());

  MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();

  MVT ResultVT = BitsSet == 1 ?
    SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
  SDVTList NewVTList = HasChain ?
    DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);

  MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
                                              NewVTList, Ops);

  if (HasChain) {
    // Update chain.
    NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
  }

  if (BitsSet == 1) {
    assert(Node->hasNUsesOfValue(1, 0));
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
                                      SDLoc(Node), Users[Lane]->getValueType(0),
                                      SDValue(NewNode, 0));
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return nullptr;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }

  DAG.RemoveDeadNode(Node);
  return nullptr;
}

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target-independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
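    // In other words, a CopyToReg of an i1 value into a physical register is
    // split into a copy into a fresh VReg_1 virtual register followed by a
    // copy from that vreg to the physical register.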
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

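    // Materialize the frame index into an SGPR with S_MOV_B32 so the
    // target-independent node only sees register operands.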
    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  return DAG.UpdateNodeOperands(Node, Ops);
}

/// \brief Fold the instructions after selecting them.
/// Returns null if users were already updated.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode) && !TII->isD16(Opcode)) {
    return adjustWritemask(Node, DAG);
  }

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }

  switch (Opcode) {
  case AMDGPU::V_DIV_SCALE_F32:
  case AMDGPU::V_DIV_SCALE_F64: {
    // Satisfy the operand register constraint when one of the inputs is
    // undefined. Ordinarily each undef value will have its own implicit_def of
    // a vreg, so force these to use a single register.
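    // Illustrative cases (not from the original source): "div_scale %a, %b,
    // %a" already satisfies the constraint; for "div_scale undef, %b, %c"
    // src0 is retied to %b below, and if every input is undef, src0 and src1
    // are folded into one shared undef vreg.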
    SDValue Src0 = Node->getOperand(0);
    SDValue Src1 = Node->getOperand(1);
    SDValue Src2 = Node->getOperand(2);

    if ((Src0.isMachineOpcode() &&
         Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
        (Src0 == Src1 || Src0 == Src2))
      break;

    MVT VT = Src0.getValueType().getSimpleVT();
    const TargetRegisterClass *RC = getRegClassFor(VT);

    MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);

    SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
                                      UndefReg, Src0, SDValue());

    // src0 must be the same register as src1 or src2, even if the value is
    // undefined, so make sure we don't violate this constraint.
    if (Src0.isMachineOpcode() &&
        Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
      if (Src1.isMachineOpcode() &&
          Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src1;
      else if (Src2.isMachineOpcode() &&
               Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src2;
      else {
        assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
        Src0 = UndefReg;
        Src1 = UndefReg;
      }
    } else
      break;

    SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
    for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
      Ops.push_back(Node->getOperand(I));

    Ops.push_back(ImpDef.getValue(1));
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  default:
    break;
  }

  return Node;
}

/// \brief Legalize VOP3 operands and replace unused atomics with their
/// no-return variants after instruction selection.
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Replace unused atomics with the no-return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
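    // Illustrative shape (not from the original source):
    //   %pair = BUFFER_ATOMIC_CMPSWAP ...      ; vec2-typed result
    //   %old = EXTRACT_SUBREG %pair, sub0      ; dead when the value is unused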
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
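  // The finished descriptor therefore holds the pointer in dwords 0-1, zero
  // in dword 2, and the high half of the default data format in dword 3.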
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled.
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

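  // DataLo and DataHi hold the low and high halves of RsrcDword2And3; they
  // become dwords 2 and 3 of the 128-bit descriptor built below.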
  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  if (Constraint.size() > 1) {
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  // During lowering we have to assume the SP is needed whenever calls might
  // be present, since calls are only detected after the function is lowered.
  // We're about to reserve registers, so don't bother reserving the SP if we
  // aren't really going to use it.
  bool NeedSP = !Info->isEntryFunction() ||
    MFI.hasVarSizedObjects() ||
    MFI.hasCalls();

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
    Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
    assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                               Info->getStackPtrOffsetReg()));
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
  }

  MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
  MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
  MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                     Info->getScratchWaveOffsetReg());

  TargetLoweringBase::finalizeLowering(MF);
}

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  if (getSubtarget()->enableHugePrivateBuffer())
    return;

  // Technically it may be possible to have a dispatch with a single workitem
  // that uses the full private memory size, but that's not really useful. We
  // can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
}