//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);

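// Returns the lowest-numbered SGPR not yet allocated in the calling-convention
// state; hits llvm_unreachable if every SGPR is already taken.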
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);

    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3)
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this is OK to use when denormals are
// enabled, which we don't currently handle.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

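// For the non-kernel (graphics and function) calling conventions, vector
// arguments are split into 32-bit registers, or into packed 16-bit pairs when
// 16-bit instructions are available. The three hooks below keep the register
// type, register count, and vector type breakdown consistent with that scheme.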
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 &&
        Subtarget->has16BitInsts() &&
        isPowerOf2_32(VT.getVectorNumElements()))
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    // FIXME: Fails to break down as we want with v3.
    if (Size == 16 && Subtarget->has16BitInsts() && isPowerOf2_32(NumElts))
      return VT.getVectorNumElements() / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
766 if (Size == 16 && Subtarget->has16BitInsts() && isPowerOf2_32(NumElts)) {
767 RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
768 IntermediateVT = RegisterVT;
769 NumIntermediates = NumElts / 2;
Matt Arsenault8f9dde92018-07-28 14:11:34 +0000770 return NumIntermediates;
771 }
772 }
773
774 return TargetLowering::getVectorTypeBreakdownForCallingConv(
775 Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
776}
777
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +0000778bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
779 const CallInst &CI,
Matt Arsenault7d7adf42017-12-14 22:34:10 +0000780 MachineFunction &MF,
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +0000781 unsigned IntrID) const {
Nicolai Haehnle5d0d3032018-04-01 17:09:07 +0000782 if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
Nicolai Haehnlee741d7e2018-06-21 13:36:33 +0000783 AMDGPU::lookupRsrcIntrinsic(IntrID)) {
Nicolai Haehnle5d0d3032018-04-01 17:09:07 +0000784 AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
785 (Intrinsic::ID)IntrID);
786 if (Attr.hasFnAttribute(Attribute::ReadNone))
787 return false;
788
789 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
790
791 if (RsrcIntr->IsImage) {
792 Info.ptrVal = MFI->getImagePSV(
Tom Stellard5bfbae52018-07-11 20:59:01 +0000793 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
Nicolai Haehnle5d0d3032018-04-01 17:09:07 +0000794 CI.getArgOperand(RsrcIntr->RsrcArg));
795 Info.align = 0;
796 } else {
797 Info.ptrVal = MFI->getBufferPSV(
Tom Stellard5bfbae52018-07-11 20:59:01 +0000798 *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
Nicolai Haehnle5d0d3032018-04-01 17:09:07 +0000799 CI.getArgOperand(RsrcIntr->RsrcArg));
800 }
801
802 Info.flags = MachineMemOperand::MODereferenceable;
803 if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
804 Info.opc = ISD::INTRINSIC_W_CHAIN;
805 Info.memVT = MVT::getVT(CI.getType());
806 Info.flags |= MachineMemOperand::MOLoad;
807 } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
808 Info.opc = ISD::INTRINSIC_VOID;
809 Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
810 Info.flags |= MachineMemOperand::MOStore;
811 } else {
812 // Atomic
813 Info.opc = ISD::INTRINSIC_W_CHAIN;
814 Info.memVT = MVT::getVT(CI.getType());
815 Info.flags = MachineMemOperand::MOLoad |
816 MachineMemOperand::MOStore |
817 MachineMemOperand::MODereferenceable;
818
819 // XXX - Should this be volatile without known ordering?
820 Info.flags |= MachineMemOperand::MOVolatile;
821 }
822 return true;
823 }
824
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +0000825 switch (IntrID) {
826 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +0000827 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +0000828 case Intrinsic::amdgcn_ds_fadd:
829 case Intrinsic::amdgcn_ds_fmin:
830 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +0000831 Info.opc = ISD::INTRINSIC_W_CHAIN;
832 Info.memVT = MVT::getVT(CI.getType());
833 Info.ptrVal = CI.getOperand(0);
834 Info.align = 0;
Matt Arsenault11171332017-12-14 21:39:51 +0000835 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
Matt Arsenault79f837c2017-03-30 22:21:40 +0000836
837 const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
Matt Arsenault11171332017-12-14 21:39:51 +0000838 if (!Vol || !Vol->isZero())
839 Info.flags |= MachineMemOperand::MOVolatile;
840
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +0000841 return true;
Matt Arsenault79f837c2017-03-30 22:21:40 +0000842 }
Matt Arsenault905f3512017-12-29 17:18:14 +0000843
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +0000844 default:
845 return false;
846 }
847}
848
Matt Arsenault7dc01c92017-03-15 23:15:12 +0000849bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
850 SmallVectorImpl<Value*> &Ops,
851 Type *&AccessTy) const {
852 switch (II->getIntrinsicID()) {
853 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +0000854 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +0000855 case Intrinsic::amdgcn_ds_fadd:
856 case Intrinsic::amdgcn_ds_fmin:
857 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenault7dc01c92017-03-15 23:15:12 +0000858 Value *Ptr = II->getArgOperand(0);
859 AccessTy = II->getType();
860 Ops.push_back(Ptr);
861 return true;
862 }
863 default:
864 return false;
865 }
Matt Arsenaulte306a322014-10-21 16:25:08 +0000866}
867
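// The isLegal*AddressingMode helpers below model the immediate-offset ranges
// of the different memory instruction encodings (FLAT, global, MUBUF, SMRD,
// and DS) so the addressing-mode queries match what the hardware can encode.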
bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffer < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or 2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
1068 // If we have a uniform constant load, it still requires using a slow
1069 // buffer instruction if unaligned.
1070 if (IsFast) {
Matt Arsenault923712b2018-02-09 16:57:57 +00001071 *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS ||
1072 AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ?
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001073 (Align % 4 == 0) : true;
1074 }
1075
1076 return true;
1077 }
1078
Tom Stellard33e64c62015-02-04 20:49:52 +00001079 // Values smaller than a dword must be aligned.
Tom Stellard33e64c62015-02-04 20:49:52 +00001080 if (VT.bitsLT(MVT::i32))
1081 return false;
1082
Matt Arsenault1018c892014-04-24 17:08:26 +00001083 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1084 // byte-address are ignored, thus forcing Dword alignment.
Tom Stellarde812f2f2014-07-21 15:45:06 +00001085 // This applies to private, global, and constant memory.
Matt Arsenault1018c892014-04-24 17:08:26 +00001086 if (IsFast)
1087 *IsFast = true;
Tom Stellardc6b299c2015-02-02 18:02:28 +00001088
1089 return VT.bitsGT(MVT::i32) && Align % 4 == 0;
Tom Stellard0125f2a2013-06-25 02:39:35 +00001090}
1091
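// Pick a wide vector type for lowering memset/memcpy when the destination is
// sufficiently aligned, rather than the default guess based on the 32-bit
// private pointer size.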
Matt Arsenault46645fa2014-07-28 17:49:26 +00001092EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
1093 unsigned SrcAlign, bool IsMemset,
1094 bool ZeroMemset,
1095 bool MemcpyStrSrc,
1096 MachineFunction &MF) const {
1097 // FIXME: Should account for address space here.
1098
1099 // The default fallback uses the private pointer size as a guess for a type to
1100 // use. Make sure we switch these to 64-bit accesses.
1101
1102 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1103 return MVT::v4i32;
1104
1105 if (Size >= 8 && DstAlign >= 4)
1106 return MVT::v2i32;
1107
1108 // Use the default.
1109 return MVT::Other;
1110}
1111
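// Address spaces whose pointers are interchangeable with flat pointers; casts
// between any two of these are treated as no-ops (see isNoopAddrSpaceCast).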
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001112static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
1113 return AS == AMDGPUASI.GLOBAL_ADDRESS ||
1114 AS == AMDGPUASI.FLAT_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00001115 AS == AMDGPUASI.CONSTANT_ADDRESS ||
1116 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT;
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001117}
1118
1119bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1120 unsigned DestAS) const {
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001121 return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
1122 isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001123}
1124
Alexander Timofeev18009562016-12-08 17:28:47 +00001125bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1126 const MemSDNode *MemNode = cast<MemSDNode>(N);
1127 const Value *Ptr = MemNode->getMemOperand()->getValue();
Matt Arsenault0a0c8712018-03-27 18:39:45 +00001128 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
Alexander Timofeev18009562016-12-08 17:28:47 +00001129 return I && I->getMetadata("amdgpu.noclobber");
1130}
1131
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001132bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
1133 unsigned DestAS) const {
1134 // Flat -> private/local is a simple truncate.
1135 // Flat -> global is a no-op.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001136 if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001137 return true;
1138
1139 return isNoopAddrSpaceCast(SrcAS, DestAS);
1140}
1141
Tom Stellarda6f24c62015-12-15 20:55:55 +00001142bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1143 const MemSDNode *MemNode = cast<MemSDNode>(N);
Tom Stellarda6f24c62015-12-15 20:55:55 +00001144
Matt Arsenaultbcf7bec2018-02-09 16:57:48 +00001145 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
Tom Stellarda6f24c62015-12-15 20:55:55 +00001146}
1147
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001148TargetLoweringBase::LegalizeTypeAction
1149SITargetLowering::getPreferredVectorAction(EVT VT) const {
1150 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1151 return TypeSplitVector;
1152
1153 return TargetLoweringBase::getPreferredVectorAction(VT);
Tom Stellardd86003e2013-08-14 23:25:00 +00001154}
Tom Stellard0125f2a2013-06-25 02:39:35 +00001155
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001156bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1157 Type *Ty) const {
Matt Arsenault749035b2016-07-30 01:40:36 +00001158 // FIXME: Could be smarter if called for vector constants.
1159 return true;
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001160}
1161
Tom Stellard2e045bb2016-01-20 00:13:22 +00001162bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001163 if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1164 switch (Op) {
1165 case ISD::LOAD:
1166 case ISD::STORE:
Tom Stellard2e045bb2016-01-20 00:13:22 +00001167
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001168 // These operations are done with 32-bit instructions anyway.
1169 case ISD::AND:
1170 case ISD::OR:
1171 case ISD::XOR:
1172 case ISD::SELECT:
1173 // TODO: Extensions?
1174 return true;
1175 default:
1176 return false;
1177 }
1178 }
Konstantin Zhuravlyove14df4b2016-09-28 20:05:39 +00001179
Tom Stellard2e045bb2016-01-20 00:13:22 +00001180 // SimplifySetCC uses this function to determine whether or not it should
1181 // create setcc with i1 operands. We don't have instructions for i1 setcc.
1182 if (VT == MVT::i1 && Op == ISD::SETCC)
1183 return false;
1184
1185 return TargetLowering::isTypeDesirableForOp(Op, VT);
1186}
1187
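// Build a pointer into the kernarg segment at the given byte offset from the
// preloaded kernarg segment pointer.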
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001188SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1189 const SDLoc &SL,
1190 SDValue Chain,
1191 uint64_t Offset) const {
Mehdi Aminia749f2a2015-07-09 02:09:52 +00001192 const DataLayout &DL = DAG.getDataLayout();
Tom Stellardec2e43c2014-09-22 15:35:29 +00001193 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001194 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1195
1196 const ArgDescriptor *InputPtrReg;
1197 const TargetRegisterClass *RC;
1198
1199 std::tie(InputPtrReg, RC)
1200 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Tom Stellard94593ee2013-06-03 17:40:18 +00001201
Matt Arsenault86033ca2014-07-28 17:31:39 +00001202 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001203 MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
Matt Arsenaulta0269b62015-06-01 21:58:24 +00001204 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001205 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1206
Matt Arsenault2fb9ccf2018-05-29 17:42:38 +00001207 return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
Jan Veselyfea814d2016-06-21 20:46:20 +00001208}
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001209
Matt Arsenault9166ce82017-07-28 15:52:08 +00001210SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1211 const SDLoc &SL) const {
Matt Arsenault75e71922018-06-28 10:18:55 +00001212 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1213 FIRST_IMPLICIT);
Matt Arsenault9166ce82017-07-28 15:52:08 +00001214 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1215}
1216
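// Convert a loaded kernel argument from its in-memory type MemVT to the
// expected value type VT, honoring any sign/zero-extension attributes on the
// argument.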
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001217SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1218 const SDLoc &SL, SDValue Val,
1219 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001220 const ISD::InputArg *Arg) const {
Matt Arsenault6dca5422017-01-09 18:52:39 +00001221 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1222 VT.bitsLT(MemVT)) {
1223 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1224 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1225 }
1226
Tom Stellardbc6c5232016-10-17 16:21:45 +00001227 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001228 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001229 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001230 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001231 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001232 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001233
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001234 return Val;
1235}
1236
1237SDValue SITargetLowering::lowerKernargMemParameter(
1238 SelectionDAG &DAG, EVT VT, EVT MemVT,
1239 const SDLoc &SL, SDValue Chain,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001240 uint64_t Offset, unsigned Align, bool Signed,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001241 const ISD::InputArg *Arg) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001242 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1243 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
1244 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1245
Matt Arsenault90083d32018-06-07 09:54:49 +00001246 // Try to avoid using an extload by loading earlier than the argument address,
1247 // and extracting the relevant bits. The load should hopefully be merged with
1248 // the previous argument.
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001249 if (MemVT.getStoreSize() < 4 && Align < 4) {
1250 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
Matt Arsenault90083d32018-06-07 09:54:49 +00001251 int64_t AlignDownOffset = alignDown(Offset, 4);
1252 int64_t OffsetDiff = Offset - AlignDownOffset;
1253
1254 EVT IntVT = MemVT.changeTypeToInteger();
1255
1256 // TODO: If we passed in the base kernel offset we could have a better
1257 // alignment than 4, but we don't really need it.
1258 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1259 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1260 MachineMemOperand::MODereferenceable |
1261 MachineMemOperand::MOInvariant);
1262
1263 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1264 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1265
1266 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1267 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1268 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1269
1270
1271 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1272 }
1273
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001274 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1275 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001276 MachineMemOperand::MODereferenceable |
1277 MachineMemOperand::MOInvariant);
1278
1279 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001280 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001281}
1282
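// Lower an argument that was passed on the stack (for callable functions) by
// creating a fixed frame index and loading the value from it.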
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001283SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1284 const SDLoc &SL, SDValue Chain,
1285 const ISD::InputArg &Arg) const {
1286 MachineFunction &MF = DAG.getMachineFunction();
1287 MachineFrameInfo &MFI = MF.getFrameInfo();
1288
1289 if (Arg.Flags.isByVal()) {
1290 unsigned Size = Arg.Flags.getByValSize();
1291 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1292 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1293 }
1294
1295 unsigned ArgOffset = VA.getLocMemOffset();
1296 unsigned ArgSize = VA.getValVT().getStoreSize();
1297
1298 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1299
1300 // Create load nodes to retrieve arguments from the stack.
1301 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1302 SDValue ArgValue;
1303
1304 // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1305 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1306 MVT MemVT = VA.getValVT();
1307
1308 switch (VA.getLocInfo()) {
1309 default:
1310 break;
1311 case CCValAssign::BCvt:
1312 MemVT = VA.getLocVT();
1313 break;
1314 case CCValAssign::SExt:
1315 ExtType = ISD::SEXTLOAD;
1316 break;
1317 case CCValAssign::ZExt:
1318 ExtType = ISD::ZEXTLOAD;
1319 break;
1320 case CCValAssign::AExt:
1321 ExtType = ISD::EXTLOAD;
1322 break;
1323 }
1324
1325 ArgValue = DAG.getExtLoad(
1326 ExtType, SL, VA.getLocVT(), Chain, FIN,
1327 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1328 MemVT);
1329 return ArgValue;
1330}
1331
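// Return the value of a preloaded argument register as a live-in virtual
// register, based on the function's argument info.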
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001332SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1333 const SIMachineFunctionInfo &MFI,
1334 EVT VT,
1335 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1336 const ArgDescriptor *Reg;
1337 const TargetRegisterClass *RC;
1338
1339 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1340 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1341}
1342
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001343static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1344 CallingConv::ID CallConv,
1345 ArrayRef<ISD::InputArg> Ins,
1346 BitVector &Skipped,
1347 FunctionType *FType,
1348 SIMachineFunctionInfo *Info) {
1349 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001350 const ISD::InputArg *Arg = &Ins[I];
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001351
Matt Arsenault55ab9212018-08-01 19:57:34 +00001352 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1353 "vector type argument should have been split");
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001354
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001355 // First check if it's a PS input addr.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001356 if (CallConv == CallingConv::AMDGPU_PS &&
1357 !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001358
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001359 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1360
1361 // Inconveniently only the first part of the split is marked as isSplit,
1362 // so skip to the end. We only want to increment PSInputNum once for the
1363 // entire split argument.
1364 if (Arg->Flags.isSplit()) {
1365 while (!Arg->Flags.isSplitEnd()) {
1366 assert(!Arg->VT.isVector() &&
1367 "unexpected vector split in ps argument type");
1368 if (!SkipArg)
1369 Splits.push_back(*Arg);
1370 Arg = &Ins[++I];
1371 }
1372 }
1373
1374 if (SkipArg) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001375 // We can safely skip PS inputs.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001376 Skipped.set(Arg->getOrigArgIndex());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001377 ++PSInputNum;
1378 continue;
1379 }
1380
1381 Info->markPSInputAllocated(PSInputNum);
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001382 if (Arg->Used)
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001383 Info->markPSInputEnabled(PSInputNum);
1384
1385 ++PSInputNum;
1386 }
1387
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001388 Splits.push_back(*Arg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001389 }
1390}
1391
1392// Allocate special inputs passed in VGPRs.
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001393static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1394 MachineFunction &MF,
1395 const SIRegisterInfo &TRI,
1396 SIMachineFunctionInfo &Info) {
1397 if (Info.hasWorkItemIDX()) {
1398 unsigned Reg = AMDGPU::VGPR0;
1399 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001400
1401 CCInfo.AllocateReg(Reg);
1402 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1403 }
1404
1405 if (Info.hasWorkItemIDY()) {
1406 unsigned Reg = AMDGPU::VGPR1;
1407 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1408
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001409 CCInfo.AllocateReg(Reg);
1410 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1411 }
1412
1413 if (Info.hasWorkItemIDZ()) {
1414 unsigned Reg = AMDGPU::VGPR2;
1415 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1416
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001417 CCInfo.AllocateReg(Reg);
1418 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1419 }
1420}
1421
1422// Try to allocate a VGPR at the end of the argument list; if no argument
1423// VGPRs are left, allocate a stack slot instead.
1424static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1425 ArrayRef<MCPhysReg> ArgVGPRs
1426 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1427 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1428 if (RegIdx == ArgVGPRs.size()) {
1429 // Spill to stack required.
1430 int64_t Offset = CCInfo.AllocateStack(4, 4);
1431
1432 return ArgDescriptor::createStack(Offset);
1433 }
1434
1435 unsigned Reg = ArgVGPRs[RegIdx];
1436 Reg = CCInfo.AllocateReg(Reg);
1437 assert(Reg != AMDGPU::NoRegister);
1438
1439 MachineFunction &MF = CCInfo.getMachineFunction();
1440 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1441 return ArgDescriptor::createRegister(Reg);
1442}
1443
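// Allocate the next unallocated SGPR from the given class for a special
// input. Unlike the VGPR case above, these cannot fall back to a stack slot,
// so running out of argument SGPRs is a fatal error.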
1444static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1445 const TargetRegisterClass *RC,
1446 unsigned NumArgRegs) {
1447 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1448 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1449 if (RegIdx == ArgSGPRs.size())
1450 report_fatal_error("ran out of SGPRs for arguments");
1451
1452 unsigned Reg = ArgSGPRs[RegIdx];
1453 Reg = CCInfo.AllocateReg(Reg);
1454 assert(Reg != AMDGPU::NoRegister);
1455
1456 MachineFunction &MF = CCInfo.getMachineFunction();
1457 MF.addLiveIn(Reg, RC);
1458 return ArgDescriptor::createRegister(Reg);
1459}
1460
1461static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1462 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1463}
1464
1465static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1466 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1467}
1468
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001469static void allocateSpecialInputVGPRs(CCState &CCInfo,
1470 MachineFunction &MF,
1471 const SIRegisterInfo &TRI,
1472 SIMachineFunctionInfo &Info) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001473 if (Info.hasWorkItemIDX())
1474 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001475
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001476 if (Info.hasWorkItemIDY())
1477 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001478
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001479 if (Info.hasWorkItemIDZ())
1480 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1481}
1482
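// Allocate the special input values passed in SGPRs to a callable function.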
1483static void allocateSpecialInputSGPRs(CCState &CCInfo,
1484 MachineFunction &MF,
1485 const SIRegisterInfo &TRI,
1486 SIMachineFunctionInfo &Info) {
1487 auto &ArgInfo = Info.getArgInfo();
1488
1489 // TODO: Unify handling with private memory pointers.
1490
1491 if (Info.hasDispatchPtr())
1492 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1493
1494 if (Info.hasQueuePtr())
1495 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1496
1497 if (Info.hasKernargSegmentPtr())
1498 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1499
1500 if (Info.hasDispatchID())
1501 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1502
1503 // flat_scratch_init is not applicable for non-kernel functions.
1504
1505 if (Info.hasWorkGroupIDX())
1506 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1507
1508 if (Info.hasWorkGroupIDY())
1509 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1510
1511 if (Info.hasWorkGroupIDZ())
1512 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
Matt Arsenault817c2532017-08-03 23:12:44 +00001513
1514 if (Info.hasImplicitArgPtr())
1515 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001516}
1517
1518// Allocate special inputs passed in user SGPRs.
1519static void allocateHSAUserSGPRs(CCState &CCInfo,
1520 MachineFunction &MF,
1521 const SIRegisterInfo &TRI,
1522 SIMachineFunctionInfo &Info) {
Matt Arsenault10fc0622017-06-26 03:01:31 +00001523 if (Info.hasImplicitBufferPtr()) {
1524 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1525 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1526 CCInfo.AllocateReg(ImplicitBufferPtrReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001527 }
1528
1529 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1530 if (Info.hasPrivateSegmentBuffer()) {
1531 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1532 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1533 CCInfo.AllocateReg(PrivateSegmentBufferReg);
1534 }
1535
1536 if (Info.hasDispatchPtr()) {
1537 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1538 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1539 CCInfo.AllocateReg(DispatchPtrReg);
1540 }
1541
1542 if (Info.hasQueuePtr()) {
1543 unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1544 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1545 CCInfo.AllocateReg(QueuePtrReg);
1546 }
1547
1548 if (Info.hasKernargSegmentPtr()) {
1549 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1550 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1551 CCInfo.AllocateReg(InputPtrReg);
1552 }
1553
1554 if (Info.hasDispatchID()) {
1555 unsigned DispatchIDReg = Info.addDispatchID(TRI);
1556 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1557 CCInfo.AllocateReg(DispatchIDReg);
1558 }
1559
1560 if (Info.hasFlatScratchInit()) {
1561 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1562 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1563 CCInfo.AllocateReg(FlatScratchInitReg);
1564 }
1565
1566 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1567 // these from the dispatch pointer.
1568}
1569
1570// Allocate special input registers that are initialized per-wave.
1571static void allocateSystemSGPRs(CCState &CCInfo,
1572 MachineFunction &MF,
1573 SIMachineFunctionInfo &Info,
Marek Olsak584d2c02017-05-04 22:25:20 +00001574 CallingConv::ID CallConv,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001575 bool IsShader) {
1576 if (Info.hasWorkGroupIDX()) {
1577 unsigned Reg = Info.addWorkGroupIDX();
1578 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1579 CCInfo.AllocateReg(Reg);
1580 }
1581
1582 if (Info.hasWorkGroupIDY()) {
1583 unsigned Reg = Info.addWorkGroupIDY();
1584 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1585 CCInfo.AllocateReg(Reg);
1586 }
1587
1588 if (Info.hasWorkGroupIDZ()) {
1589 unsigned Reg = Info.addWorkGroupIDZ();
1590 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1591 CCInfo.AllocateReg(Reg);
1592 }
1593
1594 if (Info.hasWorkGroupInfo()) {
1595 unsigned Reg = Info.addWorkGroupInfo();
1596 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1597 CCInfo.AllocateReg(Reg);
1598 }
1599
1600 if (Info.hasPrivateSegmentWaveByteOffset()) {
1601 // Scratch wave offset passed in system SGPR.
1602 unsigned PrivateSegmentWaveByteOffsetReg;
1603
1604 if (IsShader) {
Marek Olsak584d2c02017-05-04 22:25:20 +00001605 PrivateSegmentWaveByteOffsetReg =
1606 Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1607
1608 // This is true if the scratch wave byte offset doesn't have a fixed
1609 // location.
1610 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1611 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1612 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1613 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001614 } else
1615 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1616
1617 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1618 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1619 }
1620}
1621
1622static void reservePrivateMemoryRegs(const TargetMachine &TM,
1623 MachineFunction &MF,
1624 const SIRegisterInfo &TRI,
Matt Arsenault1cc47f82017-07-18 16:44:56 +00001625 SIMachineFunctionInfo &Info) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001626 // Now that we've figured out where the scratch register inputs are, see if
1627 // we should reserve the arguments and use them directly.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001628 MachineFrameInfo &MFI = MF.getFrameInfo();
1629 bool HasStackObjects = MFI.hasStackObjects();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001630
1631 // Record that we know we have non-spill stack objects so we don't need to
1632 // check all stack objects later.
1633 if (HasStackObjects)
1634 Info.setHasNonSpillStackObjects(true);
1635
1636 // Everything live out of a block is spilled with fast regalloc, so it's
1637 // almost certain that spilling will be required.
1638 if (TM.getOptLevel() == CodeGenOpt::None)
1639 HasStackObjects = true;
1640
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001641 // For now assume stack access is needed in any callee functions, so we need
1642 // the scratch registers to pass in.
1643 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1644
Tom Stellard5bfbae52018-07-11 20:59:01 +00001645 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001646 if (ST.isAmdCodeObjectV2(MF.getFunction())) {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001647 if (RequiresStackAccess) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001648 // If we have stack objects, we unquestionably need the private buffer
1649 // resource. For the Code Object V2 ABI, this will be the first 4 user
1650 // SGPR inputs. We can reserve those and use them directly.
1651
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001652 unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1653 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001654 Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1655
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001656 if (MFI.hasCalls()) {
1657 // If we have calls, we need to keep the frame register in a register
1658 // that won't be clobbered by a call, so ensure it is copied somewhere.
1659
1660 // This is not a problem for the scratch wave offset, because the same
1661 // registers are reserved in all functions.
1662
1663 // FIXME: Nothing is really ensuring this is a call-preserved register;
1664 // it's just selected from the end so it happens to be.
1665 unsigned ReservedOffsetReg
1666 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1667 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1668 } else {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001669 unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1670 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001671 Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1672 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001673 } else {
1674 unsigned ReservedBufferReg
1675 = TRI.reservedPrivateSegmentBufferReg(MF);
1676 unsigned ReservedOffsetReg
1677 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1678
1679 // We tentatively reserve the last registers (skipping the last two
1680 // which may contain VCC). After register allocation, we'll replace
1681 // these with the ones immediately after those which were really
1682 // allocated. In the prologue, copies will be inserted from the argument
1683 // to these reserved registers.
1684 Info.setScratchRSrcReg(ReservedBufferReg);
1685 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1686 }
1687 } else {
1688 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1689
1690 // Without HSA, relocations are used for the scratch pointer and the
1691 // buffer resource setup is always inserted in the prologue. Scratch wave
1692 // offset is still in an input SGPR.
1693 Info.setScratchRSrcReg(ReservedBufferReg);
1694
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001695 if (HasStackObjects && !MFI.hasCalls()) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001696 unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1697 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001698 Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1699 } else {
1700 unsigned ReservedOffsetReg
1701 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1702 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1703 }
1704 }
1705}
1706
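// Splitting callee-saved register handling into copies is only used for
// callable functions; entry functions have no callers whose registers need to
// be preserved.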
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001707bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1708 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1709 return !Info->isEntryFunction();
1710}
1711
1712void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1713
1714}
1715
1716void SITargetLowering::insertCopiesSplitCSR(
1717 MachineBasicBlock *Entry,
1718 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1719 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1720
1721 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1722 if (!IStart)
1723 return;
1724
1725 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1726 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1727 MachineBasicBlock::iterator MBBI = Entry->begin();
1728 for (const MCPhysReg *I = IStart; *I; ++I) {
1729 const TargetRegisterClass *RC = nullptr;
1730 if (AMDGPU::SReg_64RegClass.contains(*I))
1731 RC = &AMDGPU::SGPR_64RegClass;
1732 else if (AMDGPU::SReg_32RegClass.contains(*I))
1733 RC = &AMDGPU::SGPR_32RegClass;
1734 else
1735 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1736
1737 unsigned NewVR = MRI->createVirtualRegister(RC);
1738 // Create copy from CSR to a virtual register.
1739 Entry->addLiveIn(*I);
1740 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1741 .addReg(*I);
1742
1743 // Insert the copy-back instructions right before the terminator.
1744 for (auto *Exit : Exits)
1745 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1746 TII->get(TargetOpcode::COPY), *I)
1747 .addReg(NewVR);
1748 }
1749}
1750
Christian Konig2c8f6d52013-03-07 09:03:52 +00001751SDValue SITargetLowering::LowerFormalArguments(
Eric Christopher7792e322015-01-30 23:24:40 +00001752 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001753 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1754 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001755 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001756
1757 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001758 const Function &Fn = MF.getFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00001759 FunctionType *FType = MF.getFunction().getFunctionType();
Christian Konig99ee0f42013-03-07 09:04:14 +00001760 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Tom Stellard5bfbae52018-07-11 20:59:01 +00001761 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001762
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +00001763 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00001764 DiagnosticInfoUnsupported NoGraphicsHSA(
Matthias Braunf1caa282017-12-15 22:22:58 +00001765 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
Matt Arsenaultd48da142015-11-02 23:23:02 +00001766 DAG.getContext()->diagnose(NoGraphicsHSA);
Diana Picus81bc3172016-05-26 15:24:55 +00001767 return DAG.getEntryNode();
Matt Arsenaultd48da142015-11-02 23:23:02 +00001768 }
1769
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00001770 // Create stack objects that are used for emitting the debugger prologue if
1771 // the "amdgpu-debugger-emit-prologue" attribute was specified.
1772 if (ST.debuggerEmitPrologue())
1773 createDebuggerPrologueStackObjects(MF);
1774
Christian Konig2c8f6d52013-03-07 09:03:52 +00001775 SmallVector<ISD::InputArg, 16> Splits;
Christian Konig2c8f6d52013-03-07 09:03:52 +00001776 SmallVector<CCValAssign, 16> ArgLocs;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001777 BitVector Skipped(Ins.size());
Eric Christopherb5217502014-08-06 18:45:26 +00001778 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1779 *DAG.getContext());
Christian Konig2c8f6d52013-03-07 09:03:52 +00001780
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001781 bool IsShader = AMDGPU::isShader(CallConv);
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +00001782 bool IsKernel = AMDGPU::isKernel(CallConv);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001783 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
Christian Konig99ee0f42013-03-07 09:04:14 +00001784
Matt Arsenaultd1867c02017-08-02 00:59:51 +00001785 if (!IsEntryFunc) {
1786 // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1787 // this when allocating fixed stack offsets for arguments.
1788 CCInfo.AllocateStack(4, 4);
1789 }
1790
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001791 if (IsShader) {
1792 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1793
1794 // At least one interpolation mode must be enabled or else the GPU will
1795 // hang.
1796 //
1797 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1798 // set PSInputAddr, the user wants to enable some bits after the compilation
1799 // based on run-time states. Since we can't know what the final PSInputEna
1800 // will look like, we shouldn't do anything here and the user should take
1801 // responsibility for the correct programming.
1802 //
1803 // Otherwise, the following restrictions apply:
1804 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1805 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1806 // enabled too.
Tim Renoufc8ffffe2017-10-12 16:16:41 +00001807 if (CallConv == CallingConv::AMDGPU_PS) {
1808 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1809 ((Info->getPSInputAddr() & 0xF) == 0 &&
1810 Info->isPSInputAllocated(11))) {
1811 CCInfo.AllocateReg(AMDGPU::VGPR0);
1812 CCInfo.AllocateReg(AMDGPU::VGPR1);
1813 Info->markPSInputAllocated(0);
1814 Info->markPSInputEnabled(0);
1815 }
1816 if (Subtarget->isAmdPalOS()) {
1817 // For isAmdPalOS, the user does not enable some bits after compilation
1818 // based on run-time states; the register values being generated here are
1819 // the final ones set in hardware. Therefore we need to apply the
1820 // workaround to PSInputAddr and PSInputEnable together. (The case where
1821 // a bit is set in PSInputAddr but not PSInputEnable is where the
1822 // frontend set up an input arg for a particular interpolation mode, but
1823 // nothing uses that input arg. Really we should have an earlier pass
1824 // that removes such an arg.)
1825 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1826 if ((PsInputBits & 0x7F) == 0 ||
1827 ((PsInputBits & 0xF) == 0 &&
1828 (PsInputBits >> 11 & 1)))
1829 Info->markPSInputEnabled(
1830 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1831 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001832 }
1833
Tom Stellard2f3f9852017-01-25 01:25:13 +00001834 assert(!Info->hasDispatchPtr() &&
Tom Stellardf110f8f2016-04-14 16:27:03 +00001835 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1836 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1837 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1838 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1839 !Info->hasWorkItemIDZ());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001840 } else if (IsKernel) {
1841 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001842 } else {
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001843 Splits.append(Ins.begin(), Ins.end());
Tom Stellardaf775432013-10-23 00:44:32 +00001844 }
1845
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001846 if (IsEntryFunc) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001847 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001848 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
Tom Stellard2f3f9852017-01-25 01:25:13 +00001849 }
1850
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001851 if (IsKernel) {
Tom Stellardbbeb45a2016-09-16 21:53:00 +00001852 analyzeFormalArgumentsCompute(CCInfo, Ins);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001853 } else {
1854 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1855 CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1856 }
Christian Konig2c8f6d52013-03-07 09:03:52 +00001857
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001858 SmallVector<SDValue, 16> Chains;
1859
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001860 // FIXME: This is the minimum kernel argument alignment. We should improve
1861 // this to the maximum alignment of the arguments.
1862 //
1863 // FIXME: Alignment of explicit arguments is totally broken with a non-0
1864 // explicit kern arg offset.
1865 const unsigned KernelArgBaseAlign = 16;
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001866
1867 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
Christian Konigb7be72d2013-05-17 09:46:48 +00001868 const ISD::InputArg &Arg = Ins[i];
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001869 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
Christian Konigb7be72d2013-05-17 09:46:48 +00001870 InVals.push_back(DAG.getUNDEF(Arg.VT));
Christian Konig99ee0f42013-03-07 09:04:14 +00001871 continue;
1872 }
1873
Christian Konig2c8f6d52013-03-07 09:03:52 +00001874 CCValAssign &VA = ArgLocs[ArgIdx++];
Craig Topper7f416c82014-11-16 21:17:18 +00001875 MVT VT = VA.getLocVT();
Tom Stellarded882c22013-06-03 17:40:11 +00001876
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001877 if (IsEntryFunc && VA.isMemLoc()) {
Tom Stellardaf775432013-10-23 00:44:32 +00001878 VT = Ins[i].VT;
Tom Stellardbbeb45a2016-09-16 21:53:00 +00001879 EVT MemVT = VA.getLocVT();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001880
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001881 const uint64_t Offset = VA.getLocMemOffset();
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001882 unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001883
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001884 SDValue Arg = lowerKernargMemParameter(
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001885 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001886 Chains.push_back(Arg.getValue(1));
Tom Stellardca7ecf32014-08-22 18:49:31 +00001887
Craig Toppere3dcce92015-08-01 22:20:21 +00001888 auto *ParamTy =
Andrew Trick05938a52015-02-16 18:10:47 +00001889 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
Tom Stellard5bfbae52018-07-11 20:59:01 +00001890 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001891 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
Tom Stellardca7ecf32014-08-22 18:49:31 +00001892 // On SI, local pointers are just offsets into LDS, so they are always
1893 // less than 16 bits. On CI and newer they could potentially be
1894 // real pointers, so we can't guarantee their size.
1895 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1896 DAG.getValueType(MVT::i16));
1897 }
1898
Tom Stellarded882c22013-06-03 17:40:11 +00001899 InVals.push_back(Arg);
1900 continue;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001901 } else if (!IsEntryFunc && VA.isMemLoc()) {
1902 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
1903 InVals.push_back(Val);
1904 if (!Arg.Flags.isByVal())
1905 Chains.push_back(Val.getValue(1));
1906 continue;
Tom Stellarded882c22013-06-03 17:40:11 +00001907 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001908
Christian Konig2c8f6d52013-03-07 09:03:52 +00001909 assert(VA.isRegLoc() && "Parameter must be in a register!");
1910
1911 unsigned Reg = VA.getLocReg();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001912 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
Matt Arsenaultb3463552017-07-15 05:52:59 +00001913 EVT ValVT = VA.getValVT();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001914
1915 Reg = MF.addLiveIn(Reg, RC);
1916 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1917
Matt Arsenault45b98182017-11-15 00:45:43 +00001918 if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
1919 // The return object should be reasonably addressable.
1920
1921 // FIXME: This helps when the return is a real sret. If it is a
1922 // automatically inserted sret (i.e. CanLowerReturn returns false), an
1923 // extra copy is inserted in SelectionDAGBuilder which obscures this.
1924 unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
1925 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1926 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
1927 }
1928
Matt Arsenaultb3463552017-07-15 05:52:59 +00001929 // If this is an 8 or 16-bit value, it is really passed promoted
1930 // to 32 bits. Insert an assert[sz]ext to capture this, then
1931 // truncate to the right size.
1932 switch (VA.getLocInfo()) {
1933 case CCValAssign::Full:
1934 break;
1935 case CCValAssign::BCvt:
1936 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
1937 break;
1938 case CCValAssign::SExt:
1939 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
1940 DAG.getValueType(ValVT));
1941 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1942 break;
1943 case CCValAssign::ZExt:
1944 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1945 DAG.getValueType(ValVT));
1946 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1947 break;
1948 case CCValAssign::AExt:
1949 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1950 break;
1951 default:
1952 llvm_unreachable("Unknown loc info!");
1953 }
1954
Christian Konig2c8f6d52013-03-07 09:03:52 +00001955 InVals.push_back(Val);
1956 }
Tom Stellarde99fb652015-01-20 19:33:04 +00001957
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001958 if (!IsEntryFunc) {
1959 // Special inputs come after user arguments.
1960 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
1961 }
1962
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001963 // Start adding system SGPRs.
1964 if (IsEntryFunc) {
1965 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001966 } else {
1967 CCInfo.AllocateReg(Info->getScratchRSrcReg());
1968 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
1969 CCInfo.AllocateReg(Info->getFrameOffsetReg());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001970 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001971 }
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001972
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001973 auto &ArgUsageInfo =
1974 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001975 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001976
Matt Arsenault71bcbd42017-08-11 20:42:08 +00001977 unsigned StackArgSize = CCInfo.getNextStackOffset();
1978 Info->setBytesInStackArgArea(StackArgSize);
1979
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001980 return Chains.empty() ? Chain :
1981 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
Christian Konig2c8f6d52013-03-07 09:03:52 +00001982}
1983
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001984// TODO: If return values can't fit in registers, we should return as many as
1985// possible in registers before passing on stack.
1986bool SITargetLowering::CanLowerReturn(
1987 CallingConv::ID CallConv,
1988 MachineFunction &MF, bool IsVarArg,
1989 const SmallVectorImpl<ISD::OutputArg> &Outs,
1990 LLVMContext &Context) const {
1991 // Replacing returns with sret/stack usage doesn't make sense for shaders.
1992 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
1993 // for shaders. Vector types should be explicitly handled by CC.
1994 if (AMDGPU::isEntryFunctionCC(CallConv))
1995 return true;
1996
1997 SmallVector<CCValAssign, 16> RVLocs;
1998 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1999 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2000}
2001
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002002SDValue
2003SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2004 bool isVarArg,
2005 const SmallVectorImpl<ISD::OutputArg> &Outs,
2006 const SmallVectorImpl<SDValue> &OutVals,
2007 const SDLoc &DL, SelectionDAG &DAG) const {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002008 MachineFunction &MF = DAG.getMachineFunction();
2009 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2010
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002011 if (AMDGPU::isKernel(CallConv)) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002012 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2013 OutVals, DL, DAG);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002014 }
2015
2016 bool IsShader = AMDGPU::isShader(CallConv);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002017
Matt Arsenault55ab9212018-08-01 19:57:34 +00002018 Info->setIfReturnsVoid(Outs.empty());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002019 bool IsWaveEnd = Info->returnsVoid() && IsShader;
Marek Olsak8e9cc632016-01-13 17:23:09 +00002020
Marek Olsak8a0f3352016-01-13 17:23:04 +00002021 // CCValAssign - represent the assignment of the return value to a location.
2022 SmallVector<CCValAssign, 48> RVLocs;
Matt Arsenault55ab9212018-08-01 19:57:34 +00002023 SmallVector<ISD::OutputArg, 48> Splits;
Marek Olsak8a0f3352016-01-13 17:23:04 +00002024
2025 // CCState - Info about the registers and stack slots.
2026 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2027 *DAG.getContext());
2028
2029 // Analyze outgoing return values.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002030 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
Marek Olsak8a0f3352016-01-13 17:23:04 +00002031
2032 SDValue Flag;
2033 SmallVector<SDValue, 48> RetOps;
2034 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2035
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002036 // Add return address for callable functions.
2037 if (!Info->isEntryFunction()) {
2038 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2039 SDValue ReturnAddrReg = CreateLiveInRegister(
2040 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2041
2042 // FIXME: Should be able to use a vreg here, but need a way to prevent it
2043 // from being allocated to a CSR.
2044
2045 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2046 MVT::i64);
2047
2048 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2049 Flag = Chain.getValue(1);
2050
2051 RetOps.push_back(PhysReturnAddrReg);
2052 }
2053
Marek Olsak8a0f3352016-01-13 17:23:04 +00002054 // Copy the result values into the output registers.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002055 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2056 ++I, ++RealRVLocIdx) {
2057 CCValAssign &VA = RVLocs[I];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002058 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002059 // TODO: Partially return in registers if return values don't fit.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002060 SDValue Arg = OutVals[RealRVLocIdx];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002061
2062 // Copied from other backends.
2063 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002064 case CCValAssign::Full:
2065 break;
2066 case CCValAssign::BCvt:
2067 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2068 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002069 case CCValAssign::SExt:
2070 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2071 break;
2072 case CCValAssign::ZExt:
2073 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2074 break;
2075 case CCValAssign::AExt:
2076 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2077 break;
2078 default:
2079 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002080 }
2081
2082 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2083 Flag = Chain.getValue(1);
2084 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2085 }
2086
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002087 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002088 if (!Info->isEntryFunction()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002089 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002090 const MCPhysReg *I =
2091 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2092 if (I) {
2093 for (; *I; ++I) {
2094 if (AMDGPU::SReg_64RegClass.contains(*I))
2095 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2096 else if (AMDGPU::SReg_32RegClass.contains(*I))
2097 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2098 else
2099 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2100 }
2101 }
2102 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002103
Marek Olsak8a0f3352016-01-13 17:23:04 +00002104 // Update chain and glue.
2105 RetOps[0] = Chain;
2106 if (Flag.getNode())
2107 RetOps.push_back(Flag);
2108
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002109 unsigned Opc = AMDGPUISD::ENDPGM;
2110 if (!IsWaveEnd)
2111 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002112 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002113}
2114
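// Copy the values returned by a call out of their assigned physical
// registers, applying any extension or truncation required by the calling
// convention.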
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002115SDValue SITargetLowering::LowerCallResult(
2116 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2117 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2118 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2119 SDValue ThisVal) const {
2120 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2121
2122 // Assign locations to each value returned by this call.
2123 SmallVector<CCValAssign, 16> RVLocs;
2124 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2125 *DAG.getContext());
2126 CCInfo.AnalyzeCallResult(Ins, RetCC);
2127
2128 // Copy all of the result registers out of their specified physreg.
2129 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2130 CCValAssign VA = RVLocs[i];
2131 SDValue Val;
2132
2133 if (VA.isRegLoc()) {
2134 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2135 Chain = Val.getValue(1);
2136 InFlag = Val.getValue(2);
2137 } else if (VA.isMemLoc()) {
2138 report_fatal_error("TODO: return values in memory");
2139 } else
2140 llvm_unreachable("unknown argument location type");
2141
2142 switch (VA.getLocInfo()) {
2143 case CCValAssign::Full:
2144 break;
2145 case CCValAssign::BCvt:
2146 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2147 break;
2148 case CCValAssign::ZExt:
2149 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2150 DAG.getValueType(VA.getValVT()));
2151 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2152 break;
2153 case CCValAssign::SExt:
2154 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2155 DAG.getValueType(VA.getValVT()));
2156 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2157 break;
2158 case CCValAssign::AExt:
2159 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2160 break;
2161 default:
2162 llvm_unreachable("Unknown loc info!");
2163 }
2164
2165 InVals.push_back(Val);
2166 }
2167
2168 return Chain;
2169}
2170
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002171// Add code to pass the special inputs that are required depending on the
2172// features used, separate from the explicit user arguments present in the IR.
2173void SITargetLowering::passSpecialInputs(
2174 CallLoweringInfo &CLI,
2175 const SIMachineFunctionInfo &Info,
2176 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2177 SmallVectorImpl<SDValue> &MemOpChains,
2178 SDValue Chain,
2179 SDValue StackPtr) const {
2180 // If we don't have a call site, this was a call inserted by
2181 // legalization. These can never use special inputs.
2182 if (!CLI.CS)
2183 return;
2184
2185 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002186 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002187
2188 SelectionDAG &DAG = CLI.DAG;
2189 const SDLoc &DL = CLI.DL;
2190
Tom Stellardc5a154d2018-06-28 23:47:12 +00002191 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002192
2193 auto &ArgUsageInfo =
2194 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2195 const AMDGPUFunctionArgInfo &CalleeArgInfo
2196 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2197
2198 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2199
2200 // TODO: Unify with private memory register handling. This is complicated by
2201 // the fact that at least in kernels, the input argument is not necessarily
2202 // in the same location as the input.
2203 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2204 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2205 AMDGPUFunctionArgInfo::QUEUE_PTR,
2206 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2207 AMDGPUFunctionArgInfo::DISPATCH_ID,
2208 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2209 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2210 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2211 AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2212 AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
Matt Arsenault817c2532017-08-03 23:12:44 +00002213 AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2214 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002215 };
2216
2217 for (auto InputID : InputRegs) {
2218 const ArgDescriptor *OutgoingArg;
2219 const TargetRegisterClass *ArgRC;
2220
2221 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2222 if (!OutgoingArg)
2223 continue;
2224
2225 const ArgDescriptor *IncomingArg;
2226 const TargetRegisterClass *IncomingArgRC;
2227 std::tie(IncomingArg, IncomingArgRC)
2228 = CallerArgInfo.getPreloadedValue(InputID);
2229 assert(IncomingArgRC == ArgRC);
2230
2231 // All special arguments are ints for now.
2232 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
Matt Arsenault817c2532017-08-03 23:12:44 +00002233 SDValue InputReg;
2234
2235 if (IncomingArg) {
2236 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2237 } else {
2238 // The implicit arg ptr is special because it doesn't have a corresponding
2239 // input for kernels, and is computed from the kernarg segment pointer.
2240 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2241 InputReg = getImplicitArgPtr(DAG, DL);
2242 }
2243
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002244 if (OutgoingArg->isRegister()) {
2245 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2246 } else {
2247 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
2248 InputReg,
2249 OutgoingArg->getStackOffset());
2250 MemOpChains.push_back(ArgStore);
2251 }
2252 }
2253}
2254
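// Return true if the calling convention is one for which a tail call can be
// guaranteed when requested.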
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002255static bool canGuaranteeTCO(CallingConv::ID CC) {
2256 return CC == CallingConv::Fast;
2257}
2258
2259/// Return true if we might ever do TCO for calls with this calling convention.
2260static bool mayTailCallThisCC(CallingConv::ID CC) {
2261 switch (CC) {
2262 case CallingConv::C:
2263 return true;
2264 default:
2265 return canGuaranteeTCO(CC);
2266 }
2267}
2268
2269bool SITargetLowering::isEligibleForTailCallOptimization(
2270 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2271 const SmallVectorImpl<ISD::OutputArg> &Outs,
2272 const SmallVectorImpl<SDValue> &OutVals,
2273 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2274 if (!mayTailCallThisCC(CalleeCC))
2275 return false;
2276
2277 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002278 const Function &CallerF = MF.getFunction();
2279 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002280 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2281 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2282
2283 // Kernels aren't callable, and don't have a live-in return address, so it
2284 // doesn't make sense to do a tail call with entry functions.
2285 if (!CallerPreserved)
2286 return false;
2287
2288 bool CCMatch = CallerCC == CalleeCC;
2289
2290 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2291 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2292 return true;
2293 return false;
2294 }
2295
2296 // TODO: Can we handle var args?
2297 if (IsVarArg)
2298 return false;
2299
Matthias Braunf1caa282017-12-15 22:22:58 +00002300 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002301 if (Arg.hasByValAttr())
2302 return false;
2303 }
2304
2305 LLVMContext &Ctx = *DAG.getContext();
2306
2307 // Check that the call results are passed in the same way.
2308 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2309 CCAssignFnForCall(CalleeCC, IsVarArg),
2310 CCAssignFnForCall(CallerCC, IsVarArg)))
2311 return false;
2312
2313 // The callee has to preserve all registers the caller needs to preserve.
2314 if (!CCMatch) {
2315 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2316 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2317 return false;
2318 }
2319
2320 // Nothing more to check if the callee is taking no arguments.
2321 if (Outs.empty())
2322 return true;
2323
2324 SmallVector<CCValAssign, 16> ArgLocs;
2325 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2326
2327 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2328
2329 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2330 // If the stack arguments for this call do not fit into our own save area then
2331 // the call cannot be made tail.
2332 // TODO: Is this really necessary?
2333 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2334 return false;
2335
2336 const MachineRegisterInfo &MRI = MF.getRegInfo();
2337 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2338}
2339
2340bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2341 if (!CI->isTailCall())
2342 return false;
2343
2344 const Function *ParentFn = CI->getParent()->getParent();
2345 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2346 return false;
2347
2348 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2349 return (Attr.getValueAsString() != "true");
2350}
2351
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002352// The wave scratch offset register is used as the global base pointer.
2353SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2354 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002355 SelectionDAG &DAG = CLI.DAG;
2356 const SDLoc &DL = CLI.DL;
2357 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2358 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2359 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2360 SDValue Chain = CLI.Chain;
2361 SDValue Callee = CLI.Callee;
2362 bool &IsTailCall = CLI.IsTailCall;
2363 CallingConv::ID CallConv = CLI.CallConv;
2364 bool IsVarArg = CLI.IsVarArg;
2365 bool IsSibCall = false;
2366 bool IsThisReturn = false;
2367 MachineFunction &MF = DAG.getMachineFunction();
2368
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002369 if (IsVarArg) {
2370 return lowerUnhandledCall(CLI, InVals,
2371 "unsupported call to variadic function ");
2372 }
2373
2374 if (!CLI.CS.getCalledFunction()) {
2375 return lowerUnhandledCall(CLI, InVals,
2376 "unsupported indirect call to function ");
2377 }
2378
2379 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2380 return lowerUnhandledCall(CLI, InVals,
2381 "unsupported required tail call to function ");
2382 }
2383
Matt Arsenault1fb90132018-06-28 10:18:36 +00002384 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2385 // Note the issue is with the CC of the calling function, not of the call
2386 // itself.
2387 return lowerUnhandledCall(CLI, InVals,
2388 "unsupported call from graphics shader of function ");
2389 }
2390
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002391 // The first 4 bytes are reserved for the callee's emergency stack slot.
2392 const unsigned CalleeUsableStackOffset = 4;
2393
2394 if (IsTailCall) {
2395 IsTailCall = isEligibleForTailCallOptimization(
2396 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2397 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2398 report_fatal_error("failed to perform tail call elimination on a call "
2399 "site marked musttail");
2400 }
2401
2402 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2403
2404 // A sibling call is one where we're under the usual C ABI and not planning
2405 // to change that but can still do a tail call:
2406 if (!TailCallOpt && IsTailCall)
2407 IsSibCall = true;
2408
2409 if (IsTailCall)
2410 ++NumTailCalls;
2411 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002412
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002413 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
Yaxun Liu1ac16612017-11-06 13:01:33 +00002414 // FIXME: Remove this hack for function pointer types after removing
2415 // support for the old address space mapping. In the new address space
2416 // mapping, pointers in the default address space are 64-bit, so this
2417 // hack is not needed.
2418 if (Callee.getValueType() == MVT::i32) {
2419 const GlobalValue *GV = GA->getGlobal();
2420 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2421 GA->getTargetFlags());
2422 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002423 }
Yaxun Liu1ac16612017-11-06 13:01:33 +00002424 assert(Callee.getValueType() == MVT::i64);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002425
2426 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2427
2428 // Analyze operands of the call, assigning locations to each operand.
2429 SmallVector<CCValAssign, 16> ArgLocs;
2430 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2431 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2432 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2433
2434 // Get a count of how many bytes are to be pushed on the stack.
2435 unsigned NumBytes = CCInfo.getNextStackOffset();
2436
2437 if (IsSibCall) {
2438 // Since we're not changing the ABI to make this a tail call, the memory
2439 // operands are already available in the caller's incoming argument space.
2440 NumBytes = 0;
2441 }
2442
2443 // FPDiff is the byte offset of the call's argument area from the callee's.
2444 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2445 // by this amount for a tail call. In a sibling call it must be 0 because the
2446 // caller will deallocate the entire stack and the callee still expects its
2447 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002448 int32_t FPDiff = 0;
2449 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002450 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2451
Matt Arsenault6efd0822017-09-14 17:14:57 +00002452 SDValue CallerSavedFP;
2453
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002454 // Adjust the stack pointer for the new arguments...
2455 // These operations are automatically eliminated by the prolog/epilog pass
2456 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002457 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002458
2459 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2460
2461 // In the HSA case, this should be an identity copy.
2462 SDValue ScratchRSrcReg
2463 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2464 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2465
2466 // TODO: Don't hardcode these registers and get from the callee function.
2467 SDValue ScratchWaveOffsetReg
2468 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2469 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault6efd0822017-09-14 17:14:57 +00002470
2471 if (!Info->isEntryFunction()) {
2472 // Avoid clobbering this function's FP value. In the current convention the
2473 // callee will overwrite it, so save and restore it around the call site.
2474 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2475 Info->getFrameOffsetReg(), MVT::i32);
2476 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002477 }
2478
2479 // Stack pointer relative accesses are done by changing the offset SGPR. This
2480 // is just the VGPR offset component.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002481 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002482
2483 SmallVector<SDValue, 8> MemOpChains;
2484 MVT PtrVT = MVT::i32;
2485
2486 // Walk the register/memloc assignments, inserting copies/loads.
2487 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2488 ++i, ++realArgIdx) {
2489 CCValAssign &VA = ArgLocs[i];
2490 SDValue Arg = OutVals[realArgIdx];
2491
2492 // Promote the value if needed.
2493 switch (VA.getLocInfo()) {
2494 case CCValAssign::Full:
2495 break;
2496 case CCValAssign::BCvt:
2497 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2498 break;
2499 case CCValAssign::ZExt:
2500 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2501 break;
2502 case CCValAssign::SExt:
2503 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2504 break;
2505 case CCValAssign::AExt:
2506 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2507 break;
2508 case CCValAssign::FPExt:
2509 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2510 break;
2511 default:
2512 llvm_unreachable("Unknown loc info!");
2513 }
2514
2515 if (VA.isRegLoc()) {
2516 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2517 } else {
2518 assert(VA.isMemLoc());
2519
2520 SDValue DstAddr;
2521 MachinePointerInfo DstInfo;
2522
2523 unsigned LocMemOffset = VA.getLocMemOffset();
2524 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002525
2526 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002527
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002528 if (IsTailCall) {
2529 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2530 unsigned OpSize = Flags.isByVal() ?
2531 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002532
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002533 Offset = Offset + FPDiff;
2534 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2535
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002536 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2537 StackPtr);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002538 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2539
2540 // Make sure any stack arguments overlapping with where we're storing
2541 // are loaded before this eventual operation. Otherwise they'll be
2542 // clobbered.
2543
2544 // FIXME: Why is this really necessary? This seems to just result in a
2545 // lot of code to copy the stack arguments and write them back to the
2546 // same locations, which are supposed to be immutable?
2547 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2548 } else {
2549 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002550 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2551 }
2552
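      // byval arguments are passed by copy: emit a memcpy of the whole object
      // into the callee's argument slot instead of storing a pointer to it.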
2553 if (Outs[i].Flags.isByVal()) {
2554 SDValue SizeNode =
2555 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2556 SDValue Cpy = DAG.getMemcpy(
2557 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2558 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002559 /*isTailCall = */ false, DstInfo,
2560 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2561 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002562
2563 MemOpChains.push_back(Cpy);
2564 } else {
2565 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2566 MemOpChains.push_back(Store);
2567 }
2568 }
2569 }
2570
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002571 // Copy special input registers after user input arguments.
2572 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2573
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002574 if (!MemOpChains.empty())
2575 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2576
2577 // Build a sequence of copy-to-reg nodes chained together with token chain
2578 // and flag operands which copy the outgoing args into the appropriate regs.
2579 SDValue InFlag;
2580 for (auto &RegToPass : RegsToPass) {
2581 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2582 RegToPass.second, InFlag);
2583 InFlag = Chain.getValue(1);
2584 }
2585
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002586
2587 SDValue PhysReturnAddrReg;
2588 if (IsTailCall) {
2589 // Since the return is being combined with the call, we need to pass on the
2590 // return address.
2591
2592 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2593 SDValue ReturnAddrReg = CreateLiveInRegister(
2594 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2595
2596 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2597 MVT::i64);
2598 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2599 InFlag = Chain.getValue(1);
2600 }
2601
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002602 // We don't usually want to end the call-sequence here because we would tidy
2603 // the frame up *after* the call. However, in the ABI-changing tail-call case
2604 // we've carefully laid out the parameters so that when sp is reset they'll be
2605 // in the correct location.
2606 if (IsTailCall && !IsSibCall) {
2607 Chain = DAG.getCALLSEQ_END(Chain,
2608 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2609 DAG.getTargetConstant(0, DL, MVT::i32),
2610 InFlag, DL);
2611 InFlag = Chain.getValue(1);
2612 }
2613
2614 std::vector<SDValue> Ops;
2615 Ops.push_back(Chain);
2616 Ops.push_back(Callee);
2617
2618 if (IsTailCall) {
2619 // Each tail call may have to adjust the stack by a different amount, so
2620 // this information must travel along with the operation for eventual
2621 // consumption by emitEpilogue.
2622 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002623
2624 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002625 }
2626
2627 // Add argument registers to the end of the list so that they are known live
2628 // into the call.
2629 for (auto &RegToPass : RegsToPass) {
2630 Ops.push_back(DAG.getRegister(RegToPass.first,
2631 RegToPass.second.getValueType()));
2632 }
2633
2634 // Add a register mask operand representing the call-preserved registers.
2635
Tom Stellardc5a154d2018-06-28 23:47:12 +00002636 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002637 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2638 assert(Mask && "Missing call preserved mask for calling convention");
2639 Ops.push_back(DAG.getRegisterMask(Mask));
2640
2641 if (InFlag.getNode())
2642 Ops.push_back(InFlag);
2643
2644 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2645
2646 // If we're doing a tail call, use a TC_RETURN here rather than an
2647 // actual call instruction.
2648 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002649 MFI.setHasTailCall();
2650 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002651 }
2652
2653 // Returns a chain and a flag for retval copy to use.
2654 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2655 Chain = Call.getValue(0);
2656 InFlag = Call.getValue(1);
2657
Matt Arsenault6efd0822017-09-14 17:14:57 +00002658 if (CallerSavedFP) {
2659 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2660 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2661 InFlag = Chain.getValue(1);
2662 }
2663
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002664 uint64_t CalleePopBytes = NumBytes;
2665 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002666 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2667 InFlag, DL);
2668 if (!Ins.empty())
2669 InFlag = Chain.getValue(1);
2670
2671 // Handle result values, copying them out of physregs into vregs that we
2672 // return.
2673 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2674 InVals, IsThisReturn,
2675 IsThisReturn ? OutVals[0] : SDValue());
2676}
2677
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002678unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2679 SelectionDAG &DAG) const {
2680 unsigned Reg = StringSwitch<unsigned>(RegName)
2681 .Case("m0", AMDGPU::M0)
2682 .Case("exec", AMDGPU::EXEC)
2683 .Case("exec_lo", AMDGPU::EXEC_LO)
2684 .Case("exec_hi", AMDGPU::EXEC_HI)
2685 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2686 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2687 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2688 .Default(AMDGPU::NoRegister);
2689
2690 if (Reg == AMDGPU::NoRegister) {
2691 report_fatal_error(Twine("invalid register name \""
2692 + StringRef(RegName) + "\"."));
2693
2694 }
2695
Tom Stellard5bfbae52018-07-11 20:59:01 +00002696 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002697 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2698 report_fatal_error(Twine("invalid register \""
2699 + StringRef(RegName) + "\" for subtarget."));
2700 }
2701
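  // Check that the requested value type matches the width of the register.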
2702 switch (Reg) {
2703 case AMDGPU::M0:
2704 case AMDGPU::EXEC_LO:
2705 case AMDGPU::EXEC_HI:
2706 case AMDGPU::FLAT_SCR_LO:
2707 case AMDGPU::FLAT_SCR_HI:
2708 if (VT.getSizeInBits() == 32)
2709 return Reg;
2710 break;
2711 case AMDGPU::EXEC:
2712 case AMDGPU::FLAT_SCR:
2713 if (VT.getSizeInBits() == 64)
2714 return Reg;
2715 break;
2716 default:
2717 llvm_unreachable("missing register type checking");
2718 }
2719
2720 report_fatal_error(Twine("invalid type for register \""
2721 + StringRef(RegName) + "\"."));
2722}
2723
Matt Arsenault786724a2016-07-12 21:41:32 +00002724// If kill is not the last instruction, split the block so kill is always a
2725// proper terminator.
2726MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2727 MachineBasicBlock *BB) const {
2728 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2729
2730 MachineBasicBlock::iterator SplitPoint(&MI);
2731 ++SplitPoint;
2732
2733 if (SplitPoint == BB->end()) {
2734 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002735 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002736 return BB;
2737 }
2738
2739 MachineFunction *MF = BB->getParent();
2740 MachineBasicBlock *SplitBB
2741 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2742
Matt Arsenault786724a2016-07-12 21:41:32 +00002743 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2744 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2745
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002746 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002747 BB->addSuccessor(SplitBB);
2748
Marek Olsakce76ea02017-10-24 10:27:13 +00002749 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002750 return SplitBB;
2751}
2752
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002753// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2754// wavefront. If the value is uniform and just happens to be in a VGPR, this
2755// will only do one iteration. In the worst case, this will loop 64 times.
2756//
2757// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002758static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2759 const SIInstrInfo *TII,
2760 MachineRegisterInfo &MRI,
2761 MachineBasicBlock &OrigBB,
2762 MachineBasicBlock &LoopBB,
2763 const DebugLoc &DL,
2764 const MachineOperand &IdxReg,
2765 unsigned InitReg,
2766 unsigned ResultReg,
2767 unsigned PhiReg,
2768 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002769 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002770 bool UseGPRIdxMode,
2771 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002772 MachineBasicBlock::iterator I = LoopBB.begin();
2773
2774 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2775 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2776 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2777 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2778
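  // Loop-carried PHIs for the result value and for the exec mask saved by the
  // previous iteration.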
2779 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2780 .addReg(InitReg)
2781 .addMBB(&OrigBB)
2782 .addReg(ResultReg)
2783 .addMBB(&LoopBB);
2784
2785 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2786 .addReg(InitSaveExecReg)
2787 .addMBB(&OrigBB)
2788 .addReg(NewExec)
2789 .addMBB(&LoopBB);
2790
2791 // Read the next variant <- also loop target.
2792 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2793 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2794
2795 // Compare the just read M0 value to all possible Idx values.
2796 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2797 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002798 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002799
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002800 // Update EXEC, save the original EXEC value to VCC.
2801 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2802 .addReg(CondReg, RegState::Kill);
2803
2804 MRI.setSimpleHint(NewExec, CondReg);
2805
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002806 if (UseGPRIdxMode) {
2807 unsigned IdxReg;
2808 if (Offset == 0) {
2809 IdxReg = CurrentIdxReg;
2810 } else {
2811 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2812 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2813 .addReg(CurrentIdxReg, RegState::Kill)
2814 .addImm(Offset);
2815 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002816 unsigned IdxMode = IsIndirectSrc ?
2817 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2818 MachineInstr *SetOn =
2819 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2820 .addReg(IdxReg, RegState::Kill)
2821 .addImm(IdxMode);
2822 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002823 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002824 // Move index from VCC into M0
2825 if (Offset == 0) {
2826 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2827 .addReg(CurrentIdxReg, RegState::Kill);
2828 } else {
2829 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2830 .addReg(CurrentIdxReg, RegState::Kill)
2831 .addImm(Offset);
2832 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002833 }
2834
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002835 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002836 MachineInstr *InsertPt =
2837 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002838 .addReg(AMDGPU::EXEC)
2839 .addReg(NewExec);
2840
2841 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2842 // s_cbranch_scc0?
2843
2844 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2845 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2846 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002847
2848 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002849}
2850
2851 // This has slightly sub-optimal regalloc when the source vector is killed by
2852 // the read. The register allocator does not understand that the kill is
2853 // per-workitem, so the register is kept alive for the whole loop and we end up
2854 // not reusing a subregister from it, using one more VGPR than necessary. This
2855 // extra VGPR was saved back when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002856static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2857 MachineBasicBlock &MBB,
2858 MachineInstr &MI,
2859 unsigned InitResultReg,
2860 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002861 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002862 bool UseGPRIdxMode,
2863 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002864 MachineFunction *MF = MBB.getParent();
2865 MachineRegisterInfo &MRI = MF->getRegInfo();
2866 const DebugLoc &DL = MI.getDebugLoc();
2867 MachineBasicBlock::iterator I(&MI);
2868
2869 unsigned DstReg = MI.getOperand(0).getReg();
Matt Arsenault301162c2017-11-15 21:51:43 +00002870 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2871 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002872
2873 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2874
2875 // Save the EXEC mask
2876 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2877 .addReg(AMDGPU::EXEC);
2878
2879 // To insert the loop we need to split the block. Move everything after this
2880 // point to a new block, and insert a new empty block between the two.
2881 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2882 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2883 MachineFunction::iterator MBBI(MBB);
2884 ++MBBI;
2885
2886 MF->insert(MBBI, LoopBB);
2887 MF->insert(MBBI, RemainderBB);
2888
2889 LoopBB->addSuccessor(LoopBB);
2890 LoopBB->addSuccessor(RemainderBB);
2891
2892 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002893 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002894 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2895
2896 MBB.addSuccessor(LoopBB);
2897
2898 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2899
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002900 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2901 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002902 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002903
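  // The loop is done; restore the original EXEC mask in the remainder block.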
2904 MachineBasicBlock::iterator First = RemainderBB->begin();
2905 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2906 .addReg(SaveExec);
2907
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002908 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002909}
2910
2911// Returns subreg index, offset
2912static std::pair<unsigned, int>
2913computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2914 const TargetRegisterClass *SuperRC,
2915 unsigned VecReg,
2916 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002917 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002918
2919 // Skip out of bounds offsets, or else we would end up using an undefined
2920 // register.
2921 if (Offset >= NumElts || Offset < 0)
2922 return std::make_pair(AMDGPU::sub0, Offset);
2923
2924 return std::make_pair(AMDGPU::sub0 + Offset, 0);
2925}
2926
2927// Return true if the index is an SGPR and was set.
2928static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2929 MachineRegisterInfo &MRI,
2930 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002931 int Offset,
2932 bool UseGPRIdxMode,
2933 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002934 MachineBasicBlock *MBB = MI.getParent();
2935 const DebugLoc &DL = MI.getDebugLoc();
2936 MachineBasicBlock::iterator I(&MI);
2937
2938 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2939 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2940
2941 assert(Idx->getReg() != AMDGPU::NoRegister);
2942
2943 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2944 return false;
2945
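  // In GPR indexing mode the index is programmed with S_SET_GPR_IDX_ON rather
  // than written to M0.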
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002946 if (UseGPRIdxMode) {
2947 unsigned IdxMode = IsIndirectSrc ?
2948 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2949 if (Offset == 0) {
2950 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00002951 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2952 .add(*Idx)
2953 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002954
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002955 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002956 } else {
2957 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2958 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00002959 .add(*Idx)
2960 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002961 MachineInstr *SetOn =
2962 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2963 .addReg(Tmp, RegState::Kill)
2964 .addImm(IdxMode);
2965
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002966 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002967 }
2968
2969 return true;
2970 }
2971
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002972 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002973 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2974 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002975 } else {
2976 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002977 .add(*Idx)
2978 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002979 }
2980
2981 return true;
2982}
2983
2984// Control flow needs to be inserted if indexing with a VGPR.
2985static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2986 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00002987 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002988 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002989 const SIRegisterInfo &TRI = TII->getRegisterInfo();
2990 MachineFunction *MF = MBB.getParent();
2991 MachineRegisterInfo &MRI = MF->getRegInfo();
2992
2993 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002994 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002995 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2996
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002997 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002998
2999 unsigned SubReg;
3000 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003001 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003002
Marek Olsake22fdb92017-03-21 17:00:32 +00003003 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003004
3005 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003006 MachineBasicBlock::iterator I(&MI);
3007 const DebugLoc &DL = MI.getDebugLoc();
3008
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003009 if (UseGPRIdxMode) {
3010 // TODO: Look at the uses to avoid the copy. This may require rescheduling
3011 // to avoid interfering with other uses, so probably requires a new
3012 // optimization pass.
3013 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003014 .addReg(SrcReg, RegState::Undef, SubReg)
3015 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003016 .addReg(AMDGPU::M0, RegState::Implicit);
3017 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3018 } else {
3019 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003020 .addReg(SrcReg, RegState::Undef, SubReg)
3021 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003022 }
3023
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003024 MI.eraseFromParent();
3025
3026 return &MBB;
3027 }
3028
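  // The index is in a VGPR: emit a waterfall loop and do the indirect read
  // inside it, with EXEC restricted to the lanes sharing the current index.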
3029 const DebugLoc &DL = MI.getDebugLoc();
3030 MachineBasicBlock::iterator I(&MI);
3031
3032 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3033 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3034
3035 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3036
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003037 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3038 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003039 MachineBasicBlock *LoopBB = InsPt->getParent();
3040
3041 if (UseGPRIdxMode) {
3042 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003043 .addReg(SrcReg, RegState::Undef, SubReg)
3044 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003045 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003046 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003047 } else {
3048 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003049 .addReg(SrcReg, RegState::Undef, SubReg)
3050 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003051 }
3052
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003053 MI.eraseFromParent();
3054
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003055 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003056}
3057
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003058static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3059 const TargetRegisterClass *VecRC) {
3060 switch (TRI.getRegSizeInBits(*VecRC)) {
3061 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003062 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003063 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003064 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003065 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003066 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003067 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003068 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003069 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003070 return AMDGPU::V_MOVRELD_B32_V16;
3071 default:
3072 llvm_unreachable("unsupported size for MOVRELD pseudos");
3073 }
3074}
3075
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003076static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3077 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003078 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003079 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003080 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3081 MachineFunction *MF = MBB.getParent();
3082 MachineRegisterInfo &MRI = MF->getRegInfo();
3083
3084 unsigned Dst = MI.getOperand(0).getReg();
3085 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3086 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3087 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3088 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3089 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3090
3091 // This can be an immediate, but will be folded later.
3092 assert(Val->getReg());
3093
3094 unsigned SubReg;
3095 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3096 SrcVec->getReg(),
3097 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00003098 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003099
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003100 if (Idx->getReg() == AMDGPU::NoRegister) {
3101 MachineBasicBlock::iterator I(&MI);
3102 const DebugLoc &DL = MI.getDebugLoc();
3103
3104 assert(Offset == 0);
3105
3106 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00003107 .add(*SrcVec)
3108 .add(*Val)
3109 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003110
3111 MI.eraseFromParent();
3112 return &MBB;
3113 }
3114
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003115 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003116 MachineBasicBlock::iterator I(&MI);
3117 const DebugLoc &DL = MI.getDebugLoc();
3118
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003119 if (UseGPRIdxMode) {
3120 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003121 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3122 .add(*Val)
3123 .addReg(Dst, RegState::ImplicitDefine)
3124 .addReg(SrcVec->getReg(), RegState::Implicit)
3125 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003126
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003127 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3128 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003129 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003130
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003131 BuildMI(MBB, I, DL, MovRelDesc)
3132 .addReg(Dst, RegState::Define)
3133 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00003134 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003135 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003136 }
3137
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003138 MI.eraseFromParent();
3139 return &MBB;
3140 }
3141
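  // The index is in a VGPR: clear kill flags on the value (it is reused on
  // every loop iteration) and emit the indirect write inside a waterfall loop.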
3142 if (Val->isReg())
3143 MRI.clearKillFlags(Val->getReg());
3144
3145 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003146
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003147 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3148
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003149 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003150 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003151 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003152
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003153 if (UseGPRIdxMode) {
3154 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003155 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3156 .add(*Val) // src0
3157 .addReg(Dst, RegState::ImplicitDefine)
3158 .addReg(PhiReg, RegState::Implicit)
3159 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003160 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003161 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003162 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003163
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003164 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3165 .addReg(Dst, RegState::Define)
3166 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003167 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003168 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003169 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003170
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003171 MI.eraseFromParent();
3172
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003173 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003174}
3175
Matt Arsenault786724a2016-07-12 21:41:32 +00003176MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3177 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00003178
3179 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3180 MachineFunction *MF = BB->getParent();
3181 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3182
3183 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00003184 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3185 report_fatal_error("missing mem operand from MIMG instruction");
3186 }
Tom Stellard244891d2016-12-20 15:52:17 +00003187 // Add a memoperand for mimg instructions so that they aren't assumed to
3188 // be ordered memory instructions.
3189
Tom Stellard244891d2016-12-20 15:52:17 +00003190 return BB;
3191 }
3192
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003193 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003194 case AMDGPU::S_ADD_U64_PSEUDO:
3195 case AMDGPU::S_SUB_U64_PSEUDO: {
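    // Expand the 64-bit scalar add/sub into 32-bit halves: add/sub the low
    // half, then the high half with carry, and recombine with REG_SEQUENCE.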
3196 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3197 const DebugLoc &DL = MI.getDebugLoc();
3198
3199 MachineOperand &Dest = MI.getOperand(0);
3200 MachineOperand &Src0 = MI.getOperand(1);
3201 MachineOperand &Src1 = MI.getOperand(2);
3202
3203 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3204 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3205
3206 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3207 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3208 &AMDGPU::SReg_32_XM0RegClass);
3209 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3210 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3211 &AMDGPU::SReg_32_XM0RegClass);
3212
3213 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3214 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3215 &AMDGPU::SReg_32_XM0RegClass);
3216 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3217 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3218 &AMDGPU::SReg_32_XM0RegClass);
3219
3220 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3221
3222 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3223 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3224 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3225 .add(Src0Sub0)
3226 .add(Src1Sub0);
3227 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3228 .add(Src0Sub1)
3229 .add(Src1Sub1);
3230 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3231 .addReg(DestSub0)
3232 .addImm(AMDGPU::sub0)
3233 .addReg(DestSub1)
3234 .addImm(AMDGPU::sub1);
3235 MI.eraseFromParent();
3236 return BB;
3237 }
3238 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003239 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003240 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003241 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003242 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003243 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003244 }
Marek Olsak2d825902017-04-28 20:21:58 +00003245 case AMDGPU::SI_INIT_EXEC:
3246 // This should be before all vector instructions.
3247 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3248 AMDGPU::EXEC)
3249 .addImm(MI.getOperand(0).getImm());
3250 MI.eraseFromParent();
3251 return BB;
3252
3253 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3254 // Extract the thread count from an SGPR input and set EXEC accordingly.
3255 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3256 //
3257 // S_BFE_U32 count, input, {shift, 7}
3258 // S_BFM_B64 exec, count, 0
3259 // S_CMP_EQ_U32 count, 64
3260 // S_CMOV_B64 exec, -1
3261 MachineInstr *FirstMI = &*BB->begin();
3262 MachineRegisterInfo &MRI = MF->getRegInfo();
3263 unsigned InputReg = MI.getOperand(0).getReg();
3264 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3265 bool Found = false;
3266
3267 // Move the COPY of the input reg to the beginning, so that we can use it.
3268 for (auto I = BB->begin(); I != &MI; I++) {
3269 if (I->getOpcode() != TargetOpcode::COPY ||
3270 I->getOperand(0).getReg() != InputReg)
3271 continue;
3272
3273 if (I == FirstMI) {
3274 FirstMI = &*++BB->begin();
3275 } else {
3276 I->removeFromParent();
3277 BB->insert(FirstMI, &*I);
3278 }
3279 Found = true;
3280 break;
3281 }
3282 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003283 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003284
3285 // This should be before all vector instructions.
3286 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3287 .addReg(InputReg)
3288 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3289 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3290 AMDGPU::EXEC)
3291 .addReg(CountReg)
3292 .addImm(0);
3293 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3294 .addReg(CountReg, RegState::Kill)
3295 .addImm(64);
3296 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3297 AMDGPU::EXEC)
3298 .addImm(-1);
3299 MI.eraseFromParent();
3300 return BB;
3301 }
3302
Changpeng Fang01f60622016-03-15 17:28:44 +00003303 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003304 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003305 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003306 .add(MI.getOperand(0))
3307 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003308 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003309 return BB;
3310 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003311 case AMDGPU::SI_INDIRECT_SRC_V1:
3312 case AMDGPU::SI_INDIRECT_SRC_V2:
3313 case AMDGPU::SI_INDIRECT_SRC_V4:
3314 case AMDGPU::SI_INDIRECT_SRC_V8:
3315 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003316 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003317 case AMDGPU::SI_INDIRECT_DST_V1:
3318 case AMDGPU::SI_INDIRECT_DST_V2:
3319 case AMDGPU::SI_INDIRECT_DST_V4:
3320 case AMDGPU::SI_INDIRECT_DST_V8:
3321 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003322 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003323 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3324 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003325 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003326 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
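    // Expand the 64-bit select into two 32-bit V_CNDMASK_B32_e64 instructions
    // on the sub0/sub1 halves, both using the same copied condition.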
3327 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003328
3329 unsigned Dst = MI.getOperand(0).getReg();
3330 unsigned Src0 = MI.getOperand(1).getReg();
3331 unsigned Src1 = MI.getOperand(2).getReg();
3332 const DebugLoc &DL = MI.getDebugLoc();
3333 unsigned SrcCond = MI.getOperand(3).getReg();
3334
3335 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3336 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003337 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenault22e41792016-08-27 01:00:37 +00003338
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003339 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3340 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003341 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3342 .addReg(Src0, 0, AMDGPU::sub0)
3343 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003344 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003345 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3346 .addReg(Src0, 0, AMDGPU::sub1)
3347 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003348 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003349
3350 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3351 .addReg(DstLo)
3352 .addImm(AMDGPU::sub0)
3353 .addReg(DstHi)
3354 .addImm(AMDGPU::sub1);
3355 MI.eraseFromParent();
3356 return BB;
3357 }
Matt Arsenault327188a2016-12-15 21:57:11 +00003358 case AMDGPU::SI_BR_UNDEF: {
3359 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3360 const DebugLoc &DL = MI.getDebugLoc();
3361 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003362 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003363 Br->getOperand(1).setIsUndef(true); // read undef SCC
3364 MI.eraseFromParent();
3365 return BB;
3366 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003367 case AMDGPU::ADJCALLSTACKUP:
3368 case AMDGPU::ADJCALLSTACKDOWN: {
3369 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3370 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003371
3372 // Add an implicit use of the frame offset reg to prevent the restore copy
3373 // inserted after the call from being reordered after stack operations in
3374 // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003375 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003376 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3377 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003378 return BB;
3379 }
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003380 case AMDGPU::SI_CALL_ISEL:
3381 case AMDGPU::SI_TCRETURN_ISEL: {
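    // Replace the ISel pseudo with SI_CALL / SI_TCRETURN, folding in the
    // callee's global address taken from the defining SI_PC_ADD_REL_OFFSET.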
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003382 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3383 const DebugLoc &DL = MI.getDebugLoc();
3384 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003385
3386 MachineRegisterInfo &MRI = MF->getRegInfo();
3387 unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3388 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3389 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3390
3391 const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3392
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003393 MachineInstrBuilder MIB;
3394 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3395 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3396 .add(MI.getOperand(0))
3397 .addGlobalAddress(G);
3398 } else {
3399 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3400 .add(MI.getOperand(0))
3401 .addGlobalAddress(G);
3402
3403 // There is an additional imm operand for tcreturn, but it should be in the
3404 // right place already.
3405 }
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003406
3407 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003408 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003409
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003410 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003411 MI.eraseFromParent();
3412 return BB;
3413 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003414 default:
3415 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003416 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003417}
3418
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003419bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3420 return isTypeLegal(VT.getScalarType());
3421}
3422
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003423bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3424 // This currently forces unfolding various combinations of fsub into fma with
3425 // free fneg'd operands. As long as we have fast FMA (controlled by
3426 // isFMAFasterThanFMulAndFAdd), we should perform these.
3427
3428 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3429 // most of these combines appear to be cycle neutral but save on instruction
3430 // count / code size.
3431 return true;
3432}
3433
Mehdi Amini44ede332015-07-09 02:09:04 +00003434EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3435 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003436 if (!VT.isVector()) {
3437 return MVT::i1;
3438 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003439 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003440}
3441
Matt Arsenault94163282016-12-22 16:36:25 +00003442MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3443 // TODO: Should i16 be used always if legal? For now it would force VALU
3444 // shifts.
3445 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003446}
3447
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003448 // Answering this is somewhat tricky and depends on the specific device, since
3449 // devices have different rates for fma and for f64 operations generally.
3450//
3451// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3452// regardless of which device (although the number of cycles differs between
3453// devices), so it is always profitable for f64.
3454//
3455// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3456// only on full rate devices. Normally, we should prefer selecting v_mad_f32
3457// which we can always do even without fused FP ops since it returns the same
3458// result as the separate operations and since it is always full
3459// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3460// however does not support denormals, so we do report fma as faster if we have
3461// a fast fma device and require denormals.
3462//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003463bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3464 VT = VT.getScalarType();
3465
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003466 switch (VT.getSimpleVT().SimpleTy) {
Matt Arsenault0084adc2018-04-30 19:08:16 +00003467 case MVT::f32: {
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003468 // This is as fast on some subtargets. However, we always have full rate f32
3469 // mad available, which returns the same result as the separate operations
Matt Arsenault8d630032015-02-20 22:10:41 +00003470 // and which we should prefer over fma. We can't use mad if we want to support
3471 // denormals, so only report fma as faster in these cases.
Matt Arsenault0084adc2018-04-30 19:08:16 +00003472 if (Subtarget->hasFP32Denormals())
3473 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3474
3475 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3476 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3477 }
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003478 case MVT::f64:
3479 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003480 case MVT::f16:
3481 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003482 default:
3483 break;
3484 }
3485
3486 return false;
3487}
3488
Tom Stellard75aadc22012-12-11 21:25:42 +00003489//===----------------------------------------------------------------------===//
3490// Custom DAG Lowering Operations
3491//===----------------------------------------------------------------------===//
3492
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003493// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3494// wider vector type is legal.
3495SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3496 SelectionDAG &DAG) const {
3497 unsigned Opc = Op.getOpcode();
3498 EVT VT = Op.getValueType();
3499 assert(VT == MVT::v4f16);
3500
3501 SDValue Lo, Hi;
3502 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3503
3504 SDLoc SL(Op);
3505 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3506 Op->getFlags());
3507 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3508 Op->getFlags());
3509
3510 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3511}
3512
3513// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3514// wider vector type is legal.
3515SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3516 SelectionDAG &DAG) const {
3517 unsigned Opc = Op.getOpcode();
3518 EVT VT = Op.getValueType();
3519 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3520
3521 SDValue Lo0, Hi0;
3522 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3523 SDValue Lo1, Hi1;
3524 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3525
3526 SDLoc SL(Op);
3527
3528 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3529 Op->getFlags());
3530 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3531 Op->getFlags());
3532
3533 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3534}
3535
Tom Stellard75aadc22012-12-11 21:25:42 +00003536SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3537 switch (Op.getOpcode()) {
3538 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003539 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003540 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003541 SDValue Result = LowerLOAD(Op, DAG);
3542 assert((!Result.getNode() ||
3543 Result.getNode()->getNumValues() == 2) &&
3544 "Load should return a value and a chain");
3545 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003546 }
Tom Stellardaf775432013-10-23 00:44:32 +00003547
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003548 case ISD::FSIN:
3549 case ISD::FCOS:
3550 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003551 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003552 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003553 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003554 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003555 case ISD::GlobalAddress: {
3556 MachineFunction &MF = DAG.getMachineFunction();
3557 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3558 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003559 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003560 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003561 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003562 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003563 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003564 case ISD::INSERT_VECTOR_ELT:
3565 return lowerINSERT_VECTOR_ELT(Op, DAG);
3566 case ISD::EXTRACT_VECTOR_ELT:
3567 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Matt Arsenault67a98152018-05-16 11:47:30 +00003568 case ISD::BUILD_VECTOR:
3569 return lowerBUILD_VECTOR(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003570 case ISD::FP_ROUND:
3571 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003572 case ISD::TRAP:
Matt Arsenault3e025382017-04-24 17:49:13 +00003573 return lowerTRAP(Op, DAG);
Tony Tye43259df2018-05-16 16:19:34 +00003574 case ISD::DEBUGTRAP:
3575 return lowerDEBUGTRAP(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003576 case ISD::FABS:
3577 case ISD::FNEG:
3578 return splitUnaryVectorOp(Op, DAG);
3579 case ISD::SHL:
3580 case ISD::SRA:
3581 case ISD::SRL:
3582 case ISD::ADD:
3583 case ISD::SUB:
3584 case ISD::MUL:
3585 case ISD::SMIN:
3586 case ISD::SMAX:
3587 case ISD::UMIN:
3588 case ISD::UMAX:
3589 case ISD::FMINNUM:
3590 case ISD::FMAXNUM:
3591 case ISD::FADD:
3592 case ISD::FMUL:
3593 return splitBinaryVectorOp(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003594 }
3595 return SDValue();
3596}
3597
Matt Arsenault1349a042018-05-22 06:32:10 +00003598static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3599 const SDLoc &DL,
3600 SelectionDAG &DAG, bool Unpacked) {
3601 if (!LoadVT.isVector())
3602 return Result;
3603
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003604 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3605 // Truncate to v2i16/v4i16.
3606 EVT IntLoadVT = LoadVT.changeTypeToInteger();
Matt Arsenault1349a042018-05-22 06:32:10 +00003607
3608    // Work around the legalizer not scalarizing truncate after vector op
3609    // legalization by not creating an intermediate vector truncate.
3610 SmallVector<SDValue, 4> Elts;
3611 DAG.ExtractVectorElements(Result, Elts);
3612 for (SDValue &Elt : Elts)
3613 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3614
3615 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3616
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003617 // Bitcast to original type (v2f16/v4f16).
Matt Arsenault1349a042018-05-22 06:32:10 +00003618 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003619 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003620
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003621 // Cast back to the original packed type.
3622 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3623}
3624
Matt Arsenault1349a042018-05-22 06:32:10 +00003625SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3626 MemSDNode *M,
3627 SelectionDAG &DAG,
3628 bool IsIntrinsic) const {
3629 SDLoc DL(M);
3630 SmallVector<SDValue, 10> Ops;
3631 Ops.reserve(M->getNumOperands());
3632
3633 Ops.push_back(M->getOperand(0));
3634 if (IsIntrinsic)
3635 Ops.push_back(DAG.getConstant(Opcode, DL, MVT::i32));
3636
3637 // Skip 1, as it is the intrinsic ID.
3638 for (unsigned I = 2, E = M->getNumOperands(); I != E; ++I)
3639 Ops.push_back(M->getOperand(I));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003640
3641 bool Unpacked = Subtarget->hasUnpackedD16VMem();
Matt Arsenault1349a042018-05-22 06:32:10 +00003642 EVT LoadVT = M->getValueType(0);
3643
Matt Arsenault1349a042018-05-22 06:32:10 +00003644 EVT EquivLoadVT = LoadVT;
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003645 if (Unpacked && LoadVT.isVector()) {
3646    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3647                                   LoadVT.getVectorNumElements());
Matt Arsenault1349a042018-05-22 06:32:10 +00003649 }
3650
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003651 // Change from v4f16/v2f16 to EquivLoadVT.
3652 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3653
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003654 SDValue Load
3655 = DAG.getMemIntrinsicNode(
3656 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3657 VTList, Ops, M->getMemoryVT(),
3658 M->getMemOperand());
3659 if (!Unpacked) // Just adjusted the opcode.
3660 return Load;
Changpeng Fang4737e892018-01-18 22:08:53 +00003661
Matt Arsenault1349a042018-05-22 06:32:10 +00003662 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
Changpeng Fang4737e892018-01-18 22:08:53 +00003663
Matt Arsenault1349a042018-05-22 06:32:10 +00003664 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003665}
3666
Matt Arsenault3aef8092017-01-23 23:09:58 +00003667void SITargetLowering::ReplaceNodeResults(SDNode *N,
3668 SmallVectorImpl<SDValue> &Results,
3669 SelectionDAG &DAG) const {
3670 switch (N->getOpcode()) {
3671 case ISD::INSERT_VECTOR_ELT: {
3672 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3673 Results.push_back(Res);
3674 return;
3675 }
3676 case ISD::EXTRACT_VECTOR_ELT: {
3677 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3678 Results.push_back(Res);
3679 return;
3680 }
Matt Arsenault1f17c662017-02-22 00:27:34 +00003681 case ISD::INTRINSIC_WO_CHAIN: {
3682 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Marek Olsak13e47412018-01-31 20:18:04 +00003683 switch (IID) {
3684 case Intrinsic::amdgcn_cvt_pkrtz: {
Matt Arsenault1f17c662017-02-22 00:27:34 +00003685 SDValue Src0 = N->getOperand(1);
3686 SDValue Src1 = N->getOperand(2);
3687 SDLoc SL(N);
3688 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3689 Src0, Src1);
Matt Arsenault1f17c662017-02-22 00:27:34 +00003690 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3691 return;
3692 }
Marek Olsak13e47412018-01-31 20:18:04 +00003693 case Intrinsic::amdgcn_cvt_pknorm_i16:
3694 case Intrinsic::amdgcn_cvt_pknorm_u16:
3695 case Intrinsic::amdgcn_cvt_pk_i16:
3696 case Intrinsic::amdgcn_cvt_pk_u16: {
3697 SDValue Src0 = N->getOperand(1);
3698 SDValue Src1 = N->getOperand(2);
3699 SDLoc SL(N);
3700 unsigned Opcode;
3701
3702 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3703 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3704 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3705 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3706 else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3707 Opcode = AMDGPUISD::CVT_PK_I16_I32;
3708 else
3709 Opcode = AMDGPUISD::CVT_PK_U16_U32;
3710
Matt Arsenault709374d2018-08-01 20:13:58 +00003711 EVT VT = N->getValueType(0);
3712 if (isTypeLegal(VT))
3713 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3714 else {
3715 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3716 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3717 }
Marek Olsak13e47412018-01-31 20:18:04 +00003718 return;
3719 }
3720 }
Simon Pilgrimd362d272017-07-08 19:50:03 +00003721 break;
Matt Arsenault1f17c662017-02-22 00:27:34 +00003722 }
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003723 case ISD::INTRINSIC_W_CHAIN: {
Matt Arsenault1349a042018-05-22 06:32:10 +00003724 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003725 Results.push_back(Res);
Matt Arsenault1349a042018-05-22 06:32:10 +00003726 Results.push_back(Res.getValue(1));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003727 return;
3728 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003729
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003730 break;
3731 }
Matt Arsenault4a486232017-04-19 20:53:07 +00003732 case ISD::SELECT: {
3733 SDLoc SL(N);
3734 EVT VT = N->getValueType(0);
3735 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3736 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3737 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3738
3739 EVT SelectVT = NewVT;
3740 if (NewVT.bitsLT(MVT::i32)) {
3741 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3742 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3743 SelectVT = MVT::i32;
3744 }
3745
3746 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3747 N->getOperand(0), LHS, RHS);
3748
3749 if (NewVT != SelectVT)
3750 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3751 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3752 return;
3753 }
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003754 case ISD::FNEG: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003755 if (N->getValueType(0) != MVT::v2f16)
3756 break;
3757
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003758 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003759 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3760
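    // Flip the sign of both f16 halves at once: XOR the packed i32 with
    // 0x80008000, which toggles the sign bit of each 16-bit half.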
3761 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3762 BC,
3763 DAG.getConstant(0x80008000, SL, MVT::i32));
3764 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3765 return;
3766 }
3767 case ISD::FABS: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003768 if (N->getValueType(0) != MVT::v2f16)
3769 break;
3770
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003771 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003772 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3773
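    // Clear the sign bit of both f16 halves at once by AND'ing the packed i32
    // with 0x7fff7fff.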
3774 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3775 BC,
3776 DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3777 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3778 return;
3779 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00003780 default:
3781 break;
3782 }
3783}
3784
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00003785/// Helper function for LowerBRCOND
Tom Stellardf8794352012-12-19 22:10:31 +00003786static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00003787
Tom Stellardf8794352012-12-19 22:10:31 +00003788 SDNode *Parent = Value.getNode();
3789 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3790 I != E; ++I) {
3791
3792 if (I.getUse().get() != Value)
3793 continue;
3794
3795 if (I->getOpcode() == Opcode)
3796 return *I;
3797 }
Craig Topper062a2ba2014-04-25 05:30:21 +00003798 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003799}
3800
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003801unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
Matt Arsenault6408c912016-09-16 22:11:18 +00003802 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3803 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003804 case Intrinsic::amdgcn_if:
3805 return AMDGPUISD::IF;
3806 case Intrinsic::amdgcn_else:
3807 return AMDGPUISD::ELSE;
3808 case Intrinsic::amdgcn_loop:
3809 return AMDGPUISD::LOOP;
3810 case Intrinsic::amdgcn_end_cf:
3811 llvm_unreachable("should not occur");
Matt Arsenault6408c912016-09-16 22:11:18 +00003812 default:
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003813 return 0;
Matt Arsenault6408c912016-09-16 22:11:18 +00003814 }
Tom Stellardbc4497b2016-02-12 23:45:29 +00003815 }
Matt Arsenault6408c912016-09-16 22:11:18 +00003816
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003817 // break, if_break, else_break are all only used as inputs to loop, not
3818 // directly as branch conditions.
3819 return 0;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003820}
3821
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003822void SITargetLowering::createDebuggerPrologueStackObjects(
3823 MachineFunction &MF) const {
3824 // Create stack objects that are used for emitting debugger prologue.
3825 //
3826 // Debugger prologue writes work group IDs and work item IDs to scratch memory
3827 // at fixed location in the following format:
3828 // offset 0: work group ID x
3829 // offset 4: work group ID y
3830 // offset 8: work group ID z
3831 // offset 16: work item ID x
3832 // offset 20: work item ID y
3833 // offset 24: work item ID z
3834 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3835 int ObjectIdx = 0;
3836
3837 // For each dimension:
3838 for (unsigned i = 0; i < 3; ++i) {
3839 // Create fixed stack object for work group ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003840 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003841 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3842 // Create fixed stack object for work item ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003843 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003844 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3845 }
3846}
3847
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003848bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3849 const Triple &TT = getTargetMachine().getTargetTriple();
Matt Arsenault923712b2018-02-09 16:57:57 +00003850 return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3851 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003852 AMDGPU::shouldEmitConstantsToTextSection(TT);
3853}
3854
3855bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00003856 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00003857 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3858 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003859 !shouldEmitFixup(GV) &&
3860 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3861}
3862
3863bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3864 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3865}
3866
Tom Stellardf8794352012-12-19 22:10:31 +00003867/// This transforms the control flow intrinsics to get the branch destination as
3868/// the last parameter, and also switches the branch target with BR if the need arises.
3869SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3870 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00003871 SDLoc DL(BRCOND);
Tom Stellardf8794352012-12-19 22:10:31 +00003872
3873 SDNode *Intr = BRCOND.getOperand(1).getNode();
3874 SDValue Target = BRCOND.getOperand(2);
Craig Topper062a2ba2014-04-25 05:30:21 +00003875 SDNode *BR = nullptr;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003876 SDNode *SetCC = nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003877
3878 if (Intr->getOpcode() == ISD::SETCC) {
3879 // As long as we negate the condition everything is fine
Tom Stellardbc4497b2016-02-12 23:45:29 +00003880 SetCC = Intr;
Tom Stellardf8794352012-12-19 22:10:31 +00003881 Intr = SetCC->getOperand(0).getNode();
3882
3883 } else {
3884 // Get the target from BR if we don't negate the condition
3885 BR = findUser(BRCOND, ISD::BR);
3886 Target = BR->getOperand(1);
3887 }
3888
Matt Arsenault6408c912016-09-16 22:11:18 +00003889 // FIXME: This changes the types of the intrinsics instead of introducing new
3890 // nodes with the correct types.
3891 // e.g. llvm.amdgcn.loop
3892
3893 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3894 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3895
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003896 unsigned CFNode = isCFIntrinsic(Intr);
3897 if (CFNode == 0) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00003898 // This is a uniform branch so we don't need to legalize.
3899 return BRCOND;
3900 }
3901
Matt Arsenault6408c912016-09-16 22:11:18 +00003902 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
3903 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
3904
Tom Stellardbc4497b2016-02-12 23:45:29 +00003905 assert(!SetCC ||
3906 (SetCC->getConstantOperandVal(1) == 1 &&
Tom Stellardbc4497b2016-02-12 23:45:29 +00003907 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
3908 ISD::SETNE));
Tom Stellardf8794352012-12-19 22:10:31 +00003909
Tom Stellardf8794352012-12-19 22:10:31 +00003910 // operands of the new intrinsic call
3911 SmallVector<SDValue, 4> Ops;
Matt Arsenault6408c912016-09-16 22:11:18 +00003912 if (HaveChain)
3913 Ops.push_back(BRCOND.getOperand(0));
3914
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003915 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Tom Stellardf8794352012-12-19 22:10:31 +00003916 Ops.push_back(Target);
3917
Matt Arsenault6408c912016-09-16 22:11:18 +00003918 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
3919
Tom Stellardf8794352012-12-19 22:10:31 +00003920 // build the new intrinsic call
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003921 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00003922
Matt Arsenault6408c912016-09-16 22:11:18 +00003923 if (!HaveChain) {
3924 SDValue Ops[] = {
3925 SDValue(Result, 0),
3926 BRCOND.getOperand(0)
3927 };
3928
3929 Result = DAG.getMergeValues(Ops, DL).getNode();
3930 }
3931
Tom Stellardf8794352012-12-19 22:10:31 +00003932 if (BR) {
3933 // Give the branch instruction our target
3934 SDValue Ops[] = {
3935 BR->getOperand(0),
3936 BRCOND.getOperand(2)
3937 };
Chandler Carruth356665a2014-08-01 22:09:43 +00003938 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
3939 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
3940 BR = NewBR.getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00003941 }
3942
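  // The last value produced by the new node is always the chain.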
3943 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
3944
3945 // Copy the intrinsic results to registers
3946 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
3947 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
3948 if (!CopyToReg)
3949 continue;
3950
3951 Chain = DAG.getCopyToReg(
3952 Chain, DL,
3953 CopyToReg->getOperand(1),
3954 SDValue(Result, i - 1),
3955 SDValue());
3956
3957 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
3958 }
3959
3960 // Remove the old intrinsic from the chain
3961 DAG.ReplaceAllUsesOfValueWith(
3962 SDValue(Intr, Intr->getNumValues() - 1),
3963 Intr->getOperand(0));
3964
3965 return Chain;
Tom Stellard75aadc22012-12-11 21:25:42 +00003966}
3967
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003968SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
3969 SDValue Op,
3970 const SDLoc &DL,
3971 EVT VT) const {
3972 return Op.getValueType().bitsLE(VT) ?
3973 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
3974 DAG.getNode(ISD::FTRUNC, DL, VT, Op);
3975}
3976
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003977SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultafe614c2016-11-18 18:33:36 +00003978 assert(Op.getValueType() == MVT::f16 &&
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003979 "Do not know how to custom lower FP_ROUND for non-f16 type");
3980
Matt Arsenaultafe614c2016-11-18 18:33:36 +00003981 SDValue Src = Op.getOperand(0);
3982 EVT SrcVT = Src.getValueType();
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003983 if (SrcVT != MVT::f64)
3984 return Op;
3985
3986 SDLoc DL(Op);
Matt Arsenaultafe614c2016-11-18 18:33:36 +00003987
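  // Lower f64 -> f16 by converting to the half bits with FP_TO_FP16 (which
  // produces an i32), then truncating to i16 and bitcasting to f16.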
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003988 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
3989 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
Mandeep Singh Grang5e1697e2017-06-06 05:08:36 +00003990 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003991}
3992
Matt Arsenault3e025382017-04-24 17:49:13 +00003993SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
3994 SDLoc SL(Op);
Matt Arsenault3e025382017-04-24 17:49:13 +00003995 SDValue Chain = Op.getOperand(0);
3996
Tom Stellard5bfbae52018-07-11 20:59:01 +00003997 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00003998 !Subtarget->isTrapHandlerEnabled())
Matt Arsenault3e025382017-04-24 17:49:13 +00003999 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
Tony Tye43259df2018-05-16 16:19:34 +00004000
4001 MachineFunction &MF = DAG.getMachineFunction();
4002 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4003 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4004 assert(UserSGPR != AMDGPU::NoRegister);
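  // The queue pointer is passed to the trap handler in SGPR0_SGPR1, so copy it
  // there before emitting the TRAP node.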
4005 SDValue QueuePtr = CreateLiveInRegister(
4006 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4007 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4008 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4009 QueuePtr, SDValue());
4010 SDValue Ops[] = {
4011 ToReg,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004012 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
Tony Tye43259df2018-05-16 16:19:34 +00004013 SGPR01,
4014 ToReg.getValue(1)
4015 };
4016 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4017}
4018
4019SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4020 SDLoc SL(Op);
4021 SDValue Chain = Op.getOperand(0);
4022 MachineFunction &MF = DAG.getMachineFunction();
4023
Tom Stellard5bfbae52018-07-11 20:59:01 +00004024 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004025 !Subtarget->isTrapHandlerEnabled()) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004026 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
Matt Arsenault3e025382017-04-24 17:49:13 +00004027 "debugtrap handler not supported",
4028 Op.getDebugLoc(),
4029 DS_Warning);
Matthias Braunf1caa282017-12-15 22:22:58 +00004030 LLVMContext &Ctx = MF.getFunction().getContext();
Matt Arsenault3e025382017-04-24 17:49:13 +00004031 Ctx.diagnose(NoTrap);
4032 return Chain;
4033 }
Matt Arsenault3e025382017-04-24 17:49:13 +00004034
Tony Tye43259df2018-05-16 16:19:34 +00004035 SDValue Ops[] = {
4036 Chain,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004037 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
Tony Tye43259df2018-05-16 16:19:34 +00004038 };
4039 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
Matt Arsenault3e025382017-04-24 17:49:13 +00004040}
4041
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004042SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004043 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004044 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4045 if (Subtarget->hasApertureRegs()) {
4046 unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
4047 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4048 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4049 unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
4050 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4051 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4052 unsigned Encoding =
4053 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4054 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4055 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
Matt Arsenaulte823d922017-02-18 18:29:53 +00004056
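    // Read the aperture field out of the hardware register with s_getreg, then
    // shift it into position to form the 32-bit aperture value (used as the
    // high half of the 64-bit flat address).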
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004057 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4058 SDValue ApertureReg = SDValue(
4059 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4060 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4061 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004062 }
4063
Matt Arsenault99c14522016-04-25 19:27:24 +00004064 MachineFunction &MF = DAG.getMachineFunction();
4065 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004066 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4067 assert(UserSGPR != AMDGPU::NoRegister);
4068
Matt Arsenault99c14522016-04-25 19:27:24 +00004069 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004070 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004071
4072 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4073 // private_segment_aperture_base_hi.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004074 uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004075
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004076 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004077
4078 // TODO: Use custom target PseudoSourceValue.
4079 // TODO: We should use the value from the IR intrinsic call, but it might not
4080 // be available and how do we get it?
4081 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004082 AMDGPUASI.CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004083
4084 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004085 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004086 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004087 MachineMemOperand::MODereferenceable |
4088 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004089}
4090
4091SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4092 SelectionDAG &DAG) const {
4093 SDLoc SL(Op);
4094 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4095
4096 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004097 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4098
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004099 const AMDGPUTargetMachine &TM =
4100 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4101
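  // Null pointers must be preserved across the cast: compare the source against
  // its segment's null value and select the destination segment's null value
  // when the input is null.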
Matt Arsenault99c14522016-04-25 19:27:24 +00004102 // flat -> local/private
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004103 if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004104 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004105
4106 if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
4107 DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004108 unsigned NullVal = TM.getNullPointerValue(DestAS);
4109 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004110 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4111 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4112
4113 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4114 NonNull, Ptr, SegmentNullPtr);
4115 }
4116 }
4117
4118 // local/private -> flat
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004119 if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004120 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004121
4122 if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
4123 SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004124 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4125 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004126
Matt Arsenault99c14522016-04-25 19:27:24 +00004127 SDValue NonNull
4128 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4129
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004130 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004131 SDValue CvtPtr
4132 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4133
4134 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4135 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4136 FlatNullPtr);
4137 }
4138 }
4139
4140 // global <-> flat are no-ops and never emitted.
4141
4142 const MachineFunction &MF = DAG.getMachineFunction();
4143 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004144 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004145 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4146
4147 return DAG.getUNDEF(ASC->getValueType(0));
4148}
4149
Matt Arsenault3aef8092017-01-23 23:09:58 +00004150SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4151 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004152 SDValue Vec = Op.getOperand(0);
4153 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004154 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004155 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004156 EVT EltVT = VecVT.getVectorElementType();
4157 unsigned VecSize = VecVT.getSizeInBits();
4158 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004159
4161 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004162
4163 unsigned NumElts = VecVT.getVectorNumElements();
4164 SDLoc SL(Op);
4165 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4166
Matt Arsenault9224c002018-06-05 19:52:46 +00004167 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004168 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4169
4170 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4171 DAG.getConstant(0, SL, MVT::i32));
4172 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4173 DAG.getConstant(1, SL, MVT::i32));
4174
4175 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4176 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4177
4178 unsigned Idx = KIdx->getZExtValue();
4179 bool InsertLo = Idx < 2;
4180 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4181 InsertLo ? LoVec : HiVec,
4182 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4183 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4184
4185 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4186
4187 SDValue Concat = InsertLo ?
4188 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4189 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4190
4191 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4192 }
4193
Matt Arsenault3aef8092017-01-23 23:09:58 +00004194 if (isa<ConstantSDNode>(Idx))
4195 return SDValue();
4196
Matt Arsenault9224c002018-06-05 19:52:46 +00004197 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004198
Matt Arsenault3aef8092017-01-23 23:09:58 +00004199 // Avoid stack access for dynamic indexing.
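  // Instead, treat the vector as a single VecSize-bit integer and merge the new
  // element in with shift/mask operations (a bitfield insert).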
Matt Arsenault9224c002018-06-05 19:52:46 +00004200 SDValue Val = InsVal;
4201 if (InsVal.getValueType() == MVT::f16)
4202 Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004203
4204 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Matt Arsenault67a98152018-05-16 11:47:30 +00004205 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Val);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004206
Matt Arsenault9224c002018-06-05 19:52:46 +00004207 assert(isPowerOf2_32(EltSize));
4208 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4209
Matt Arsenault3aef8092017-01-23 23:09:58 +00004210 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004211 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004212
Matt Arsenault67a98152018-05-16 11:47:30 +00004213 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4214 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4215 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004216 ScaledIdx);
4217
Matt Arsenault67a98152018-05-16 11:47:30 +00004218 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4219 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4220 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004221
Matt Arsenault67a98152018-05-16 11:47:30 +00004222 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4223 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004224}
4225
4226SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4227 SelectionDAG &DAG) const {
4228 SDLoc SL(Op);
4229
4230 EVT ResultVT = Op.getValueType();
4231 SDValue Vec = Op.getOperand(0);
4232 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004233 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004234 unsigned VecSize = VecVT.getSizeInBits();
4235 EVT EltVT = VecVT.getVectorElementType();
4236 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004237
Matt Arsenault98f29462017-05-17 20:30:58 +00004238 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4239
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004240 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004241  // source modifiers before obscuring the value with bit operations.
4242
4243 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4244 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4245 return Combined;
4246
Matt Arsenault9224c002018-06-05 19:52:46 +00004247 unsigned EltSize = EltVT.getSizeInBits();
4248 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004249
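  // Dynamic extracts are lowered as a shift of the bitcast integer by the
  // element's bit offset, followed by a truncate / extend to the result type.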
Matt Arsenault9224c002018-06-05 19:52:46 +00004250 MVT IntVT = MVT::getIntegerVT(VecSize);
4251 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4252
4253 // Convert vector index to bit-index (* EltSize)
4254 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004255
Matt Arsenault67a98152018-05-16 11:47:30 +00004256 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4257 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004258
Matt Arsenault67a98152018-05-16 11:47:30 +00004259 if (ResultVT == MVT::f16) {
4260 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4261 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4262 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004263
Matt Arsenault67a98152018-05-16 11:47:30 +00004264 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4265}
4266
4267SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4268 SelectionDAG &DAG) const {
4269 SDLoc SL(Op);
4270 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004271
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004272 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4273 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4274
4275 // Turn into pair of packed build_vectors.
4276 // TODO: Special case for constants that can be materialized with s_mov_b64.
4277 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4278 { Op.getOperand(0), Op.getOperand(1) });
4279 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4280 { Op.getOperand(2), Op.getOperand(3) });
4281
4282 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4283 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4284
4285 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4286 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4287 }
4288
Matt Arsenault1349a042018-05-22 06:32:10 +00004289 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault67a98152018-05-16 11:47:30 +00004290
Matt Arsenault1349a042018-05-22 06:32:10 +00004291 SDValue Lo = Op.getOperand(0);
4292 SDValue Hi = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004293
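  // Pack the two 16-bit elements into a single 32-bit scalar: zero-extend each
  // half, shift the high element left by 16 and OR them together.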
Matt Arsenault1349a042018-05-22 06:32:10 +00004294 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4295 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault67a98152018-05-16 11:47:30 +00004296
Matt Arsenault1349a042018-05-22 06:32:10 +00004297 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4298 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4299
4300 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4301 DAG.getConstant(16, SL, MVT::i32));
4302
4303 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4304
4305 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004306}
4307
Tom Stellard418beb72016-07-13 14:23:33 +00004308bool
4309SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4310 // We can fold offsets for anything that doesn't require a GOT relocation.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004311 return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00004312 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
4313 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004314 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004315}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004316
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004317static SDValue
4318buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4319 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4320 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004321 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4322 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004323 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004324 // For constant address space:
4325 // s_getpc_b64 s[0:1]
4326 // s_add_u32 s0, s0, $symbol
4327 // s_addc_u32 s1, s1, 0
4328 //
4329 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4330 // a fixup or relocation is emitted to replace $symbol with a literal
4331 // constant, which is a pc-relative offset from the encoding of the $symbol
4332 // operand to the global variable.
4333 //
4334 // For global address space:
4335 // s_getpc_b64 s[0:1]
4336 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4337 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4338 //
4339 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4340 // fixups or relocations are emitted to replace $symbol@*@lo and
4341 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4342 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4343 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004344 //
4345 // What we want here is an offset from the value returned by s_getpc
4346 // (which is the address of the s_add_u32 instruction) to the global
4347 // variable, but since the encoding of $symbol starts 4 bytes after the start
4348 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4349 // small. This requires us to add 4 to the global variable offset in order to
4350 // compute the correct address.
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004351 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4352 GAFlags);
4353 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4354 GAFlags == SIInstrInfo::MO_NONE ?
4355 GAFlags : GAFlags + 1);
4356 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004357}
4358
Tom Stellard418beb72016-07-13 14:23:33 +00004359SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4360 SDValue Op,
4361 SelectionDAG &DAG) const {
4362 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004363 const GlobalValue *GV = GSD->getGlobal();
Tom Stellard418beb72016-07-13 14:23:33 +00004364
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004365 if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
Matt Arsenault923712b2018-02-09 16:57:57 +00004366 GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004367 GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
4368 // FIXME: It isn't correct to rely on the type of the pointer. This should
4369 // be removed when address space 0 is 64-bit.
4370 !GV->getType()->getElementType()->isFunctionTy())
Tom Stellard418beb72016-07-13 14:23:33 +00004371 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4372
4373 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00004374 EVT PtrVT = Op.getValueType();
4375
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004376 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00004377 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004378 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004379 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4380 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004381
4382 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004383 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004384
4385 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004386 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00004387 const DataLayout &DataLayout = DAG.getDataLayout();
4388 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
4389 // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
4390 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
4391
Justin Lebar9c375812016-07-15 18:27:10 +00004392 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00004393 MachineMemOperand::MODereferenceable |
4394 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00004395}
4396
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004397SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4398 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004399 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4400 // the destination register.
4401 //
Tom Stellardfc92e772015-05-12 14:18:14 +00004402 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4403 // so we will end up with redundant moves to m0.
4404 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004405 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4406
4407 // A Null SDValue creates a glue result.
4408 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4409 V, Chain);
4410 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00004411}
4412
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004413SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4414 SDValue Op,
4415 MVT VT,
4416 unsigned Offset) const {
4417 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004418 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004419 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004420 // The local size values will have the hi 16-bits as zero.
4421 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4422 DAG.getValueType(VT));
4423}
4424
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004425static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4426 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004427 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004428 "non-hsa intrinsic with hsa target",
4429 DL.getDebugLoc());
4430 DAG.getContext()->diagnose(BadIntrin);
4431 return DAG.getUNDEF(VT);
4432}
4433
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004434static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4435 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004436 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004437 "intrinsic not supported on subtarget",
4438 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00004439 DAG.getContext()->diagnose(BadIntrin);
4440 return DAG.getUNDEF(VT);
4441}
4442
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004443static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
4444 ArrayRef<SDValue> Elts) {
4445 assert(!Elts.empty());
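  // Pick the smallest legal f32 vector type that can hold all of the elements
  // (1, 2, 4, 8 or 16 dwords); unused lanes are filled with undef below.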
4446 MVT Type;
4447 unsigned NumElts;
4448
4449 if (Elts.size() == 1) {
4450 Type = MVT::f32;
4451 NumElts = 1;
4452 } else if (Elts.size() == 2) {
4453 Type = MVT::v2f32;
4454 NumElts = 2;
4455 } else if (Elts.size() <= 4) {
4456 Type = MVT::v4f32;
4457 NumElts = 4;
4458 } else if (Elts.size() <= 8) {
4459 Type = MVT::v8f32;
4460 NumElts = 8;
4461 } else {
4462 assert(Elts.size() <= 16);
4463 Type = MVT::v16f32;
4464 NumElts = 16;
4465 }
4466
4467 SmallVector<SDValue, 16> VecElts(NumElts);
4468 for (unsigned i = 0; i < Elts.size(); ++i) {
4469 SDValue Elt = Elts[i];
4470 if (Elt.getValueType() != MVT::f32)
4471 Elt = DAG.getBitcast(MVT::f32, Elt);
4472 VecElts[i] = Elt;
4473 }
4474 for (unsigned i = Elts.size(); i < NumElts; ++i)
4475 VecElts[i] = DAG.getUNDEF(MVT::f32);
4476
4477 if (NumElts == 1)
4478 return VecElts[0];
4479 return DAG.getBuildVector(Type, DL, VecElts);
4480}
4481
4482static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
4483 SDValue *GLC, SDValue *SLC) {
4484 auto CachePolicyConst = dyn_cast<ConstantSDNode>(CachePolicy.getNode());
4485 if (!CachePolicyConst)
4486 return false;
4487
4488 uint64_t Value = CachePolicyConst->getZExtValue();
4489 SDLoc DL(CachePolicy);
4490 if (GLC) {
4491 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4492 Value &= ~(uint64_t)0x1;
4493 }
4494 if (SLC) {
4495 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4496 Value &= ~(uint64_t)0x2;
4497 }
4498
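  // Only succeed if no unknown cache policy bits remain set.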
4499 return Value == 0;
4500}
4501
4502SDValue SITargetLowering::lowerImage(SDValue Op,
4503 const AMDGPU::ImageDimIntrinsicInfo *Intr,
4504 SelectionDAG &DAG) const {
4505 SDLoc DL(Op);
4506 MachineFunction &MF = DAG.getMachineFunction();
4507 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4508 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
4509 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004510 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
4511 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
4512 unsigned IntrOpcode = Intr->BaseOpcode;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004513
4514 SmallVector<EVT, 2> ResultTypes(Op->value_begin(), Op->value_end());
4515 bool IsD16 = false;
4516 SDValue VData;
4517 int NumVDataDwords;
4518 unsigned AddrIdx; // Index of first address argument
4519 unsigned DMask;
4520
4521 if (BaseOpcode->Atomic) {
4522 VData = Op.getOperand(2);
4523
4524 bool Is64Bit = VData.getValueType() == MVT::i64;
4525 if (BaseOpcode->AtomicX2) {
4526 SDValue VData2 = Op.getOperand(3);
4527 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
4528 {VData, VData2});
4529 if (Is64Bit)
4530 VData = DAG.getBitcast(MVT::v4i32, VData);
4531
4532 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
4533 DMask = Is64Bit ? 0xf : 0x3;
4534 NumVDataDwords = Is64Bit ? 4 : 2;
4535 AddrIdx = 4;
4536 } else {
4537 DMask = Is64Bit ? 0x3 : 0x1;
4538 NumVDataDwords = Is64Bit ? 2 : 1;
4539 AddrIdx = 3;
4540 }
4541 } else {
4542 unsigned DMaskIdx;
4543
4544 if (BaseOpcode->Store) {
4545 VData = Op.getOperand(2);
4546
4547 MVT StoreVT = VData.getSimpleValueType();
4548 if (StoreVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004549 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004550 !BaseOpcode->HasD16)
4551 return Op; // D16 is unsupported for this instruction
4552
4553 IsD16 = true;
4554 VData = handleD16VData(VData, DAG);
4555 }
4556
4557 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
4558 DMaskIdx = 3;
4559 } else {
4560 MVT LoadVT = Op.getSimpleValueType();
4561 if (LoadVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004562 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004563 !BaseOpcode->HasD16)
4564 return Op; // D16 is unsupported for this instruction
4565
4566 IsD16 = true;
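        // With unpacked D16 memory instructions each f16 component occupies a
        // full dword of the result, so widen the result type accordingly.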
4567 if (LoadVT.isVector() && Subtarget->hasUnpackedD16VMem())
4568 ResultTypes[0] = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
4569 }
4570
4571 NumVDataDwords = (ResultTypes[0].getSizeInBits() + 31) / 32;
4572 DMaskIdx = isa<MemSDNode>(Op) ? 2 : 1;
4573 }
4574
4575 auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
4576 if (!DMaskConst)
4577 return Op;
4578
4579 AddrIdx = DMaskIdx + 1;
4580 DMask = DMaskConst->getZExtValue();
4581 if (!DMask && !BaseOpcode->Store) {
4582 // Eliminate no-op loads. Stores with dmask == 0 are *not* no-op: they
4583 // store the channels' default values.
4584 SDValue Undef = DAG.getUNDEF(Op.getValueType());
4585 if (isa<MemSDNode>(Op))
4586 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
4587 return Undef;
4588 }
4589 }
4590
4591 unsigned NumVAddrs = BaseOpcode->NumExtraArgs +
4592 (BaseOpcode->Gradients ? DimInfo->NumGradients : 0) +
4593 (BaseOpcode->Coordinates ? DimInfo->NumCoords : 0) +
4594 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
4595 SmallVector<SDValue, 4> VAddrs;
4596 for (unsigned i = 0; i < NumVAddrs; ++i)
4597 VAddrs.push_back(Op.getOperand(AddrIdx + i));
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004598
4599  // Optimize _L to _LZ when the constant LOD is zero (or negative).
4600 if (LZMappingInfo) {
4601 if (auto ConstantLod =
4602 dyn_cast<ConstantFPSDNode>(VAddrs[NumVAddrs-1].getNode())) {
4603 if (ConstantLod->isZero() || ConstantLod->isNegative()) {
4604 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
4605 VAddrs.pop_back(); // remove 'lod'
4606 }
4607 }
4608 }
4609
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004610 SDValue VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
4611
4612 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
4613 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
4614 unsigned CtrlIdx; // Index of texfailctrl argument
4615 SDValue Unorm;
4616 if (!BaseOpcode->Sampler) {
4617 Unorm = True;
4618 CtrlIdx = AddrIdx + NumVAddrs + 1;
4619 } else {
4620 auto UnormConst =
4621 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
4622 if (!UnormConst)
4623 return Op;
4624
4625 Unorm = UnormConst->getZExtValue() ? True : False;
4626 CtrlIdx = AddrIdx + NumVAddrs + 3;
4627 }
4628
4629 SDValue TexFail = Op.getOperand(CtrlIdx);
4630 auto TexFailConst = dyn_cast<ConstantSDNode>(TexFail.getNode());
4631 if (!TexFailConst || TexFailConst->getZExtValue() != 0)
4632 return Op;
4633
4634 SDValue GLC;
4635 SDValue SLC;
4636 if (BaseOpcode->Atomic) {
4637 GLC = True; // TODO no-return optimization
4638 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC))
4639 return Op;
4640 } else {
4641 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC))
4642 return Op;
4643 }
4644
4645 SmallVector<SDValue, 14> Ops;
4646 if (BaseOpcode->Store || BaseOpcode->Atomic)
4647 Ops.push_back(VData); // vdata
4648 Ops.push_back(VAddr);
4649 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
4650 if (BaseOpcode->Sampler)
4651 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
4652 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
4653 Ops.push_back(Unorm);
4654 Ops.push_back(GLC);
4655 Ops.push_back(SLC);
4656 Ops.push_back(False); // r128
4657 Ops.push_back(False); // tfe
4658 Ops.push_back(False); // lwe
4659 Ops.push_back(DimInfo->DA ? True : False);
4660 if (BaseOpcode->HasD16)
4661 Ops.push_back(IsD16 ? True : False);
4662 if (isa<MemSDNode>(Op))
4663 Ops.push_back(Op.getOperand(0)); // chain
4664
4665 int NumVAddrDwords = VAddr.getValueType().getSizeInBits() / 32;
4666 int Opcode = -1;
4667
Tom Stellard5bfbae52018-07-11 20:59:01 +00004668 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004669 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004670 NumVDataDwords, NumVAddrDwords);
4671 if (Opcode == -1)
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004672 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004673 NumVDataDwords, NumVAddrDwords);
4674 assert(Opcode != -1);
4675
4676 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
4677 if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
4678 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
4679 *MemRefs = MemOp->getMemOperand();
4680 NewNode->setMemRefs(MemRefs, MemRefs + 1);
4681 }
4682
4683 if (BaseOpcode->AtomicX2) {
4684 SmallVector<SDValue, 1> Elt;
4685 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
4686 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
4687 } else if (IsD16 && !BaseOpcode->Store) {
4688 MVT LoadVT = Op.getSimpleValueType();
4689 SDValue Adjusted = adjustLoadValueTypeImpl(
4690 SDValue(NewNode, 0), LoadVT, DL, DAG, Subtarget->hasUnpackedD16VMem());
4691 return DAG.getMergeValues({Adjusted, SDValue(NewNode, 1)}, DL);
4692 }
4693
4694 return SDValue(NewNode, 0);
4695}
4696
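// Lower ISD::INTRINSIC_WO_CHAIN. Most cases map an amdgcn.* intrinsic directly
// onto the matching AMDGPUISD node; anything not handled here falls through to
// the generic image-dim lowering or is returned unchanged.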
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004697SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4698 SelectionDAG &DAG) const {
4699 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00004700 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004701
4702 EVT VT = Op.getValueType();
4703 SDLoc DL(Op);
4704 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4705
Sanjay Patela2607012015-09-16 16:31:21 +00004706 // TODO: Should this propagate fast-math-flags?
4707
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004708 switch (IntrinsicID) {
Tom Stellard2f3f9852017-01-25 01:25:13 +00004709 case Intrinsic::amdgcn_implicit_buffer_ptr: {
Matt Arsenaultceafc552018-05-29 17:42:50 +00004710 if (getSubtarget()->isAmdCodeObjectV2(MF.getFunction()))
Matt Arsenault10fc0622017-06-26 03:01:31 +00004711 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004712 return getPreloadedValue(DAG, *MFI, VT,
4713 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
Tom Stellard2f3f9852017-01-25 01:25:13 +00004714 }
Tom Stellard48f29f22015-11-26 00:43:29 +00004715 case Intrinsic::amdgcn_dispatch_ptr:
Matt Arsenault48ab5262016-04-25 19:27:18 +00004716 case Intrinsic::amdgcn_queue_ptr: {
Matt Arsenaultceafc552018-05-29 17:42:50 +00004717 if (!Subtarget->isAmdCodeObjectV2(MF.getFunction())) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00004718 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00004719 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
Oliver Stannard7e7d9832016-02-02 13:52:43 +00004720 DL.getDebugLoc());
Matt Arsenault800fecf2016-01-11 21:18:33 +00004721 DAG.getContext()->diagnose(BadIntrin);
4722 return DAG.getUNDEF(VT);
4723 }
4724
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004725 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
4726 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
4727 return getPreloadedValue(DAG, *MFI, VT, RegID);
Matt Arsenault48ab5262016-04-25 19:27:18 +00004728 }
Jan Veselyfea814d2016-06-21 20:46:20 +00004729 case Intrinsic::amdgcn_implicitarg_ptr: {
Matt Arsenault9166ce82017-07-28 15:52:08 +00004730 if (MFI->isEntryFunction())
4731 return getImplicitArgPtr(DAG, DL);
Matt Arsenault817c2532017-08-03 23:12:44 +00004732 return getPreloadedValue(DAG, *MFI, VT,
4733 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
Jan Veselyfea814d2016-06-21 20:46:20 +00004734 }
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00004735 case Intrinsic::amdgcn_kernarg_segment_ptr: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004736 return getPreloadedValue(DAG, *MFI, VT,
4737 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00004738 }
Matt Arsenault8d718dc2016-07-22 17:01:30 +00004739 case Intrinsic::amdgcn_dispatch_id: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004740 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
Matt Arsenault8d718dc2016-07-22 17:01:30 +00004741 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004742 case Intrinsic::amdgcn_rcp:
4743 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
4744 case Intrinsic::amdgcn_rsq:
4745 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00004746 case Intrinsic::amdgcn_rsq_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004747 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004748 return emitRemovedIntrinsicError(DAG, DL, VT);
4749
4750 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00004751 case Intrinsic::amdgcn_rcp_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004752 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault32fc5272016-07-26 16:45:45 +00004753 return emitRemovedIntrinsicError(DAG, DL, VT);
4754 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
Matt Arsenault09b2c4a2016-07-15 21:26:52 +00004755 case Intrinsic::amdgcn_rsq_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004756 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault79963e82016-02-13 01:03:00 +00004757 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Tom Stellard48f29f22015-11-26 00:43:29 +00004758
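  // There is no rsq_clamp instruction on VI and newer, so expand to a plain
  // rsq whose result is clamped to +/- the largest finite value of the type,
  // matching the clamped-to-max_float behaviour of the dedicated instruction
  // on older targets.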
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004759 Type *Type = VT.getTypeForEVT(*DAG.getContext());
4760 APFloat Max = APFloat::getLargest(Type->getFltSemantics());
4761 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
4762
4763 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4764 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
4765 DAG.getConstantFP(Max, DL, VT));
4766 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
4767 DAG.getConstantFP(Min, DL, VT));
4768 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004769 case Intrinsic::r600_read_ngroups_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004770 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004771 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004772
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004773 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004774 SI::KernelInputOffsets::NGROUPS_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004775 case Intrinsic::r600_read_ngroups_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004776 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004777 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004778
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004779 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004780 SI::KernelInputOffsets::NGROUPS_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004781 case Intrinsic::r600_read_ngroups_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004782 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004783 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004784
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004785 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004786 SI::KernelInputOffsets::NGROUPS_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004787 case Intrinsic::r600_read_global_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004788 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004789 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004790
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004791 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004792 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004793 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004794 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004795 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004796
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004797 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004798 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004799 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004800 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004801 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004802
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004803 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004804 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004805 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004806 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004807 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004808
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004809 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4810 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004811 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004812 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004813 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004814
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004815 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4816 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004817 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004818 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004819 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004820
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004821 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4822 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00004823 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004824 case Intrinsic::r600_read_tgid_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004825 return getPreloadedValue(DAG, *MFI, VT,
4826 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Matt Arsenault43976df2016-01-30 04:25:19 +00004827 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004828 case Intrinsic::r600_read_tgid_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004829 return getPreloadedValue(DAG, *MFI, VT,
4830 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Matt Arsenault43976df2016-01-30 04:25:19 +00004831 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004832 case Intrinsic::r600_read_tgid_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004833 return getPreloadedValue(DAG, *MFI, VT,
4834 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
4835 case Intrinsic::amdgcn_workitem_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004836 case Intrinsic::r600_read_tidig_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004837 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4838 SDLoc(DAG.getEntryNode()),
4839 MFI->getArgInfo().WorkItemIDX);
Matt Arsenault43976df2016-01-30 04:25:19 +00004841 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004842 case Intrinsic::r600_read_tidig_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004843 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4844 SDLoc(DAG.getEntryNode()),
4845 MFI->getArgInfo().WorkItemIDY);
Matt Arsenault43976df2016-01-30 04:25:19 +00004846 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004847 case Intrinsic::r600_read_tidig_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004848 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4849 SDLoc(DAG.getEntryNode()),
4850 MFI->getArgInfo().WorkItemIDZ);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004851 case AMDGPUIntrinsic::SI_load_const: {
4852 SDValue Ops[] = {
4853 Op.getOperand(1),
4854 Op.getOperand(2)
4855 };
4856
4857 MachineMemOperand *MMO = MF.getMachineMemOperand(
Justin Lebaradbf09e2016-09-11 01:38:58 +00004858 MachinePointerInfo(),
4859 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
4860 MachineMemOperand::MOInvariant,
4861 VT.getStoreSize(), 4);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004862 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
4863 Op->getVTList(), Ops, VT, MMO);
4864 }
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004865 case Intrinsic::amdgcn_fdiv_fast:
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00004866 return lowerFDIV_FAST(Op, DAG);
Tom Stellard2187bb82016-12-06 23:52:13 +00004867 case Intrinsic::amdgcn_interp_mov: {
4868 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4869 SDValue Glue = M0.getValue(1);
4870 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
4871 Op.getOperand(2), Op.getOperand(3), Glue);
4872 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00004873 case Intrinsic::amdgcn_interp_p1: {
4874 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4875 SDValue Glue = M0.getValue(1);
4876 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
4877 Op.getOperand(2), Op.getOperand(3), Glue);
4878 }
4879 case Intrinsic::amdgcn_interp_p2: {
4880 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
4881 SDValue Glue = SDValue(M0.getNode(), 1);
4882 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
4883 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
4884 Glue);
4885 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004886 case Intrinsic::amdgcn_sin:
4887 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
4888
4889 case Intrinsic::amdgcn_cos:
4890 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
4891
4892 case Intrinsic::amdgcn_log_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004893 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004894 return SDValue();
4895
4896 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00004897 MF.getFunction(), "intrinsic not supported on subtarget",
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004898 DL.getDebugLoc());
4899 DAG.getContext()->diagnose(BadIntrin);
4900 return DAG.getUNDEF(VT);
4901 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004902 case Intrinsic::amdgcn_ldexp:
4903 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
4904 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00004905
4906 case Intrinsic::amdgcn_fract:
4907 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
4908
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004909 case Intrinsic::amdgcn_class:
4910 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
4911 Op.getOperand(1), Op.getOperand(2));
4912 case Intrinsic::amdgcn_div_fmas:
4913 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
4914 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4915 Op.getOperand(4));
4916
4917 case Intrinsic::amdgcn_div_fixup:
4918 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
4919 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4920
4921 case Intrinsic::amdgcn_trig_preop:
4922 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
4923 Op.getOperand(1), Op.getOperand(2));
4924 case Intrinsic::amdgcn_div_scale: {
4925 // 3rd parameter required to be a constant.
4926 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4927 if (!Param)
Matt Arsenault206f8262017-08-01 20:49:41 +00004928 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004929
4930 // Translate to the operands expected by the machine instruction: the first
4931 // source must match the numerator or denominator, per the constant selector.
4932 SDValue Numerator = Op.getOperand(1);
4933 SDValue Denominator = Op.getOperand(2);
4934
4935 // Note this order is opposite of the machine instruction's operations,
4936 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
4937 // intrinsic has the numerator as the first operand to match a normal
4938 // division operation.
4939
4940 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
4941
4942 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
4943 Denominator, Numerator);
4944 }
Wei Ding07e03712016-07-28 16:42:13 +00004945 case Intrinsic::amdgcn_icmp: {
4946 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004947 if (!CD)
4948 return DAG.getUNDEF(VT);
Wei Ding07e03712016-07-28 16:42:13 +00004949
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004950 int CondCode = CD->getSExtValue();
Wei Ding07e03712016-07-28 16:42:13 +00004951 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004952 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00004953 return DAG.getUNDEF(VT);
4954
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00004955 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00004956 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4957 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4958 Op.getOperand(2), DAG.getCondCode(CCOpcode));
4959 }
4960 case Intrinsic::amdgcn_fcmp: {
4961 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004962 if (!CD)
4963 return DAG.getUNDEF(VT);
Wei Ding07e03712016-07-28 16:42:13 +00004964
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004965 int CondCode = CD->getSExtValue();
4966 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4967 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00004968 return DAG.getUNDEF(VT);
4969
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00004970 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00004971 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4972 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4973 Op.getOperand(2), DAG.getCondCode(CCOpcode));
4974 }
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00004975 case Intrinsic::amdgcn_fmed3:
4976 return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
4977 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Farhana Aleenc370d7b2018-07-16 18:19:59 +00004978 case Intrinsic::amdgcn_fdot2:
4979 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00004980 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4981 Op.getOperand(4));
Matt Arsenault32fc5272016-07-26 16:45:45 +00004982 case Intrinsic::amdgcn_fmul_legacy:
4983 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
4984 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00004985 case Intrinsic::amdgcn_sffbh:
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00004986 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaultf5262252017-02-22 23:04:58 +00004987 case Intrinsic::amdgcn_sbfe:
4988 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
4989 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4990 case Intrinsic::amdgcn_ubfe:
4991 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
4992 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Marek Olsak13e47412018-01-31 20:18:04 +00004993 case Intrinsic::amdgcn_cvt_pkrtz:
4994 case Intrinsic::amdgcn_cvt_pknorm_i16:
4995 case Intrinsic::amdgcn_cvt_pknorm_u16:
4996 case Intrinsic::amdgcn_cvt_pk_i16:
4997 case Intrinsic::amdgcn_cvt_pk_u16: {
4998 // FIXME: Stop adding cast if v2f16/v2i16 are legal.
Matt Arsenault1f17c662017-02-22 00:27:34 +00004999 EVT VT = Op.getValueType();
Marek Olsak13e47412018-01-31 20:18:04 +00005000 unsigned Opcode;
5001
5002 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5003 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5004 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5005 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5006 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5007 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5008 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5009 Opcode = AMDGPUISD::CVT_PK_I16_I32;
5010 else
5011 Opcode = AMDGPUISD::CVT_PK_U16_U32;
5012
Matt Arsenault709374d2018-08-01 20:13:58 +00005013 if (isTypeLegal(VT))
5014 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5015
Marek Olsak13e47412018-01-31 20:18:04 +00005016 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Matt Arsenault1f17c662017-02-22 00:27:34 +00005017 Op.getOperand(1), Op.getOperand(2));
5018 return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5019 }
Connor Abbott8c217d02017-08-04 18:36:49 +00005020 case Intrinsic::amdgcn_wqm: {
5021 SDValue Src = Op.getOperand(1);
5022 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
5023 0);
5024 }
Connor Abbott92638ab2017-08-04 18:36:52 +00005025 case Intrinsic::amdgcn_wwm: {
5026 SDValue Src = Op.getOperand(1);
5027 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
5028 0);
5029 }
Stanislav Mekhanoshindacda792018-06-26 20:04:19 +00005030 case Intrinsic::amdgcn_fmad_ftz:
5031 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5032 Op.getOperand(2), Op.getOperand(3));
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005033 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005034 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5035 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5036 return lowerImage(Op, ImageDimIntr, DAG);
5037
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005038 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005039 }
5040}
5041
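// Lower ISD::INTRINSIC_W_CHAIN. These intrinsics read or modify memory, so
// each case rebuilds the operand list and emits a memory intrinsic node that
// carries over the original MachineMemOperand.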
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005042SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5043 SelectionDAG &DAG) const {
5044 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00005045 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00005046
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005047 switch (IntrID) {
5048 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005049 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005050 case Intrinsic::amdgcn_ds_fadd:
5051 case Intrinsic::amdgcn_ds_fmin:
5052 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005053 MemSDNode *M = cast<MemSDNode>(Op);
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005054 unsigned Opc;
5055 switch (IntrID) {
5056 case Intrinsic::amdgcn_atomic_inc:
5057 Opc = AMDGPUISD::ATOMIC_INC;
5058 break;
5059 case Intrinsic::amdgcn_atomic_dec:
5060 Opc = AMDGPUISD::ATOMIC_DEC;
5061 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005062 case Intrinsic::amdgcn_ds_fadd:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005063 Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
5064 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005065 case Intrinsic::amdgcn_ds_fmin:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005066 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
5067 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005068 case Intrinsic::amdgcn_ds_fmax:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005069 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
5070 break;
5071 default:
5072 llvm_unreachable("Unknown intrinsic!");
5073 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005074 SDValue Ops[] = {
5075 M->getOperand(0), // Chain
5076 M->getOperand(2), // Ptr
5077 M->getOperand(3) // Value
5078 };
5079
5080 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
5081 M->getMemoryVT(), M->getMemOperand());
5082 }
Tom Stellard6f9ef142016-12-20 17:19:44 +00005083 case Intrinsic::amdgcn_buffer_load:
5084 case Intrinsic::amdgcn_buffer_load_format: {
5085 SDValue Ops[] = {
5086 Op.getOperand(0), // Chain
5087 Op.getOperand(2), // rsrc
5088 Op.getOperand(3), // vindex
5089 Op.getOperand(4), // offset
5090 Op.getOperand(5), // glc
5091 Op.getOperand(6) // slc
5092 };
Tom Stellard6f9ef142016-12-20 17:19:44 +00005093
5094 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
5095 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5096 EVT VT = Op.getValueType();
5097 EVT IntVT = VT.changeTypeToInteger();
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005098 auto *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005099 EVT LoadVT = Op.getValueType();
5100 bool IsD16 = LoadVT.getScalarType() == MVT::f16;
5101 if (IsD16)
5102 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG);
5103
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005104 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5105 M->getMemOperand());
Tom Stellard6f9ef142016-12-20 17:19:44 +00005106 }
David Stuttard70e8bc12017-06-22 16:29:22 +00005107 case Intrinsic::amdgcn_tbuffer_load: {
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005108 MemSDNode *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005109 EVT LoadVT = Op.getValueType();
5110 bool IsD16 = LoadVT.getScalarType() == MVT::f16;
5111 if (IsD16) {
5112 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, M, DAG);
5113 }
5114
David Stuttard70e8bc12017-06-22 16:29:22 +00005115 SDValue Ops[] = {
5116 Op.getOperand(0), // Chain
5117 Op.getOperand(2), // rsrc
5118 Op.getOperand(3), // vindex
5119 Op.getOperand(4), // voffset
5120 Op.getOperand(5), // soffset
5121 Op.getOperand(6), // offset
5122 Op.getOperand(7), // dfmt
5123 Op.getOperand(8), // nfmt
5124 Op.getOperand(9), // glc
5125 Op.getOperand(10) // slc
5126 };
5127
David Stuttard70e8bc12017-06-22 16:29:22 +00005128 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Matt Arsenault1349a042018-05-22 06:32:10 +00005129 Op->getVTList(), Ops, LoadVT,
5130 M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00005131 }
Marek Olsak5cec6412017-11-09 01:52:48 +00005132 case Intrinsic::amdgcn_buffer_atomic_swap:
5133 case Intrinsic::amdgcn_buffer_atomic_add:
5134 case Intrinsic::amdgcn_buffer_atomic_sub:
5135 case Intrinsic::amdgcn_buffer_atomic_smin:
5136 case Intrinsic::amdgcn_buffer_atomic_umin:
5137 case Intrinsic::amdgcn_buffer_atomic_smax:
5138 case Intrinsic::amdgcn_buffer_atomic_umax:
5139 case Intrinsic::amdgcn_buffer_atomic_and:
5140 case Intrinsic::amdgcn_buffer_atomic_or:
5141 case Intrinsic::amdgcn_buffer_atomic_xor: {
5142 SDValue Ops[] = {
5143 Op.getOperand(0), // Chain
5144 Op.getOperand(2), // vdata
5145 Op.getOperand(3), // rsrc
5146 Op.getOperand(4), // vindex
5147 Op.getOperand(5), // offset
5148 Op.getOperand(6) // slc
5149 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005150 EVT VT = Op.getValueType();
5151
5152 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005153 unsigned Opcode = 0;
5154
5155 switch (IntrID) {
5156 case Intrinsic::amdgcn_buffer_atomic_swap:
5157 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5158 break;
5159 case Intrinsic::amdgcn_buffer_atomic_add:
5160 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5161 break;
5162 case Intrinsic::amdgcn_buffer_atomic_sub:
5163 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5164 break;
5165 case Intrinsic::amdgcn_buffer_atomic_smin:
5166 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5167 break;
5168 case Intrinsic::amdgcn_buffer_atomic_umin:
5169 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5170 break;
5171 case Intrinsic::amdgcn_buffer_atomic_smax:
5172 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5173 break;
5174 case Intrinsic::amdgcn_buffer_atomic_umax:
5175 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5176 break;
5177 case Intrinsic::amdgcn_buffer_atomic_and:
5178 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5179 break;
5180 case Intrinsic::amdgcn_buffer_atomic_or:
5181 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5182 break;
5183 case Intrinsic::amdgcn_buffer_atomic_xor:
5184 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5185 break;
5186 default:
5187 llvm_unreachable("unhandled atomic opcode");
5188 }
5189
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005190 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5191 M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005192 }
5193
5194 case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
5195 SDValue Ops[] = {
5196 Op.getOperand(0), // Chain
5197 Op.getOperand(2), // src
5198 Op.getOperand(3), // cmp
5199 Op.getOperand(4), // rsrc
5200 Op.getOperand(5), // vindex
5201 Op.getOperand(6), // offset
5202 Op.getOperand(7) // slc
5203 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005204 EVT VT = Op.getValueType();
5205 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005206
5207 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005208 Op->getVTList(), Ops, VT, M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005209 }
5210
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005211 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005212 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5213 AMDGPU::getImageDimIntrinsicInfo(IntrID))
5214 return lowerImage(Op, ImageDimIntr, DAG);
Matt Arsenault1349a042018-05-22 06:32:10 +00005215
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005216 return SDValue();
5217 }
5218}
5219
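// Prepare 16-bit store data for the selected subtarget. With unpacked D16 VMEM
// each 16-bit element is written from its own 32-bit register, so packed
// vectors are bitcast to integers, zero-extended to one dword per element and
// unrolled; otherwise the packed value is passed through unchanged.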
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005220SDValue SITargetLowering::handleD16VData(SDValue VData,
5221 SelectionDAG &DAG) const {
5222 EVT StoreVT = VData.getValueType();
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005223
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005224 // No change for f16 and legal vector D16 types.
Matt Arsenault1349a042018-05-22 06:32:10 +00005225 if (!StoreVT.isVector())
5226 return VData;
5227
5228 SDLoc DL(VData);
5229 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
5230
5231 if (Subtarget->hasUnpackedD16VMem()) {
5232 // We need to unpack the packed data to store.
5233 EVT IntStoreVT = StoreVT.changeTypeToInteger();
5234 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
5235
5236 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
5237 StoreVT.getVectorNumElements());
5238 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
5239 return DAG.UnrollVectorOp(ZExt.getNode());
5240 }
5241
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005242 assert(isTypeLegal(StoreVT));
5243 return VData;
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005244}
5245
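// Lower ISD::INTRINSIC_VOID: intrinsics with side effects and no result, such
// as exports, messages, kill and the buffer/tbuffer stores below.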
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005246SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
5247 SelectionDAG &DAG) const {
Tom Stellardfc92e772015-05-12 14:18:14 +00005248 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005249 SDValue Chain = Op.getOperand(0);
5250 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
David Stuttard70e8bc12017-06-22 16:29:22 +00005251 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005252
5253 switch (IntrinsicID) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00005254 case Intrinsic::amdgcn_exp: {
Matt Arsenault4165efd2017-01-17 07:26:53 +00005255 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
5256 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
5257 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
5258 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
5259
5260 const SDValue Ops[] = {
5261 Chain,
5262 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
5263 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
5264 Op.getOperand(4), // src0
5265 Op.getOperand(5), // src1
5266 Op.getOperand(6), // src2
5267 Op.getOperand(7), // src3
5268 DAG.getTargetConstant(0, DL, MVT::i1), // compr
5269 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
5270 };
5271
5272 unsigned Opc = Done->isNullValue() ?
5273 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
5274 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
5275 }
5276 case Intrinsic::amdgcn_exp_compr: {
5277 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
5278 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
5279 SDValue Src0 = Op.getOperand(4);
5280 SDValue Src1 = Op.getOperand(5);
5281 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
5282 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
5283
5284 SDValue Undef = DAG.getUNDEF(MVT::f32);
5285 const SDValue Ops[] = {
5286 Chain,
5287 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
5288 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
5289 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
5290 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
5291 Undef, // src2
5292 Undef, // src3
5293 DAG.getTargetConstant(1, DL, MVT::i1), // compr
5294 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
5295 };
5296
5297 unsigned Opc = Done->isNullValue() ?
5298 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
5299 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
5300 }
5301 case Intrinsic::amdgcn_s_sendmsg:
Matt Arsenaultd3e5cb72017-02-16 02:01:17 +00005302 case Intrinsic::amdgcn_s_sendmsghalt: {
5303 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
5304 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Tom Stellardfc92e772015-05-12 14:18:14 +00005305 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
5306 SDValue Glue = Chain.getValue(1);
Matt Arsenaulta78ca622017-02-15 22:17:09 +00005307 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Jan Veselyd48445d2017-01-04 18:06:55 +00005308 Op.getOperand(2), Glue);
5309 }
Marek Olsak2d825902017-04-28 20:21:58 +00005310 case Intrinsic::amdgcn_init_exec: {
5311 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
5312 Op.getOperand(2));
5313 }
5314 case Intrinsic::amdgcn_init_exec_from_input: {
5315 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
5316 Op.getOperand(2), Op.getOperand(3));
5317 }
Matt Arsenault00568682016-07-13 06:04:22 +00005318 case AMDGPUIntrinsic::AMDGPU_kill: {
Matt Arsenault03006fd2016-07-19 16:27:56 +00005319 SDValue Src = Op.getOperand(2);
5320 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
Matt Arsenault00568682016-07-13 06:04:22 +00005321 if (!K->isNegative())
5322 return Chain;
Matt Arsenault03006fd2016-07-19 16:27:56 +00005323
5324 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
5325 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
Matt Arsenault00568682016-07-13 06:04:22 +00005326 }
5327
Matt Arsenault03006fd2016-07-19 16:27:56 +00005328 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
5329 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
Matt Arsenault00568682016-07-13 06:04:22 +00005330 }
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005331 case Intrinsic::amdgcn_s_barrier: {
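    // If the entire workgroup fits in a single wave, every lane already runs
    // in lockstep and the hardware barrier can be dropped; WAVE_BARRIER is a
    // pseudo that only keeps the scheduler from moving memory operations
    // across this point and emits no instruction.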
5332 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005333 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matthias Braunf1caa282017-12-15 22:22:58 +00005334 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005335 if (WGSize <= ST.getWavefrontSize())
5336 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
5337 Op.getOperand(0)), 0);
5338 }
5339 return SDValue();
5340 };
David Stuttard70e8bc12017-06-22 16:29:22 +00005341 case AMDGPUIntrinsic::SI_tbuffer_store: {
5342
5343 // Extract vindex and voffset from vaddr as appropriate
5344 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
5345 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
5346 SDValue VAddr = Op.getOperand(5);
5347
5348 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
5349
5350 assert(!(OffEn->isOne() && IdxEn->isOne()) &&
5351 "Legacy intrinsic doesn't support both offset and index - use new version");
5352
5353 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
5354 SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
5355
5356 // Deal with the vec-3 case
5357 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
5358 auto Opcode = NumChannels->getZExtValue() == 3 ?
5359 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
5360
5361 SDValue Ops[] = {
5362 Chain,
5363 Op.getOperand(3), // vdata
5364 Op.getOperand(2), // rsrc
5365 VIndex,
5366 VOffset,
5367 Op.getOperand(6), // soffset
5368 Op.getOperand(7), // inst_offset
5369 Op.getOperand(8), // dfmt
5370 Op.getOperand(9), // nfmt
5371 Op.getOperand(12), // glc
5372 Op.getOperand(13), // slc
5373 };
5374
David Stuttardf6779662017-06-22 17:15:49 +00005375 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
David Stuttard70e8bc12017-06-22 16:29:22 +00005376 "Value of tfe other than zero is unsupported");
5377
5378 EVT VT = Op.getOperand(3).getValueType();
5379 MachineMemOperand *MMO = MF.getMachineMemOperand(
5380 MachinePointerInfo(),
5381 MachineMemOperand::MOStore,
5382 VT.getStoreSize(), 4);
5383 return DAG.getMemIntrinsicNode(Opcode, DL,
5384 Op->getVTList(), Ops, VT, MMO);
5385 }
5386
5387 case Intrinsic::amdgcn_tbuffer_store: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005388 SDValue VData = Op.getOperand(2);
5389 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
5390 if (IsD16)
5391 VData = handleD16VData(VData, DAG);
David Stuttard70e8bc12017-06-22 16:29:22 +00005392 SDValue Ops[] = {
5393 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005394 VData, // vdata
David Stuttard70e8bc12017-06-22 16:29:22 +00005395 Op.getOperand(3), // rsrc
5396 Op.getOperand(4), // vindex
5397 Op.getOperand(5), // voffset
5398 Op.getOperand(6), // soffset
5399 Op.getOperand(7), // offset
5400 Op.getOperand(8), // dfmt
5401 Op.getOperand(9), // nfmt
5402 Op.getOperand(10), // glc
5403 Op.getOperand(11) // slc
5404 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005405 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
5406 AMDGPUISD::TBUFFER_STORE_FORMAT;
5407 MemSDNode *M = cast<MemSDNode>(Op);
5408 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5409 M->getMemoryVT(), M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00005410 }
5411
Marek Olsak5cec6412017-11-09 01:52:48 +00005412 case Intrinsic::amdgcn_buffer_store:
5413 case Intrinsic::amdgcn_buffer_store_format: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005414 SDValue VData = Op.getOperand(2);
5415 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
5416 if (IsD16)
5417 VData = handleD16VData(VData, DAG);
Marek Olsak5cec6412017-11-09 01:52:48 +00005418 SDValue Ops[] = {
5419 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005420 VData, // vdata
Marek Olsak5cec6412017-11-09 01:52:48 +00005421 Op.getOperand(3), // rsrc
5422 Op.getOperand(4), // vindex
5423 Op.getOperand(5), // offset
5424 Op.getOperand(6), // glc
5425 Op.getOperand(7) // slc
5426 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005427 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
5428 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
5429 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
5430 MemSDNode *M = cast<MemSDNode>(Op);
5431 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5432 M->getMemoryVT(), M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005433 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005434 default: {
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005435 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5436 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5437 return lowerImage(Op, ImageDimIntr, DAG);
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005438
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005439 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005440 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005441 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005442}
5443
Matt Arsenault90083d32018-06-07 09:54:49 +00005444static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
5445 ISD::LoadExtType ExtType, SDValue Op,
5446 const SDLoc &SL, EVT VT) {
5447 if (VT.bitsLT(Op.getValueType()))
5448 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
5449
5450 switch (ExtType) {
5451 case ISD::SEXTLOAD:
5452 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
5453 case ISD::ZEXTLOAD:
5454 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
5455 case ISD::EXTLOAD:
5456 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
5457 case ISD::NON_EXTLOAD:
5458 return Op;
5459 }
5460
5461 llvm_unreachable("invalid ext type");
5462}
5463
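// Widen uniform sub-dword loads from the constant address space (or invariant
// global loads) into a full 32-bit load and narrow the result afterwards,
// since the scalar memory instructions only operate at dword granularity. The
// alignment check presumably guarantees the widened dword access stays
// naturally aligned.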
5464SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
5465 SelectionDAG &DAG = DCI.DAG;
5466 if (Ld->getAlignment() < 4 || Ld->isDivergent())
5467 return SDValue();
5468
5469 // FIXME: Constant loads should all be marked invariant.
5470 unsigned AS = Ld->getAddressSpace();
5471 if (AS != AMDGPUASI.CONSTANT_ADDRESS &&
5472 AS != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
5473 (AS != AMDGPUASI.GLOBAL_ADDRESS || !Ld->isInvariant()))
5474 return SDValue();
5475
5476 // Don't do this early, since it may interfere with adjacent load merging for
5477 // illegal types. We can avoid losing alignment information for exotic types
5478 // pre-legalize.
5479 EVT MemVT = Ld->getMemoryVT();
5480 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
5481 MemVT.getSizeInBits() >= 32)
5482 return SDValue();
5483
5484 SDLoc SL(Ld);
5485
5486 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
5487 "unexpected vector extload");
5488
5489 // TODO: Drop only high part of range.
5490 SDValue Ptr = Ld->getBasePtr();
5491 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
5492 MVT::i32, SL, Ld->getChain(), Ptr,
5493 Ld->getOffset(),
5494 Ld->getPointerInfo(), MVT::i32,
5495 Ld->getAlignment(),
5496 Ld->getMemOperand()->getFlags(),
5497 Ld->getAAInfo(),
5498 nullptr); // Drop ranges
5499
5500 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
5501 if (MemVT.isFloatingPoint()) {
5502 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
5503 "unexpected fp extload");
5504 TruncVT = MemVT.changeTypeToInteger();
5505 }
5506
5507 SDValue Cvt = NewLoad;
5508 if (Ld->getExtensionType() == ISD::SEXTLOAD) {
5509 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
5510 DAG.getValueType(TruncVT));
5511 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
5512 Ld->getExtensionType() == ISD::NON_EXTLOAD) {
5513 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
5514 } else {
5515 assert(Ld->getExtensionType() == ISD::EXTLOAD);
5516 }
5517
5518 EVT VT = Ld->getValueType(0);
5519 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
5520
5521 DCI.AddToWorklist(Cvt.getNode());
5522
5523 // We may need to handle exotic cases, such as i16->i64 extloads, so insert
5524 // the appropriate extension from the 32-bit load.
5525 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
5526 DCI.AddToWorklist(Cvt.getNode());
5527
5528 // Handle conversion back to floating point if necessary.
5529 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
5530
5531 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
5532}
5533
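// Custom load legalization. i1 and (when not legal) i16 loads are done through
// a 32-bit extending load and truncated; unaligned accesses are expanded; and
// wide vector loads are split or scalarized with limits that depend on the
// address space (e.g. the private element size, or 16-byte ds_read alignment).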
Tom Stellard81d871d2013-11-13 23:36:50 +00005534SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
5535 SDLoc DL(Op);
5536 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00005537 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00005538 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00005539
Matt Arsenaulta1436412016-02-10 18:21:45 +00005540 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00005541 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
5542 return SDValue();
5543
Matt Arsenault6dfda962016-02-10 18:21:39 +00005544 // FIXME: Copied from PPC
5545 // First, load into 32 bits, then truncate to the original memory type.
5546
5547 SDValue Chain = Load->getChain();
5548 SDValue BasePtr = Load->getBasePtr();
5549 MachineMemOperand *MMO = Load->getMemOperand();
5550
Tom Stellard115a6152016-11-10 16:02:37 +00005551 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
5552
Matt Arsenault6dfda962016-02-10 18:21:39 +00005553 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00005554 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00005555
5556 SDValue Ops[] = {
Matt Arsenaulta1436412016-02-10 18:21:45 +00005557 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
Matt Arsenault6dfda962016-02-10 18:21:39 +00005558 NewLD.getValue(1)
5559 };
5560
5561 return DAG.getMergeValues(Ops, DL);
5562 }
Tom Stellard81d871d2013-11-13 23:36:50 +00005563
Matt Arsenaulta1436412016-02-10 18:21:45 +00005564 if (!MemVT.isVector())
5565 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00005566
Matt Arsenaulta1436412016-02-10 18:21:45 +00005567 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
5568 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00005569
Farhana Aleen89196642018-03-07 17:09:18 +00005570 unsigned Alignment = Load->getAlignment();
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005571 unsigned AS = Load->getAddressSpace();
5572 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
Farhana Aleen89196642018-03-07 17:09:18 +00005573 AS, Alignment)) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005574 SDValue Ops[2];
5575 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
5576 return DAG.getMergeValues(Ops, DL);
5577 }
5578
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005579 MachineFunction &MF = DAG.getMachineFunction();
5580 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5581 // If there is a possibility that flat instructions access scratch memory
5582 // then we need to use the same legalization rules we use for private.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005583 if (AS == AMDGPUASI.FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005584 AS = MFI->hasFlatScratchInit() ?
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005585 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005586
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005587 unsigned NumElements = MemVT.getVectorNumElements();
Matt Arsenault6c041a32018-03-29 19:59:28 +00005588
Matt Arsenault923712b2018-02-09 16:57:57 +00005589 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5590 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
Matt Arsenault6c041a32018-03-29 19:59:28 +00005591 if (!Op->isDivergent() && Alignment >= 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00005592 return SDValue();
5593 // Non-uniform loads will be selected to MUBUF instructions, so they
Alexander Timofeev18009562016-12-08 17:28:47 +00005594 // have the same legalization requirements as global and private
Matt Arsenaulta1436412016-02-10 18:21:45 +00005595 // loads.
5596 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005597 }
Matt Arsenault6c041a32018-03-29 19:59:28 +00005598
Matt Arsenault923712b2018-02-09 16:57:57 +00005599 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5600 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
5601 AS == AMDGPUASI.GLOBAL_ADDRESS) {
Alexander Timofeev2e5eece2018-03-05 15:12:21 +00005602 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Farhana Aleen89196642018-03-07 17:09:18 +00005603 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
Matt Arsenault6c041a32018-03-29 19:59:28 +00005604 Alignment >= 4)
Alexander Timofeev18009562016-12-08 17:28:47 +00005605 return SDValue();
5606 // Non-uniform loads will be selected to MUBUF instructions, so they
5607 // have the same legalization requirements as global and private
5608 // loads.
5609 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005610 }
Matt Arsenault923712b2018-02-09 16:57:57 +00005611 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5612 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
5613 AS == AMDGPUASI.GLOBAL_ADDRESS ||
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005614 AS == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005615 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00005616 return SplitVectorLoad(Op, DAG);
5617 // v4 loads are supported for private and global memory.
5618 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005619 }
5620 if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005621 // Depending on the setting of the private_element_size field in the
5622 // resource descriptor, we can only make private accesses up to a certain
5623 // size.
5624 switch (Subtarget->getMaxPrivateElementSize()) {
5625 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00005626 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005627 case 8:
5628 if (NumElements > 2)
5629 return SplitVectorLoad(Op, DAG);
5630 return SDValue();
5631 case 16:
5632 // Same as global/flat
5633 if (NumElements > 4)
5634 return SplitVectorLoad(Op, DAG);
5635 return SDValue();
5636 default:
5637 llvm_unreachable("unsupported private_element_size");
5638 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005639 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
Farhana Aleena7cb3112018-03-09 17:41:39 +00005640 // Use ds_read_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00005641 if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
Farhana Aleena7cb3112018-03-09 17:41:39 +00005642 MemVT.getStoreSize() == 16)
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005643 return SDValue();
5644
Farhana Aleena7cb3112018-03-09 17:41:39 +00005645 if (NumElements > 2)
5646 return SplitVectorLoad(Op, DAG);
Tom Stellarde9373602014-01-22 19:24:14 +00005647 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005648 return SDValue();
Tom Stellard81d871d2013-11-13 23:36:50 +00005649}
5650
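// Legalize a 64-bit select by bitcasting the operands to v2i32 and doing two
// 32-bit selects on the low and high halves, then rebuilding the 64-bit value.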
Tom Stellard0ec134f2014-02-04 17:18:40 +00005651SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005652 EVT VT = Op.getValueType();
5653 assert(VT.getSizeInBits() == 64);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005654
5655 SDLoc DL(Op);
5656 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005657
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005658 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
5659 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005660
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00005661 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
5662 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
5663
5664 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
5665 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005666
5667 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
5668
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00005669 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
5670 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005671
5672 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
5673
Ahmed Bougacha128f8732016-04-26 21:15:30 +00005674 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005675 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005676}
5677
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005678// Catch division cases where we can use shortcuts with rcp and rsq
5679// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005680SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
5681 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005682 SDLoc SL(Op);
5683 SDValue LHS = Op.getOperand(0);
5684 SDValue RHS = Op.getOperand(1);
5685 EVT VT = Op.getValueType();
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005686 const SDNodeFlags Flags = Op->getFlags();
Michael Berg7acc81b2018-05-04 18:48:20 +00005687 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005688
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00005689 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
5690 return SDValue();
5691
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005692 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00005693 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
Matt Arsenault979902b2016-08-02 22:25:04 +00005694 if (CLHS->isExactlyValue(1.0)) {
5695 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
4696 // the CI documentation have a worst-case error of 1 ulp.
5697 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
5698 // use it as long as we aren't trying to use denormals.
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005699 //
5700 // v_rcp_f16 and v_rsq_f16 DO support denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005701
Matt Arsenault979902b2016-08-02 22:25:04 +00005702 // 1.0 / sqrt(x) -> rsq(x)
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005703
Matt Arsenault979902b2016-08-02 22:25:04 +00005704 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
5705 // error seems really high at 2^29 ULP.
5706 if (RHS.getOpcode() == ISD::FSQRT)
5707 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
5708
5709 // 1.0 / x -> rcp(x)
5710 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
5711 }
5712
5713 // Same as for 1.0, but expand the sign out of the constant.
5714 if (CLHS->isExactlyValue(-1.0)) {
5715 // -1.0 / x -> rcp (fneg x)
5716 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
5717 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
5718 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005719 }
5720 }
5721
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005722 if (Unsafe) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005723 // Turn into multiply by the reciprocal.
5724 // x / y -> x * (1.0 / y)
5725 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005726 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005727 }
5728
5729 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005730}
5731
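// Chain/glue-aware wrappers around FMUL and FMA. When the f32 divide sequence
// below toggles the single-precision denormal mode with s_setreg, these keep
// the scaled operations glued between the two mode switches so the scheduler
// cannot move them outside the denormal-enabled region.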
Tom Stellard8485fa02016-12-07 02:42:15 +00005732static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5733 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
5734 if (GlueChain->getNumValues() <= 1) {
5735 return DAG.getNode(Opcode, SL, VT, A, B);
5736 }
5737
5738 assert(GlueChain->getNumValues() == 3);
5739
5740 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5741 switch (Opcode) {
5742 default: llvm_unreachable("no chain equivalent for opcode");
5743 case ISD::FMUL:
5744 Opcode = AMDGPUISD::FMUL_W_CHAIN;
5745 break;
5746 }
5747
5748 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
5749 GlueChain.getValue(2));
5750}
5751
5752static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5753 EVT VT, SDValue A, SDValue B, SDValue C,
5754 SDValue GlueChain) {
5755 if (GlueChain->getNumValues() <= 1) {
5756 return DAG.getNode(Opcode, SL, VT, A, B, C);
5757 }
5758
5759 assert(GlueChain->getNumValues() == 3);
5760
5761 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5762 switch (Opcode) {
5763 default: llvm_unreachable("no chain equivalent for opcode");
5764 case ISD::FMA:
5765 Opcode = AMDGPUISD::FMA_W_CHAIN;
5766 break;
5767 }
5768
5769 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
5770 GlueChain.getValue(2));
5771}
5772
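// f16 division is done in f32: extend both operands, multiply the numerator by
// the f32 reciprocal of the denominator, round the quotient back to f16 and
// let div_fixup patch up the special cases.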
Matt Arsenault4052a572016-12-22 03:05:41 +00005773SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005774 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
5775 return FastLowered;
5776
Matt Arsenault4052a572016-12-22 03:05:41 +00005777 SDLoc SL(Op);
5778 SDValue Src0 = Op.getOperand(0);
5779 SDValue Src1 = Op.getOperand(1);
5780
5781 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
5782 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
5783
5784 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
5785 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
5786
5787 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
5788 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
5789
5790 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
5791}
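
// Sketch of the f16 path above: both operands are extended to f32, one
// reciprocal multiply produces the estimate, and DIV_FIXUP is expected to
// patch up the special-case inputs:
//   q32 = fpext(x) * RCP(fpext(y))
//   result = DIV_FIXUP(fptrunc(q32), y, x)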
5792
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005793// Faster 2.5 ULP division that does not support denormals.
5794SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
5795 SDLoc SL(Op);
5796 SDValue LHS = Op.getOperand(1);
5797 SDValue RHS = Op.getOperand(2);
5798
5799 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
5800
5801 const APFloat K0Val(BitsToFloat(0x6f800000));
5802 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
5803
5804 const APFloat K1Val(BitsToFloat(0x2f800000));
5805 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
5806
5807 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
5808
5809 EVT SetCCVT =
5810 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
5811
5812 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
5813
5814 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
5815
5816 // TODO: Should this propagate fast-math-flags?
5817 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
5818
5819 // rcp does not support denormals.
5820 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
5821
5822 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
5823
5824 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
5825}
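
// Worked example of the scaling above: K0 = 0x6f800000 is 2^96 and
// K1 = 0x2f800000 is 2^-32.  If |y| > 2^96 the denominator is pre-scaled by
// 2^-32 and the same factor rescales the final product, which keeps the
// intermediate reciprocal out of the denormal range that rcp cannot handle.
// With s = 2^-32 (or 1.0 otherwise):
//   x / y  ~=  s * (x * RCP(y * s))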
5826
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005827SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005828 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00005829 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005830
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005831 SDLoc SL(Op);
5832 SDValue LHS = Op.getOperand(0);
5833 SDValue RHS = Op.getOperand(1);
5834
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005835 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005836
Wei Dinged0f97f2016-06-09 19:17:15 +00005837 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005838
Tom Stellard8485fa02016-12-07 02:42:15 +00005839 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5840 RHS, RHS, LHS);
5841 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5842 LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005843
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00005844 // Denominator is scaled to not be denormal, so using rcp is ok.
Tom Stellard8485fa02016-12-07 02:42:15 +00005845 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
5846 DenominatorScaled);
5847 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
5848 DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005849
Tom Stellard8485fa02016-12-07 02:42:15 +00005850 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
5851 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
5852 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005853
Tom Stellard8485fa02016-12-07 02:42:15 +00005854 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005855
Tom Stellard8485fa02016-12-07 02:42:15 +00005856 if (!Subtarget->hasFP32Denormals()) {
5857 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
5858 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
5859 SL, MVT::i32);
5860 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
5861 DAG.getEntryNode(),
5862 EnableDenormValue, BitField);
5863 SDValue Ops[3] = {
5864 NegDivScale0,
5865 EnableDenorm.getValue(0),
5866 EnableDenorm.getValue(1)
5867 };
Matt Arsenault37fefd62016-06-10 02:18:02 +00005868
Tom Stellard8485fa02016-12-07 02:42:15 +00005869 NegDivScale0 = DAG.getMergeValues(Ops, SL);
5870 }
5871
5872 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
5873 ApproxRcp, One, NegDivScale0);
5874
5875 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
5876 ApproxRcp, Fma0);
5877
5878 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
5879 Fma1, Fma1);
5880
5881 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
5882 NumeratorScaled, Mul);
5883
5884 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
5885
5886 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
5887 NumeratorScaled, Fma3);
5888
5889 if (!Subtarget->hasFP32Denormals()) {
5890 const SDValue DisableDenormValue =
5891 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
5892 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
5893 Fma4.getValue(1),
5894 DisableDenormValue,
5895 BitField,
5896 Fma4.getValue(2));
5897
5898 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
5899 DisableDenorm, DAG.getRoot());
5900 DAG.setRoot(OutputChain);
5901 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005902
Wei Dinged0f97f2016-06-09 19:17:15 +00005903 SDValue Scale = NumeratorScaled.getValue(1);
Tom Stellard8485fa02016-12-07 02:42:15 +00005904 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
5905 Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005906
Wei Dinged0f97f2016-06-09 19:17:15 +00005907 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005908}
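
// Summary sketch of the sequence built above (an informal reading of the
// code):
//   d  = DIV_SCALE(rhs, rhs, lhs)            n  = DIV_SCALE(lhs, rhs, lhs)
//   r0 = RCP(d)
//   r1 = fma(fma(-d, r0, 1.0), r0, r0)       // refine the reciprocal
//   q  = n * r1
//   q' = fma(fma(-d, q, n), r1, q)           // refine the quotient
//   e  = fma(-d, q', n)
//   result = DIV_FIXUP(DIV_FMAS(e, r1, q', scale), rhs, lhs)
// When f32 denormals are disabled, the fma chain is bracketed by SETREG
// writes to what appears to be the f32 denormal field of the MODE register
// (offset 4, width 2), since the intermediate values may be denormal.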
5909
5910SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005911 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005912 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005913
5914 SDLoc SL(Op);
5915 SDValue X = Op.getOperand(0);
5916 SDValue Y = Op.getOperand(1);
5917
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005918 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005919
5920 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
5921
5922 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
5923
5924 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
5925
5926 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
5927
5928 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
5929
5930 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
5931
5932 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
5933
5934 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
5935
5936 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
5937 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
5938
5939 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
5940 NegDivScale0, Mul, DivScale1);
5941
5942 SDValue Scale;
5943
Tom Stellard5bfbae52018-07-11 20:59:01 +00005944 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005945 // Workaround a hardware bug on SI where the condition output from div_scale
5946 // is not usable.
5947
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005948 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005949
5950 // Figure out which scale to use for div_fmas.
5951 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
5952 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
5953 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
5954 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
5955
5956 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
5957 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
5958
5959 SDValue Scale0Hi
5960 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
5961 SDValue Scale1Hi
5962 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
5963
5964 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
5965 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
5966 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
5967 } else {
5968 Scale = DivScale1.getValue(1);
5969 }
5970
5971 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
5972 Fma4, Fma3, Mul, Scale);
5973
5974 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005975}
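
// Informal summary of the f64 path above: both operands are DIV_SCALE'd, the
// reciprocal of the scaled denominator is refined with two fma steps, the
// quotient estimate gets one fma correction, and DIV_FMAS/DIV_FIXUP produce
// the final result.  On SI the i1 output of DIV_SCALE is unusable, so the
// scale flag is reconstructed by comparing the high dwords of the scaled
// values against the original operands and XORing the two comparisons.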
5976
5977SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
5978 EVT VT = Op.getValueType();
5979
5980 if (VT == MVT::f32)
5981 return LowerFDIV32(Op, DAG);
5982
5983 if (VT == MVT::f64)
5984 return LowerFDIV64(Op, DAG);
5985
Matt Arsenault4052a572016-12-22 03:05:41 +00005986 if (VT == MVT::f16)
5987 return LowerFDIV16(Op, DAG);
5988
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005989 llvm_unreachable("Unexpected type for fdiv");
5990}
5991
Tom Stellard81d871d2013-11-13 23:36:50 +00005992SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5993 SDLoc DL(Op);
5994 StoreSDNode *Store = cast<StoreSDNode>(Op);
5995 EVT VT = Store->getMemoryVT();
5996
Matt Arsenault95245662016-02-11 05:32:46 +00005997 if (VT == MVT::i1) {
5998 return DAG.getTruncStore(Store->getChain(), DL,
5999 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
6000 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00006001 }
6002
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006003 assert(VT.isVector() &&
6004 Store->getValue().getValueType().getScalarType() == MVT::i32);
6005
6006 unsigned AS = Store->getAddressSpace();
6007 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
6008 AS, Store->getAlignment())) {
6009 return expandUnalignedStore(Store, DAG);
6010 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006011
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006012 MachineFunction &MF = DAG.getMachineFunction();
6013 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6014 // If there is a possibility that flat instructions access scratch memory
6015 // then we need to use the same legalization rules we use for private.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006016 if (AS == AMDGPUASI.FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006017 AS = MFI->hasFlatScratchInit() ?
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006018 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006019
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006020 unsigned NumElements = VT.getVectorNumElements();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006021 if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
6022 AS == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006023 if (NumElements > 4)
6024 return SplitVectorStore(Op, DAG);
6025 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006026 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006027 switch (Subtarget->getMaxPrivateElementSize()) {
6028 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006029 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006030 case 8:
6031 if (NumElements > 2)
6032 return SplitVectorStore(Op, DAG);
6033 return SDValue();
6034 case 16:
6035 if (NumElements > 4)
6036 return SplitVectorStore(Op, DAG);
6037 return SDValue();
6038 default:
6039 llvm_unreachable("unsupported private_element_size");
6040 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006041 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006042 // Use ds_write_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006043 if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006044 VT.getStoreSize() == 16)
6045 return SDValue();
6046
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006047 if (NumElements > 2)
6048 return SplitVectorStore(Op, DAG);
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006049 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006050 } else {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006051 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00006052 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006053}
6054
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006055SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006056 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006057 EVT VT = Op.getValueType();
6058 SDValue Arg = Op.getOperand(0);
Sanjay Patela2607012015-09-16 16:31:21 +00006059 // TODO: Should this propagate fast-math-flags?
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006060 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
6061 DAG.getNode(ISD::FMUL, DL, VT, Arg,
6062 DAG.getConstantFP(0.5/M_PI, DL,
6063 VT)));
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006064
6065 switch (Op.getOpcode()) {
6066 case ISD::FCOS:
6067 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
6068 case ISD::FSIN:
6069 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
6070 default:
6071 llvm_unreachable("Wrong trig opcode");
6072 }
6073}
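
// Example of the trig lowering above: the hardware sin/cos take their
// argument as a fraction of a full period, so
//   fsin x  -->  SIN_HW(FRACT(x * 0.15915494))    // 0.5 / pi
// and likewise fcos becomes COS_HW of the same scaled fraction.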
6074
Tom Stellard354a43c2016-04-01 18:27:37 +00006075SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
6076 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
6077 assert(AtomicNode->isCompareAndSwap());
6078 unsigned AS = AtomicNode->getAddressSpace();
6079
6080 // No custom lowering required for local address space
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006081 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
Tom Stellard354a43c2016-04-01 18:27:37 +00006082 return Op;
6083
6084 // Non-local address space requires custom lowering for atomic compare
6085 // and swap; the cmp and swap values go in a v2i32, or v2i64 for the _X2 variants.
6086 SDLoc DL(Op);
6087 SDValue ChainIn = Op.getOperand(0);
6088 SDValue Addr = Op.getOperand(1);
6089 SDValue Old = Op.getOperand(2);
6090 SDValue New = Op.getOperand(3);
6091 EVT VT = Op.getValueType();
6092 MVT SimpleVT = VT.getSimpleVT();
6093 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
6094
Ahmed Bougacha128f8732016-04-26 21:15:30 +00006095 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00006096 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00006097
6098 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
6099 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00006100}
6101
Tom Stellard75aadc22012-12-11 21:25:42 +00006102//===----------------------------------------------------------------------===//
6103// Custom DAG optimizations
6104//===----------------------------------------------------------------------===//
6105
Matt Arsenault364a6742014-06-11 17:50:44 +00006106SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00006107 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00006108 EVT VT = N->getValueType(0);
6109 EVT ScalarVT = VT.getScalarType();
6110 if (ScalarVT != MVT::f32)
6111 return SDValue();
6112
6113 SelectionDAG &DAG = DCI.DAG;
6114 SDLoc DL(N);
6115
6116 SDValue Src = N->getOperand(0);
6117 EVT SrcVT = Src.getValueType();
6118
6119 // TODO: We could try to match extracting the higher bytes, which would be
6120 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
6121 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
6122 // about in practice.
Craig Topper80d3bb32018-03-06 19:44:52 +00006123 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
Matt Arsenault364a6742014-06-11 17:50:44 +00006124 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
6125 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
6126 DCI.AddToWorklist(Cvt.getNode());
6127 return Cvt;
6128 }
6129 }
6130
Matt Arsenault364a6742014-06-11 17:50:44 +00006131 return SDValue();
6132}
6133
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006134// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
6135
6136// This is a variant of
6137// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
6138//
6139 // The normal DAG combiner will do this, but only if the add has one use,
6140 // since doing it otherwise would increase the number of instructions.
6141//
6142// This prevents us from seeing a constant offset that can be folded into a
6143// memory instruction's addressing mode. If we know the resulting add offset of
6144// a pointer can be folded into an addressing offset, we can replace the pointer
6145 // operand with the add of the new constant offset. This eliminates one of the uses,
6146// and may allow the remaining use to also be simplified.
6147//
6148SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
6149 unsigned AddrSpace,
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006150 EVT MemVT,
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006151 DAGCombinerInfo &DCI) const {
6152 SDValue N0 = N->getOperand(0);
6153 SDValue N1 = N->getOperand(1);
6154
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006155 // We only do this to handle cases where it's profitable when there are
6156 // multiple uses of the add, so defer to the standard combine.
Matt Arsenaultc8903122017-11-14 23:46:42 +00006157 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
6158 N0->hasOneUse())
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006159 return SDValue();
6160
6161 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
6162 if (!CN1)
6163 return SDValue();
6164
6165 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
6166 if (!CAdd)
6167 return SDValue();
6168
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006169 // If the resulting offset is too large, we can't fold it into the addressing
6170 // mode offset.
6171 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006172 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
6173
6174 AddrMode AM;
6175 AM.HasBaseReg = true;
6176 AM.BaseOffs = Offset.getSExtValue();
6177 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006178 return SDValue();
6179
6180 SelectionDAG &DAG = DCI.DAG;
6181 SDLoc SL(N);
6182 EVT VT = N->getValueType(0);
6183
6184 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006185 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006186
Matt Arsenaulte5e0c742017-11-13 05:33:35 +00006187 SDNodeFlags Flags;
6188 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
6189 (N0.getOpcode() == ISD::OR ||
6190 N0->getFlags().hasNoUnsignedWrap()));
6191
6192 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006193}
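
// Worked example (with made-up constants): if a load uses the pointer
//   (shl (add x, 4), 2)
// this combine rewrites it to
//   (add (shl x, 2), 16)
// so the +16 can be folded into the instruction's immediate offset, provided
// isLegalAddressingMode accepts a 16 byte offset for that address space.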
6194
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00006195SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
6196 DAGCombinerInfo &DCI) const {
6197 SDValue Ptr = N->getBasePtr();
6198 SelectionDAG &DAG = DCI.DAG;
6199 SDLoc SL(N);
6200
6201 // TODO: We could also do this for multiplies.
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006202 if (Ptr.getOpcode() == ISD::SHL) {
6203 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
6204 N->getMemoryVT(), DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00006205 if (NewPtr) {
6206 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
6207
6208 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
6209 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
6210 }
6211 }
6212
6213 return SDValue();
6214}
6215
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006216static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
6217 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
6218 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
6219 (Opc == ISD::XOR && Val == 0);
6220}
6221
6222// Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor. This
6223// will typically happen anyway for a VALU 64-bit bitwise op. This exposes other 32-bit
6224// integer combine opportunities since most 64-bit operations are decomposed
6225// this way. TODO: We won't want this for SALU especially if it is an inline
6226// immediate.
6227SDValue SITargetLowering::splitBinaryBitConstantOp(
6228 DAGCombinerInfo &DCI,
6229 const SDLoc &SL,
6230 unsigned Opc, SDValue LHS,
6231 const ConstantSDNode *CRHS) const {
6232 uint64_t Val = CRHS->getZExtValue();
6233 uint32_t ValLo = Lo_32(Val);
6234 uint32_t ValHi = Hi_32(Val);
6235 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6236
6237 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
6238 bitOpWithConstantIsReducible(Opc, ValHi)) ||
6239 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
6240 // If we need to materialize a 64-bit immediate, it will be split up later
6241 // anyway. Avoid creating the harder to understand 64-bit immediate
6242 // materialization.
6243 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
6244 }
6245
6246 return SDValue();
6247}
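
// Example (with a made-up constant): for (xor i64 x, 0xFFFFFFFF00000000) the
// low half of the constant is 0, so the low 32-bit xor is a no-op and only
// the high 32-bit xor with -1 remains; splitBinaryBitConstantOpImpl builds
// that split form without materializing the 64-bit immediate.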
6248
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006249// Returns true if the argument is a boolean value which is not serialized into
6250// memory or an argument and does not require v_cndmask_b32 to be deserialized.
6251static bool isBoolSGPR(SDValue V) {
6252 if (V.getValueType() != MVT::i1)
6253 return false;
6254 switch (V.getOpcode()) {
6255 default: break;
6256 case ISD::SETCC:
6257 case ISD::AND:
6258 case ISD::OR:
6259 case ISD::XOR:
6260 case AMDGPUISD::FP_CLASS:
6261 return true;
6262 }
6263 return false;
6264}
6265
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006266// If a constant has all zeroes or all ones within each byte return it.
6267// Otherwise return 0.
6268static uint32_t getConstantPermuteMask(uint32_t C) {
6269 // 0xff for any zero byte in the mask
6270 uint32_t ZeroByteMask = 0;
6271 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
6272 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
6273 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
6274 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
6275 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
6276 if ((NonZeroByteMask & C) != NonZeroByteMask)
6277 return 0; // Partial bytes selected.
6278 return C;
6279}
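
// A few hand-worked samples, for illustration:
//   getConstantPermuteMask(0x00ff00ff) == 0x00ff00ff   // whole bytes only
//   getConstantPermuteMask(0xffff0000) == 0xffff0000
//   getConstantPermuteMask(0x00f0ffff) == 0             // byte 2 is partial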
6280
6281// Check if a node selects whole bytes from its operand 0 starting at a byte
6282// boundary while masking the rest. Returns the select mask as used by
6283// v_perm_b32, or ~0 (all bits set) if no such mask exists.
6284// Note byte select encoding:
6285// value 0-3 selects corresponding source byte;
6286// value 0xc selects zero;
6287// value 0xff selects 0xff.
6288static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
6289 assert(V.getValueSizeInBits() == 32);
6290
6291 if (V.getNumOperands() != 2)
6292 return ~0;
6293
6294 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
6295 if (!N1)
6296 return ~0;
6297
6298 uint32_t C = N1->getZExtValue();
6299
6300 switch (V.getOpcode()) {
6301 default:
6302 break;
6303 case ISD::AND:
6304 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
6305 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
6306 }
6307 break;
6308
6309 case ISD::OR:
6310 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
6311 return (0x03020100 & ~ConstMask) | ConstMask;
6312 }
6313 break;
6314
6315 case ISD::SHL:
6316 if (C % 8)
6317 return ~0;
6318
6319 return uint32_t((0x030201000c0c0c0cull << C) >> 32);
6320
6321 case ISD::SRL:
6322 if (C % 8)
6323 return ~0;
6324
6325 return uint32_t(0x0c0c0c0c03020100ull >> C);
6326 }
6327
6328 return ~0;
6329}
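
// Hand-worked samples of the encoding above, for illustration:
//   (and x, 0x0000ffff) --> 0x0c0c0100   // bytes 0-1 pass through, 2-3 zero
//   (or  x, 0xffff0000) --> 0xffff0100   // bytes 2-3 forced to 0xff
//   (shl x, 16)         --> 0x01000c0c
//   (srl x, 8)          --> 0x0c030201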
6330
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006331SDValue SITargetLowering::performAndCombine(SDNode *N,
6332 DAGCombinerInfo &DCI) const {
6333 if (DCI.isBeforeLegalize())
6334 return SDValue();
6335
6336 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006337 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006338 SDValue LHS = N->getOperand(0);
6339 SDValue RHS = N->getOperand(1);
6340
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006341
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00006342 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
6343 if (VT == MVT::i64 && CRHS) {
6344 if (SDValue Split
6345 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
6346 return Split;
6347 }
6348
6349 if (CRHS && VT == MVT::i32) {
6350 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
6351 // nb = number of trailing zeroes in mask
6352 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
6353 // given that we are selecting 8 or 16 bit fields starting at byte boundary.
6354 uint64_t Mask = CRHS->getZExtValue();
6355 unsigned Bits = countPopulation(Mask);
6356 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
6357 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
6358 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
6359 unsigned Shift = CShift->getZExtValue();
6360 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
6361 unsigned Offset = NB + Shift;
6362 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
6363 SDLoc SL(N);
6364 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
6365 LHS->getOperand(0),
6366 DAG.getConstant(Offset, SL, MVT::i32),
6367 DAG.getConstant(Bits, SL, MVT::i32));
6368 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
6369 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
6370 DAG.getValueType(NarrowVT));
6371 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
6372 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
6373 return Shl;
6374 }
6375 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006376 }
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006377
6378 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
6379 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
6380 isa<ConstantSDNode>(LHS.getOperand(2))) {
6381 uint32_t Sel = getConstantPermuteMask(Mask);
6382 if (!Sel)
6383 return SDValue();
6384
6385 // Select 0xc for all zero bytes
6386 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
6387 SDLoc DL(N);
6388 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
6389 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
6390 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006391 }
6392
6393 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
6394 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
6395 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006396 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6397 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
6398
6399 SDValue X = LHS.getOperand(0);
6400 SDValue Y = RHS.getOperand(0);
6401 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
6402 return SDValue();
6403
6404 if (LCC == ISD::SETO) {
6405 if (X != LHS.getOperand(1))
6406 return SDValue();
6407
6408 if (RCC == ISD::SETUNE) {
6409 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
6410 if (!C1 || !C1->isInfinity() || C1->isNegative())
6411 return SDValue();
6412
6413 const uint32_t Mask = SIInstrFlags::N_NORMAL |
6414 SIInstrFlags::N_SUBNORMAL |
6415 SIInstrFlags::N_ZERO |
6416 SIInstrFlags::P_ZERO |
6417 SIInstrFlags::P_SUBNORMAL |
6418 SIInstrFlags::P_NORMAL;
6419
6420 static_assert(((~(SIInstrFlags::S_NAN |
6421 SIInstrFlags::Q_NAN |
6422 SIInstrFlags::N_INFINITY |
6423 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
6424 "mask not equal");
6425
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006426 SDLoc DL(N);
6427 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6428 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006429 }
6430 }
6431 }
6432
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006433 if (VT == MVT::i32 &&
6434 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
6435 // and x, (sext cc from i1) => select cc, x, 0
6436 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
6437 std::swap(LHS, RHS);
6438 if (isBoolSGPR(RHS.getOperand(0)))
6439 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
6440 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
6441 }
6442
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006443 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
6444 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6445 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
6446 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
6447 uint32_t LHSMask = getPermuteMask(DAG, LHS);
6448 uint32_t RHSMask = getPermuteMask(DAG, RHS);
6449 if (LHSMask != ~0u && RHSMask != ~0u) {
6450 // Canonicalize the expression in an attempt to have fewer unique masks
6451 // and therefore fewer registers used to hold the masks.
6452 if (LHSMask > RHSMask) {
6453 std::swap(LHSMask, RHSMask);
6454 std::swap(LHS, RHS);
6455 }
6456
6457 // Select 0xc for each lane used from source operand. Zero has 0xc mask
6458 // set, 0xff bytes have 0xff in the mask, and actual lanes are in the 0-3 range.
6459 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6460 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6461
6462 // Check if we need to combine values from two sources within a byte.
6463 if (!(LHSUsedLanes & RHSUsedLanes) &&
6464 // If we select the high and the low word, keep it for SDWA.
6465 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
6466 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
6467 // Each byte of each mask is either a selector in the 0-3 range, 0x0c
6468 // (zero) or 0xff. If either mask has 0x0c in a byte, the combined byte
6469 // must be 0x0c. Otherwise the mask byte that is not 0xff wins. ANDing
6470 // the masks gives the correct result except where a 0-3 selector meets
6471 // 0x0c; those bytes are corrected back to 0x0c below.
6472 uint32_t Mask = LHSMask & RHSMask;
6473 for (unsigned I = 0; I < 32; I += 8) {
6474 uint32_t ByteSel = 0xff << I;
6475 if ((LHSMask & ByteSel) == (0x0cu << I) || (RHSMask & ByteSel) == (0x0cu << I))
6476 Mask |= 0x0cu << I;
6477 }
6478
6479 // Add 4 to each active LHS lane. It will not affect any existing 0xff
6480 // or 0x0c.
6481 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
6482 SDLoc DL(N);
6483
6484 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
6485 LHS.getOperand(0), RHS.getOperand(0),
6486 DAG.getConstant(Sel, DL, MVT::i32));
6487 }
6488 }
6489 }
6490
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006491 return SDValue();
6492}
6493
Matt Arsenaultf2290332015-01-06 23:00:39 +00006494SDValue SITargetLowering::performOrCombine(SDNode *N,
6495 DAGCombinerInfo &DCI) const {
6496 SelectionDAG &DAG = DCI.DAG;
6497 SDValue LHS = N->getOperand(0);
6498 SDValue RHS = N->getOperand(1);
6499
Matt Arsenault3b082382016-04-12 18:24:38 +00006500 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006501 if (VT == MVT::i1) {
6502 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
6503 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
6504 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
6505 SDValue Src = LHS.getOperand(0);
6506 if (Src != RHS.getOperand(0))
6507 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00006508
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006509 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
6510 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
6511 if (!CLHS || !CRHS)
6512 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00006513
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006514 // Only 10 bits are used.
6515 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00006516
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006517 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
6518 SDLoc DL(N);
6519 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6520 Src, DAG.getConstant(NewMask, DL, MVT::i32));
6521 }
Matt Arsenault3b082382016-04-12 18:24:38 +00006522
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006523 return SDValue();
6524 }
6525
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006526 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
6527 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
6528 LHS.getOpcode() == AMDGPUISD::PERM &&
6529 isa<ConstantSDNode>(LHS.getOperand(2))) {
6530 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
6531 if (!Sel)
6532 return SDValue();
6533
6534 Sel |= LHS.getConstantOperandVal(2);
6535 SDLoc DL(N);
6536 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
6537 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
6538 }
6539
6540 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
6541 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6542 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
6543 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
6544 uint32_t LHSMask = getPermuteMask(DAG, LHS);
6545 uint32_t RHSMask = getPermuteMask(DAG, RHS);
6546 if (LHSMask != ~0u && RHSMask != ~0u) {
6547 // Canonicalize the expression in an attempt to have fewer unique masks
6548 // and therefore fewer registers used to hold the masks.
6549 if (LHSMask > RHSMask) {
6550 std::swap(LHSMask, RHSMask);
6551 std::swap(LHS, RHS);
6552 }
6553
6554 // Select 0xc for each lane used from source operand. Zero has 0xc mask
6555 // set, 0xff bytes have 0xff in the mask, and actual lanes are in the 0-3 range.
6556 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6557 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6558
6559 // Check of we need to combine values from two sources within a byte.
6560 if (!(LHSUsedLanes & RHSUsedLanes) &&
6561 // If we select high and lower word keep it for SDWA.
6562 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
6563 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
6564 // Kill zero bytes selected by other mask. Zero value is 0xc.
6565 LHSMask &= ~RHSUsedLanes;
6566 RHSMask &= ~LHSUsedLanes;
6567 // Add 4 to each active LHS lane
6568 LHSMask |= LHSUsedLanes & 0x04040404;
6569 // Combine masks
6570 uint32_t Sel = LHSMask | RHSMask;
6571 SDLoc DL(N);
6572
6573 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
6574 LHS.getOperand(0), RHS.getOperand(0),
6575 DAG.getConstant(Sel, DL, MVT::i32));
6576 }
6577 }
6578 }
6579
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006580 if (VT != MVT::i64)
6581 return SDValue();
6582
6583 // TODO: This could be a generic combine with a predicate for extracting the
6584 // high half of an integer being free.
6585
6586 // (or i64:x, (zero_extend i32:y)) ->
6587 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
6588 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
6589 RHS.getOpcode() != ISD::ZERO_EXTEND)
6590 std::swap(LHS, RHS);
6591
6592 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
6593 SDValue ExtSrc = RHS.getOperand(0);
6594 EVT SrcVT = ExtSrc.getValueType();
6595 if (SrcVT == MVT::i32) {
6596 SDLoc SL(N);
6597 SDValue LowLHS, HiBits;
6598 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
6599 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
6600
6601 DCI.AddToWorklist(LowOr.getNode());
6602 DCI.AddToWorklist(HiBits.getNode());
6603
6604 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
6605 LowOr, HiBits);
6606 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00006607 }
6608 }
6609
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006610 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
6611 if (CRHS) {
6612 if (SDValue Split
6613 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
6614 return Split;
6615 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00006616
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006617 return SDValue();
6618}
Matt Arsenaultf2290332015-01-06 23:00:39 +00006619
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006620SDValue SITargetLowering::performXorCombine(SDNode *N,
6621 DAGCombinerInfo &DCI) const {
6622 EVT VT = N->getValueType(0);
6623 if (VT != MVT::i64)
6624 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00006625
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006626 SDValue LHS = N->getOperand(0);
6627 SDValue RHS = N->getOperand(1);
6628
6629 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
6630 if (CRHS) {
6631 if (SDValue Split
6632 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
6633 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00006634 }
6635
6636 return SDValue();
6637}
6638
Matt Arsenault5cf42712017-04-06 20:58:30 +00006639// Instructions that will be lowered with a final instruction that zeros the
6640// high result bits.
6641// XXX - probably only need to list legal operations.
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006642static bool fp16SrcZerosHighBits(unsigned Opc) {
6643 switch (Opc) {
Matt Arsenault5cf42712017-04-06 20:58:30 +00006644 case ISD::FADD:
6645 case ISD::FSUB:
6646 case ISD::FMUL:
6647 case ISD::FDIV:
6648 case ISD::FREM:
6649 case ISD::FMA:
6650 case ISD::FMAD:
6651 case ISD::FCANONICALIZE:
6652 case ISD::FP_ROUND:
6653 case ISD::UINT_TO_FP:
6654 case ISD::SINT_TO_FP:
6655 case ISD::FABS:
6656 // Fabs is lowered to a bit operation, but it's an and which will clear the
6657 // high bits anyway.
6658 case ISD::FSQRT:
6659 case ISD::FSIN:
6660 case ISD::FCOS:
6661 case ISD::FPOWI:
6662 case ISD::FPOW:
6663 case ISD::FLOG:
6664 case ISD::FLOG2:
6665 case ISD::FLOG10:
6666 case ISD::FEXP:
6667 case ISD::FEXP2:
6668 case ISD::FCEIL:
6669 case ISD::FTRUNC:
6670 case ISD::FRINT:
6671 case ISD::FNEARBYINT:
6672 case ISD::FROUND:
6673 case ISD::FFLOOR:
6674 case ISD::FMINNUM:
6675 case ISD::FMAXNUM:
6676 case AMDGPUISD::FRACT:
6677 case AMDGPUISD::CLAMP:
6678 case AMDGPUISD::COS_HW:
6679 case AMDGPUISD::SIN_HW:
6680 case AMDGPUISD::FMIN3:
6681 case AMDGPUISD::FMAX3:
6682 case AMDGPUISD::FMED3:
6683 case AMDGPUISD::FMAD_FTZ:
6684 case AMDGPUISD::RCP:
6685 case AMDGPUISD::RSQ:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00006686 case AMDGPUISD::RCP_IFLAG:
Matt Arsenault5cf42712017-04-06 20:58:30 +00006687 case AMDGPUISD::LDEXP:
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006688 return true;
Matt Arsenault5cf42712017-04-06 20:58:30 +00006689 default:
6690 // fcopysign, select and others may be lowered to 32-bit bit operations
6691 // which don't zero the high bits.
6692 return false;
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006693 }
6694}
6695
6696SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
6697 DAGCombinerInfo &DCI) const {
6698 if (!Subtarget->has16BitInsts() ||
6699 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6700 return SDValue();
6701
6702 EVT VT = N->getValueType(0);
6703 if (VT != MVT::i32)
6704 return SDValue();
6705
6706 SDValue Src = N->getOperand(0);
6707 if (Src.getValueType() != MVT::i16)
6708 return SDValue();
6709
6710 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
6711 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
6712 if (Src.getOpcode() == ISD::BITCAST) {
6713 SDValue BCSrc = Src.getOperand(0);
6714 if (BCSrc.getValueType() == MVT::f16 &&
6715 fp16SrcZerosHighBits(BCSrc.getOpcode()))
6716 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
6717 }
6718
6719 return SDValue();
6720}
6721
Matt Arsenaultf2290332015-01-06 23:00:39 +00006722SDValue SITargetLowering::performClassCombine(SDNode *N,
6723 DAGCombinerInfo &DCI) const {
6724 SelectionDAG &DAG = DCI.DAG;
6725 SDValue Mask = N->getOperand(1);
6726
6727 // fp_class x, 0 -> false
6728 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
6729 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006730 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00006731 }
6732
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00006733 if (N->getOperand(0).isUndef())
6734 return DAG.getUNDEF(MVT::i1);
6735
Matt Arsenaultf2290332015-01-06 23:00:39 +00006736 return SDValue();
6737}
6738
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00006739SDValue SITargetLowering::performRcpCombine(SDNode *N,
6740 DAGCombinerInfo &DCI) const {
6741 EVT VT = N->getValueType(0);
6742 SDValue N0 = N->getOperand(0);
6743
6744 if (N0.isUndef())
6745 return N0;
6746
6747 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
6748 N0.getOpcode() == ISD::SINT_TO_FP)) {
6749 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
6750 N->getFlags());
6751 }
6752
6753 return AMDGPUTargetLowering::performRcpCombine(N, DCI);
6754}
6755
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006756static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
6757 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
6758 return true;
6759
6760 return DAG.isKnownNeverNaN(Op);
6761}
6762
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006763static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
Tom Stellard5bfbae52018-07-11 20:59:01 +00006764 const GCNSubtarget *ST, unsigned MaxDepth=5) {
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006765 // If source is a result of another standard FP operation it is already in
6766 // canonical form.
6767
6768 switch (Op.getOpcode()) {
6769 default:
6770 break;
6771
6772 // These will flush denorms if required.
6773 case ISD::FADD:
6774 case ISD::FSUB:
6775 case ISD::FMUL:
6776 case ISD::FSQRT:
6777 case ISD::FCEIL:
6778 case ISD::FFLOOR:
6779 case ISD::FMA:
6780 case ISD::FMAD:
6781
6782 case ISD::FCANONICALIZE:
6783 return true;
6784
6785 case ISD::FP_ROUND:
6786 return Op.getValueType().getScalarType() != MVT::f16 ||
6787 ST->hasFP16Denormals();
6788
6789 case ISD::FP_EXTEND:
6790 return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
6791 ST->hasFP16Denormals();
6792
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006793 // FNEG and FABS can/will be lowered or combined as a bit operation, so we
6794 // need to check their inputs recursively to handle them.
6795 case ISD::FNEG:
6796 case ISD::FABS:
6797 return (MaxDepth > 0) &&
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006798 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006799
6800 case ISD::FSIN:
6801 case ISD::FCOS:
6802 case ISD::FSINCOS:
6803 return Op.getValueType().getScalarType() != MVT::f16;
6804
6805 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
6806 // For such targets we need to check their inputs recursively.
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006807 case ISD::FMINNUM:
6808 case ISD::FMAXNUM:
6809 case ISD::FMINNAN:
6810 case ISD::FMAXNAN:
6811
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006812 if (ST->supportsMinMaxDenormModes() &&
6813 DAG.isKnownNeverNaN(Op.getOperand(0)) &&
6814 DAG.isKnownNeverNaN(Op.getOperand(1)))
6815 return true;
6816
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006817 return (MaxDepth > 0) &&
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006818 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
6819 isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006820
6821 case ISD::ConstantFP: {
6822 auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
6823 return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
6824 }
6825 }
6826 return false;
6827}
6828
Matt Arsenault9cd90712016-04-14 01:42:16 +00006829// Constant fold canonicalize.
6830SDValue SITargetLowering::performFCanonicalizeCombine(
6831 SDNode *N,
6832 DAGCombinerInfo &DCI) const {
Matt Arsenault9cd90712016-04-14 01:42:16 +00006833 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault4aec86d2018-07-31 13:34:31 +00006834 SDValue N0 = N->getOperand(0);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006835
Matt Arsenault4aec86d2018-07-31 13:34:31 +00006836 // fcanonicalize undef -> qnan
6837 if (N0.isUndef()) {
6838 EVT VT = N->getValueType(0);
6839 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
6840 return DAG.getConstantFP(QNaN, SDLoc(N), VT);
6841 }
6842
6843 ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006844 if (!CFP) {
6845 SDValue N0 = N->getOperand(0);
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006846 EVT VT = N0.getValueType().getScalarType();
6847 auto ST = getSubtarget();
6848
6849 if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
6850 (VT == MVT::f64 && ST->hasFP64Denormals()) ||
6851 (VT == MVT::f16 && ST->hasFP16Denormals())) &&
6852 DAG.isKnownNeverNaN(N0))
6853 return N0;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006854
6855 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
6856
6857 if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006858 isCanonicalized(DAG, N0, ST))
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006859 return N0;
6860
6861 return SDValue();
6862 }
6863
Matt Arsenault9cd90712016-04-14 01:42:16 +00006864 const APFloat &C = CFP->getValueAPF();
6865
6866 // Flush denormals to 0 if not enabled.
6867 if (C.isDenormal()) {
6868 EVT VT = N->getValueType(0);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006869 EVT SVT = VT.getScalarType();
6870 if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
Matt Arsenault9cd90712016-04-14 01:42:16 +00006871 return DAG.getConstantFP(0.0, SDLoc(N), VT);
6872
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006873 if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
Matt Arsenault9cd90712016-04-14 01:42:16 +00006874 return DAG.getConstantFP(0.0, SDLoc(N), VT);
Matt Arsenaultce841302016-12-22 03:05:37 +00006875
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006876 if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
Matt Arsenaultce841302016-12-22 03:05:37 +00006877 return DAG.getConstantFP(0.0, SDLoc(N), VT);
Matt Arsenault9cd90712016-04-14 01:42:16 +00006878 }
6879
6880 if (C.isNaN()) {
6881 EVT VT = N->getValueType(0);
6882 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
6883 if (C.isSignaling()) {
6884 // Quiet a signaling NaN.
6885 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6886 }
6887
6888 // Make sure it is the canonical NaN bitpattern.
6889 //
6890 // TODO: Can we use -1 as the canonical NaN value since it's an inline
6891 // immediate?
6892 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
6893 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6894 }
6895
Matt Arsenault4aec86d2018-07-31 13:34:31 +00006896 return N0;
Matt Arsenault9cd90712016-04-14 01:42:16 +00006897}
6898
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006899static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
6900 switch (Opc) {
6901 case ISD::FMAXNUM:
6902 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006903 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006904 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006905 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006906 return AMDGPUISD::UMAX3;
6907 case ISD::FMINNUM:
6908 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006909 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006910 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006911 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006912 return AMDGPUISD::UMIN3;
6913 default:
6914 llvm_unreachable("Not a min/max opcode");
6915 }
6916}
6917
Matt Arsenault10268f92017-02-27 22:40:39 +00006918SDValue SITargetLowering::performIntMed3ImmCombine(
6919 SelectionDAG &DAG, const SDLoc &SL,
6920 SDValue Op0, SDValue Op1, bool Signed) const {
Matt Arsenaultf639c322016-01-28 20:53:42 +00006921 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
6922 if (!K1)
6923 return SDValue();
6924
6925 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
6926 if (!K0)
6927 return SDValue();
6928
Matt Arsenaultf639c322016-01-28 20:53:42 +00006929 if (Signed) {
6930 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
6931 return SDValue();
6932 } else {
6933 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
6934 return SDValue();
6935 }
6936
6937 EVT VT = K0->getValueType(0);
Matt Arsenault10268f92017-02-27 22:40:39 +00006938 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
6939 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
6940 return DAG.getNode(Med3Opc, SL, VT,
6941 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
6942 }
Tom Stellard115a6152016-11-10 16:02:37 +00006943
Matt Arsenault10268f92017-02-27 22:40:39 +00006944 // If there isn't a 16-bit med3 operation, convert to 32-bit.
Tom Stellard115a6152016-11-10 16:02:37 +00006945 MVT NVT = MVT::i32;
6946 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6947
Matt Arsenault10268f92017-02-27 22:40:39 +00006948 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
6949 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
6950 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
Tom Stellard115a6152016-11-10 16:02:37 +00006951
Matt Arsenault10268f92017-02-27 22:40:39 +00006952 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
6953 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
Matt Arsenaultf639c322016-01-28 20:53:42 +00006954}
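
// Example of the combine this implements: (smin (smax x, -3), 5) becomes
// SMED3(x, -3, 5) since -3 < 5; the unsigned form uses UMED3.  For i16
// without a 16-bit med3 the operands are extended to i32, med3'd, and the
// result truncated back.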
6955
Matt Arsenault6b114d22017-08-30 01:20:17 +00006956static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
6957 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
6958 return C;
6959
6960 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
6961 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
6962 return C;
6963 }
6964
6965 return nullptr;
6966}
6967
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006968SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
6969 const SDLoc &SL,
6970 SDValue Op0,
6971 SDValue Op1) const {
Matt Arsenault6b114d22017-08-30 01:20:17 +00006972 ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
Matt Arsenaultf639c322016-01-28 20:53:42 +00006973 if (!K1)
6974 return SDValue();
6975
Matt Arsenault6b114d22017-08-30 01:20:17 +00006976 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
Matt Arsenaultf639c322016-01-28 20:53:42 +00006977 if (!K0)
6978 return SDValue();
6979
6980 // Ordered >= (although NaN inputs should have folded away by now).
6981 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
6982 if (Cmp == APFloat::cmpGreaterThan)
6983 return SDValue();
6984
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006985 // TODO: Check IEEE bit enabled?
Matt Arsenault6b114d22017-08-30 01:20:17 +00006986 EVT VT = Op0.getValueType();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006987 if (Subtarget->enableDX10Clamp()) {
6988 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
6989 // hardware fmed3 behavior converting to a min.
6990 // FIXME: Should this be allowing -0.0?
6991 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
6992 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
6993 }
6994
Matt Arsenault6b114d22017-08-30 01:20:17 +00006995 // med3 for f16 is only available on gfx9+, and not available for v2f16.
6996 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
6997 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
6998 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
6999 // then give the other result, which is different from med3 with a NaN
7000 // input.
7001 SDValue Var = Op0.getOperand(0);
7002 if (!isKnownNeverSNan(DAG, Var))
7003 return SDValue();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007004
Matt Arsenault6b114d22017-08-30 01:20:17 +00007005 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
7006 Var, SDValue(K0, 0), SDValue(K1, 0));
7007 }
Matt Arsenaultf639c322016-01-28 20:53:42 +00007008
Matt Arsenault6b114d22017-08-30 01:20:17 +00007009 return SDValue();
Matt Arsenaultf639c322016-01-28 20:53:42 +00007010}
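
// Example of the combine this implements: (fminnum (fmaxnum x, 2.0), 4.0)
// becomes FMED3(x, 2.0, 4.0) when x is known not to be a signaling NaN, and
// the special (0.0, 1.0) pair becomes CLAMP when dx10_clamp is enabled.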
7011
7012SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
7013 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007014 SelectionDAG &DAG = DCI.DAG;
7015
Matt Arsenault79a45db2017-02-22 23:53:37 +00007016 EVT VT = N->getValueType(0);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007017 unsigned Opc = N->getOpcode();
7018 SDValue Op0 = N->getOperand(0);
7019 SDValue Op1 = N->getOperand(1);
7020
7021 // Only do this if the inner op has one use since this would just increase
7022 // register pressure for no benefit.
7023
Matt Arsenault79a45db2017-02-22 23:53:37 +00007024
7025 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
Farhana Aleene80aeac2018-04-03 23:00:30 +00007026 !VT.isVector() && VT != MVT::f64 &&
Matt Arsenaultee324ff2017-05-17 19:25:06 +00007027 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
Matt Arsenault5b39b342016-01-28 20:53:48 +00007028 // max(max(a, b), c) -> max3(a, b, c)
7029 // min(min(a, b), c) -> min3(a, b, c)
7030 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
7031 SDLoc DL(N);
7032 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
7033 DL,
7034 N->getValueType(0),
7035 Op0.getOperand(0),
7036 Op0.getOperand(1),
7037 Op1);
7038 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007039
Matt Arsenault5b39b342016-01-28 20:53:48 +00007040 // Try commuted.
7041 // max(a, max(b, c)) -> max3(a, b, c)
7042 // min(a, min(b, c)) -> min3(a, b, c)
7043 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
7044 SDLoc DL(N);
7045 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
7046 DL,
7047 N->getValueType(0),
7048 Op0,
7049 Op1.getOperand(0),
7050 Op1.getOperand(1));
7051 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007052 }
7053
Matt Arsenaultf639c322016-01-28 20:53:42 +00007054 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
7055 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
7056 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
7057 return Med3;
7058 }
7059
7060 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
7061 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
7062 return Med3;
7063 }
7064
7065 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00007066 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
7067 (Opc == AMDGPUISD::FMIN_LEGACY &&
7068 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenault79a45db2017-02-22 23:53:37 +00007069 (VT == MVT::f32 || VT == MVT::f64 ||
Matt Arsenault6b114d22017-08-30 01:20:17 +00007070 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
7071 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007072 Op0.hasOneUse()) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00007073 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
7074 return Res;
7075 }
7076
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007077 return SDValue();
7078}
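
// Example of the combine above: (smax (smax a, b), c) and the commuted
// (smax a, (smax b, c)) both become SMAX3(a, b, c) when the inner node has a
// single use; the other min/max opcodes map via minMaxOpcToMin3Max3Opc.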
7079
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007080static bool isClampZeroToOne(SDValue A, SDValue B) {
7081 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
7082 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
7083 // FIXME: Should this be allowing -0.0?
7084 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
7085 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
7086 }
7087 }
7088
7089 return false;
7090}
7091
7092// FIXME: Should only worry about snans for version with chain.
7093SDValue SITargetLowering::performFMed3Combine(SDNode *N,
7094 DAGCombinerInfo &DCI) const {
7095 EVT VT = N->getValueType(0);
7096 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
7097 // NaNs. With a NaN input, the order of the operands may change the result.
7098
7099 SelectionDAG &DAG = DCI.DAG;
7100 SDLoc SL(N);
7101
7102 SDValue Src0 = N->getOperand(0);
7103 SDValue Src1 = N->getOperand(1);
7104 SDValue Src2 = N->getOperand(2);
7105
7106 if (isClampZeroToOne(Src0, Src1)) {
7107 // const_a, const_b, x -> clamp is safe in all cases including signaling
7108 // nans.
7109 // FIXME: Should this be allowing -0.0?
7110 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
7111 }
7112
7113 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
7114 // handling the case where dx10-clamp is disabled?
7115 if (Subtarget->enableDX10Clamp()) {
7116 // If NaN is clamped to 0, we are free to reorder the inputs.
7117
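// Bubble constant operands toward Src1/Src2 so the (x, 0.0, 1.0) clamp
// pattern can be matched below regardless of the original operand order.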
7118 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
7119 std::swap(Src0, Src1);
7120
7121 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
7122 std::swap(Src1, Src2);
7123
7124 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
7125 std::swap(Src0, Src1);
7126
7127 if (isClampZeroToOne(Src1, Src2))
7128 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
7129 }
7130
7131 return SDValue();
7132}
7133
Matt Arsenault1f17c662017-02-22 00:27:34 +00007134SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
7135 DAGCombinerInfo &DCI) const {
7136 SDValue Src0 = N->getOperand(0);
7137 SDValue Src1 = N->getOperand(1);
7138 if (Src0.isUndef() && Src1.isUndef())
7139 return DCI.DAG.getUNDEF(N->getValueType(0));
7140 return SDValue();
7141}
7142
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007143SDValue SITargetLowering::performExtractVectorEltCombine(
7144 SDNode *N, DAGCombinerInfo &DCI) const {
7145 SDValue Vec = N->getOperand(0);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007146 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault63bc0e32018-06-15 15:31:36 +00007147
7148 EVT VecVT = Vec.getValueType();
7149 EVT EltVT = VecVT.getVectorElementType();
7150
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00007151 if ((Vec.getOpcode() == ISD::FNEG ||
7152 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007153 SDLoc SL(N);
7154 EVT EltVT = N->getValueType(0);
7155 SDValue Idx = N->getOperand(1);
7156 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7157 Vec.getOperand(0), Idx);
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00007158 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007159 }
7160
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007161 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
7162 // =>
7163 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
7164 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
7165 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
Farhana Aleene24f3ff2018-05-09 21:18:34 +00007166 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007167 SDLoc SL(N);
7168 EVT EltVT = N->getValueType(0);
7169 SDValue Idx = N->getOperand(1);
7170 unsigned Opc = Vec.getOpcode();
7171
7172 switch(Opc) {
7173 default:
7174 return SDValue();
7175 // TODO: Support other binary operations.
7176 case ISD::FADD:
7177 case ISD::ADD:
Farhana Aleene24f3ff2018-05-09 21:18:34 +00007178 case ISD::UMIN:
7179 case ISD::UMAX:
7180 case ISD::SMIN:
7181 case ISD::SMAX:
7182 case ISD::FMAXNUM:
7183 case ISD::FMINNUM:
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007184 return DAG.getNode(Opc, SL, EltVT,
7185 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7186 Vec.getOperand(0), Idx),
7187 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7188 Vec.getOperand(1), Idx));
7189 }
7190 }
Matt Arsenault63bc0e32018-06-15 15:31:36 +00007191
7192 if (!DCI.isBeforeLegalize())
7193 return SDValue();
7194
7195 unsigned VecSize = VecVT.getSizeInBits();
7196 unsigned EltSize = EltVT.getSizeInBits();
7197
7198 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
7199 // elements. This exposes more load reduction opportunities by replacing
7200 // multiple small extract_vector_elements with a single 32-bit extract.
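// For example, extracting element 3 of a v8i16 vector becomes an extract of
// 32-bit element 1 of the bitcast value, followed by a right shift by 16 and
// a truncate back to i16.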
7201 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
7202 if (EltSize <= 16 &&
7203 EltVT.isByteSized() &&
7204 VecSize > 32 &&
7205 VecSize % 32 == 0 &&
7206 Idx) {
7207 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
7208
7209 unsigned BitIndex = Idx->getZExtValue() * EltSize;
7210 unsigned EltIdx = BitIndex / 32;
7211 unsigned LeftoverBitIdx = BitIndex % 32;
7212 SDLoc SL(N);
7213
7214 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
7215 DCI.AddToWorklist(Cast.getNode());
7216
7217 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
7218 DAG.getConstant(EltIdx, SL, MVT::i32));
7219 DCI.AddToWorklist(Elt.getNode());
7220 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
7221 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
7222 DCI.AddToWorklist(Srl.getNode());
7223
7224 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
7225 DCI.AddToWorklist(Trunc.getNode());
7226 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
7227 }
7228
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007229 return SDValue();
7230}
7231
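// If Hi is a bitcast of an f16 value and Lo is a constant or undef, rewrite Lo
// as an f16 bitcast and strip the bitcast from Hi so the caller can build a
// v2f16 vector instead. Returns true if the operands were rewritten.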
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007232static bool convertBuildVectorCastElt(SelectionDAG &DAG,
7233 SDValue &Lo, SDValue &Hi) {
7234 if (Hi.getOpcode() == ISD::BITCAST &&
7235 Hi.getOperand(0).getValueType() == MVT::f16 &&
7236 (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
7237 Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
7238 Hi = Hi.getOperand(0);
7239 return true;
7240 }
7241
7242 return false;
7243}
7244
7245SDValue SITargetLowering::performBuildVectorCombine(
7246 SDNode *N, DAGCombinerInfo &DCI) const {
7247 SDLoc SL(N);
7248
7249 if (!isTypeLegal(MVT::v2i16))
7250 return SDValue();
7251 SelectionDAG &DAG = DCI.DAG;
7252 EVT VT = N->getValueType(0);
7253
7254 if (VT == MVT::v2i16) {
7255 SDValue Lo = N->getOperand(0);
7256 SDValue Hi = N->getOperand(1);
7257
7258 // v2i16 build_vector (const|undef), (bitcast f16:$x)
7259 // -> bitcast (v2f16 build_vector (const|undef), $x)
7260 if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
7261 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi });
7262 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
7263 }
7264
7265 if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
7266 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo });
7267 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
7268 }
7269 }
7270
7271 return SDValue();
7272}
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007273
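// Return the fused opcode to use when combining an fadd/fsub pattern: FMAD if
// denormals are not enabled for this type, FMA if contraction is allowed and
// FMA is fast, or 0 if no fused form should be used.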
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007274unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
7275 const SDNode *N0,
7276 const SDNode *N1) const {
7277 EVT VT = N0->getValueType(0);
7278
Matt Arsenault770ec862016-12-22 03:55:35 +00007279 // Only do this if we are not trying to support denormals. v_mad_f32 does not
7280 // support denormals ever.
7281 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
7282 (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
7283 return ISD::FMAD;
7284
7285 const TargetOptions &Options = DAG.getTarget().Options;
Amara Emersond28f0cd42017-05-01 15:17:51 +00007286 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
Michael Berg7acc81b2018-05-04 18:48:20 +00007287 (N0->getFlags().hasAllowContract() &&
7288 N1->getFlags().hasAllowContract())) &&
Matt Arsenault770ec862016-12-22 03:55:35 +00007289 isFMAFasterThanFMulAndFAdd(VT)) {
7290 return ISD::FMA;
7291 }
7292
7293 return 0;
7294}
7295
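// Build a MAD_I64_I32/MAD_U64_U32 node computing N0 * N1 + N2 with 32-bit
// multiply operands and a 64-bit result, then truncate the result back to VT.
// The i1 carry-out value of the node is left unused.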
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007296static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
7297 EVT VT,
7298 SDValue N0, SDValue N1, SDValue N2,
7299 bool Signed) {
7300 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
7301 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
7302 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
7303 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
7304}
7305
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007306SDValue SITargetLowering::performAddCombine(SDNode *N,
7307 DAGCombinerInfo &DCI) const {
7308 SelectionDAG &DAG = DCI.DAG;
7309 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007310 SDLoc SL(N);
7311 SDValue LHS = N->getOperand(0);
7312 SDValue RHS = N->getOperand(1);
7313
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007314 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
7315 && Subtarget->hasMad64_32() &&
7316 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
7317 VT.getScalarSizeInBits() <= 64) {
7318 if (LHS.getOpcode() != ISD::MUL)
7319 std::swap(LHS, RHS);
7320
7321 SDValue MulLHS = LHS.getOperand(0);
7322 SDValue MulRHS = LHS.getOperand(1);
7323 SDValue AddRHS = RHS;
7324
7325 // TODO: Maybe restrict if SGPR inputs.
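// If both multiply operands are known to fit in 32 unsigned bits, the full
// product fits in 64 bits and the add can be implemented with v_mad_u64_u32.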
7326 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
7327 numBitsUnsigned(MulRHS, DAG) <= 32) {
7328 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
7329 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
7330 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
7331 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
7332 }
7333
7334 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
7335 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
7336 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
7337 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
7338 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
7339 }
7340
7341 return SDValue();
7342 }
7343
Farhana Aleen07e61232018-05-02 18:16:39 +00007344 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007345 return SDValue();
7346
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007347 // add x, zext (setcc) => addcarry x, 0, setcc
7348 // add x, sext (setcc) => subcarry x, 0, setcc
7349 unsigned Opc = LHS.getOpcode();
7350 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007351 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007352 std::swap(RHS, LHS);
7353
7354 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007355 switch (Opc) {
7356 default: break;
7357 case ISD::ZERO_EXTEND:
7358 case ISD::SIGN_EXTEND:
7359 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007360 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007361 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00007362 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007363 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
7364 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
7365 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
7366 return DAG.getNode(Opc, SL, VTList, Args);
7367 }
7368 case ISD::ADDCARRY: {
7369 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
7370 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7371 if (!C || C->getZExtValue() != 0) break;
7372 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
7373 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
7374 }
7375 }
7376 return SDValue();
7377}
7378
7379SDValue SITargetLowering::performSubCombine(SDNode *N,
7380 DAGCombinerInfo &DCI) const {
7381 SelectionDAG &DAG = DCI.DAG;
7382 EVT VT = N->getValueType(0);
7383
7384 if (VT != MVT::i32)
7385 return SDValue();
7386
7387 SDLoc SL(N);
7388 SDValue LHS = N->getOperand(0);
7389 SDValue RHS = N->getOperand(1);
7390
7391 unsigned Opc = LHS.getOpcode();
7392 if (Opc != ISD::SUBCARRY)
7393 std::swap(RHS, LHS);
7394
7395 if (LHS.getOpcode() == ISD::SUBCARRY) {
7396 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
7397 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7398 if (!C || C->getZExtValue() != 0)
7399 return SDValue();
7400 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
7401 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
7402 }
7403 return SDValue();
7404}
7405
7406SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
7407 DAGCombinerInfo &DCI) const {
7408
7409 if (N->getValueType(0) != MVT::i32)
7410 return SDValue();
7411
7412 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7413 if (!C || C->getZExtValue() != 0)
7414 return SDValue();
7415
7416 SelectionDAG &DAG = DCI.DAG;
7417 SDValue LHS = N->getOperand(0);
7418
7419 // addcarry (add x, y), 0, cc => addcarry x, y, cc
7420 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
7421 unsigned LHSOpc = LHS.getOpcode();
7422 unsigned Opc = N->getOpcode();
7423 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
7424 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
7425 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
7426 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007427 }
7428 return SDValue();
7429}
7430
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007431SDValue SITargetLowering::performFAddCombine(SDNode *N,
7432 DAGCombinerInfo &DCI) const {
7433 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7434 return SDValue();
7435
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007436 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00007437 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00007438
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007439 SDLoc SL(N);
7440 SDValue LHS = N->getOperand(0);
7441 SDValue RHS = N->getOperand(1);
7442
7443 // These should really be instruction patterns, but writing patterns with
7444 // source modifiers is a pain.
7445
7446 // fadd (fadd (a, a), b) -> mad 2.0, a, b
7447 if (LHS.getOpcode() == ISD::FADD) {
7448 SDValue A = LHS.getOperand(0);
7449 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007450 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007451 if (FusedOp != 0) {
7452 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007453 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00007454 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007455 }
7456 }
7457
7458 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
7459 if (RHS.getOpcode() == ISD::FADD) {
7460 SDValue A = RHS.getOperand(0);
7461 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007462 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007463 if (FusedOp != 0) {
7464 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007465 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00007466 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007467 }
7468 }
7469
7470 return SDValue();
7471}
7472
7473SDValue SITargetLowering::performFSubCombine(SDNode *N,
7474 DAGCombinerInfo &DCI) const {
7475 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7476 return SDValue();
7477
7478 SelectionDAG &DAG = DCI.DAG;
7479 SDLoc SL(N);
7480 EVT VT = N->getValueType(0);
7481 assert(!VT.isVector());
7482
7483 // Try to get the fneg to fold into the source modifier. This undoes generic
7484 // DAG combines and folds them into the mad.
7485 //
7486 // Only do this if we are not trying to support denormals. v_mad_f32 does
7487 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00007488 SDValue LHS = N->getOperand(0);
7489 SDValue RHS = N->getOperand(1);
7490 if (LHS.getOpcode() == ISD::FADD) {
7491 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
7492 SDValue A = LHS.getOperand(0);
7493 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007494 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007495 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007496 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
7497 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7498
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007499 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007500 }
7501 }
Matt Arsenault770ec862016-12-22 03:55:35 +00007502 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007503
Matt Arsenault770ec862016-12-22 03:55:35 +00007504 if (RHS.getOpcode() == ISD::FADD) {
7505 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007506
Matt Arsenault770ec862016-12-22 03:55:35 +00007507 SDValue A = RHS.getOperand(0);
7508 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007509 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007510 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007511 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007512 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007513 }
7514 }
7515 }
7516
7517 return SDValue();
7518}
7519
Farhana Aleenc370d7b2018-07-16 18:19:59 +00007520SDValue SITargetLowering::performFMACombine(SDNode *N,
7521 DAGCombinerInfo &DCI) const {
7522 SelectionDAG &DAG = DCI.DAG;
7523 EVT VT = N->getValueType(0);
7524 SDLoc SL(N);
7525
7526 if (!Subtarget->hasDLInsts() || VT != MVT::f32)
7527 return SDValue();
7528
7529 // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
7530 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
7531 SDValue Op1 = N->getOperand(0);
7532 SDValue Op2 = N->getOperand(1);
7533 SDValue FMA = N->getOperand(2);
7534
7535 if (FMA.getOpcode() != ISD::FMA ||
7536 Op1.getOpcode() != ISD::FP_EXTEND ||
7537 Op2.getOpcode() != ISD::FP_EXTEND)
7538 return SDValue();
7539
7540 // fdot2_f32_f16 always flushes fp32 denormal operands and output to zero,
7541 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
7542 // is sufficient to allow generating fdot2.
7543 const TargetOptions &Options = DAG.getTarget().Options;
7544 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
7545 (N->getFlags().hasAllowContract() &&
7546 FMA->getFlags().hasAllowContract())) {
7547 Op1 = Op1.getOperand(0);
7548 Op2 = Op2.getOperand(0);
7549 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7550 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7551 return SDValue();
7552
7553 SDValue Vec1 = Op1.getOperand(0);
7554 SDValue Idx1 = Op1.getOperand(1);
7555 SDValue Vec2 = Op2.getOperand(0);
7556
7557 SDValue FMAOp1 = FMA.getOperand(0);
7558 SDValue FMAOp2 = FMA.getOperand(1);
7559 SDValue FMAAcc = FMA.getOperand(2);
7560
7561 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
7562 FMAOp2.getOpcode() != ISD::FP_EXTEND)
7563 return SDValue();
7564
7565 FMAOp1 = FMAOp1.getOperand(0);
7566 FMAOp2 = FMAOp2.getOperand(0);
7567 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7568 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7569 return SDValue();
7570
7571 SDValue Vec3 = FMAOp1.getOperand(0);
7572 SDValue Vec4 = FMAOp2.getOperand(0);
7573 SDValue Idx2 = FMAOp1.getOperand(1);
7574
7575 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
7576 // Idx1 and Idx2 cannot be the same.
7577 Idx1 == Idx2)
7578 return SDValue();
7579
7580 if (Vec1 == Vec2 || Vec3 == Vec4)
7581 return SDValue();
7582
7583 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
7584 return SDValue();
7585
7586 if ((Vec1 == Vec3 && Vec2 == Vec4) ||
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00007587 (Vec1 == Vec4 && Vec2 == Vec3)) {
7588 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
7589 DAG.getTargetConstant(0, SL, MVT::i1));
7590 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00007591 }
7592 return SDValue();
7593}
7594
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007595SDValue SITargetLowering::performSetCCCombine(SDNode *N,
7596 DAGCombinerInfo &DCI) const {
7597 SelectionDAG &DAG = DCI.DAG;
7598 SDLoc SL(N);
7599
7600 SDValue LHS = N->getOperand(0);
7601 SDValue RHS = N->getOperand(1);
7602 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00007603 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
7604
7605 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
7606 if (!CRHS) {
7607 CRHS = dyn_cast<ConstantSDNode>(LHS);
7608 if (CRHS) {
7609 std::swap(LHS, RHS);
7610 CC = getSetCCSwappedOperands(CC);
7611 }
7612 }
7613
Stanislav Mekhanoshin3b117942018-06-16 03:46:59 +00007614 if (CRHS) {
7615 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
7616 isBoolSGPR(LHS.getOperand(0))) {
7617 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
7618 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
7619 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
7620 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
7621 if ((CRHS->isAllOnesValue() &&
7622 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
7623 (CRHS->isNullValue() &&
7624 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
7625 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
7626 DAG.getConstant(-1, SL, MVT::i1));
7627 if ((CRHS->isAllOnesValue() &&
7628 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
7629 (CRHS->isNullValue() &&
7630 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
7631 return LHS.getOperand(0);
7632 }
7633
7634 uint64_t CRHSVal = CRHS->getZExtValue();
7635 if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
7636 LHS.getOpcode() == ISD::SELECT &&
7637 isa<ConstantSDNode>(LHS.getOperand(1)) &&
7638 isa<ConstantSDNode>(LHS.getOperand(2)) &&
7639 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
7640 isBoolSGPR(LHS.getOperand(0))) {
7641 // Given CT != FT:
7642 // setcc (select cc, CT, CF), CF, eq => xor cc, -1
7643 // setcc (select cc, CT, CF), CF, ne => cc
7644 // setcc (select cc, CT, CF), CT, ne => xor cc, -1
7645 // setcc (select cc, CT, CF), CT, eq => cc
7646 uint64_t CT = LHS.getConstantOperandVal(1);
7647 uint64_t CF = LHS.getConstantOperandVal(2);
7648
7649 if ((CF == CRHSVal && CC == ISD::SETEQ) ||
7650 (CT == CRHSVal && CC == ISD::SETNE))
7651 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
7652 DAG.getConstant(-1, SL, MVT::i1));
7653 if ((CF == CRHSVal && CC == ISD::SETNE) ||
7654 (CT == CRHSVal && CC == ISD::SETEQ))
7655 return LHS.getOperand(0);
7656 }
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00007657 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007658
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00007659 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
7660 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007661 return SDValue();
7662
7663 // Match isinf pattern
7664 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007665 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
7666 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
7667 if (!CRHS)
7668 return SDValue();
7669
7670 const APFloat &APF = CRHS->getValueAPF();
7671 if (APF.isInfinity() && !APF.isNegative()) {
7672 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007673 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
7674 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007675 }
7676 }
7677
7678 return SDValue();
7679}
7680
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007681SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
7682 DAGCombinerInfo &DCI) const {
7683 SelectionDAG &DAG = DCI.DAG;
7684 SDLoc SL(N);
7685 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
7686
7687 SDValue Src = N->getOperand(0);
7688 SDValue Srl = N->getOperand(0);
7689 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
7690 Srl = Srl.getOperand(0);
7691
7692 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
7693 if (Srl.getOpcode() == ISD::SRL) {
7694 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
7695 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
7696 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
7697
7698 if (const ConstantSDNode *C =
7699 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
7700 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
7701 EVT(MVT::i32));
7702
7703 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
7704 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
7705 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
7706 MVT::f32, Srl);
7707 }
7708 }
7709 }
7710
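// Only the byte selected by this cvt_f32_ubyteN variant is demanded from the
// source, so try to simplify the source based on the bits actually used.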
7711 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
7712
Craig Topperd0af7e82017-04-28 05:31:46 +00007713 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007714 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
7715 !DCI.isBeforeLegalizeOps());
7716 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Akira Hatanaka22e839f2017-04-21 18:53:12 +00007717 if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
Craig Topperd0af7e82017-04-28 05:31:46 +00007718 TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007719 DCI.CommitTargetLoweringOpt(TLO);
7720 }
7721
7722 return SDValue();
7723}
7724
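// Constant fold a clamp of a constant FP operand: values below 0.0 (and NaN
// under DX10 clamp) fold to 0.0, values above 1.0 fold to 1.0, and anything
// already in [0.0, 1.0] is returned unchanged.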
Tom Stellard1b95fed2018-05-24 05:28:34 +00007725SDValue SITargetLowering::performClampCombine(SDNode *N,
7726 DAGCombinerInfo &DCI) const {
7727 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
7728 if (!CSrc)
7729 return SDValue();
7730
7731 const APFloat &F = CSrc->getValueAPF();
7732 APFloat Zero = APFloat::getZero(F.getSemantics());
7733 APFloat::cmpResult Cmp0 = F.compare(Zero);
7734 if (Cmp0 == APFloat::cmpLessThan ||
7735 (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
7736 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
7737 }
7738
7739 APFloat One(F.getSemantics(), "1.0");
7740 APFloat::cmpResult Cmp1 = F.compare(One);
7741 if (Cmp1 == APFloat::cmpGreaterThan)
7742 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
7743
7744 return SDValue(CSrc, 0);
7745}
7746
7747
Tom Stellard75aadc22012-12-11 21:25:42 +00007748SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
7749 DAGCombinerInfo &DCI) const {
Tom Stellard75aadc22012-12-11 21:25:42 +00007750 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00007751 default:
7752 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007753 case ISD::ADD:
7754 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007755 case ISD::SUB:
7756 return performSubCombine(N, DCI);
7757 case ISD::ADDCARRY:
7758 case ISD::SUBCARRY:
7759 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007760 case ISD::FADD:
7761 return performFAddCombine(N, DCI);
7762 case ISD::FSUB:
7763 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007764 case ISD::SETCC:
7765 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00007766 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007767 case ISD::FMINNUM:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00007768 case ISD::SMAX:
7769 case ISD::SMIN:
7770 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00007771 case ISD::UMIN:
7772 case AMDGPUISD::FMIN_LEGACY:
7773 case AMDGPUISD::FMAX_LEGACY: {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007774 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
7775 getTargetMachine().getOptLevel() > CodeGenOpt::None)
Matt Arsenaultf639c322016-01-28 20:53:42 +00007776 return performMinMaxCombine(N, DCI);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007777 break;
7778 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00007779 case ISD::FMA:
7780 return performFMACombine(N, DCI);
Matt Arsenault90083d32018-06-07 09:54:49 +00007781 case ISD::LOAD: {
7782 if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
7783 return Widened;
7784 LLVM_FALLTHROUGH;
7785 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007786 case ISD::STORE:
7787 case ISD::ATOMIC_LOAD:
7788 case ISD::ATOMIC_STORE:
7789 case ISD::ATOMIC_CMP_SWAP:
7790 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
7791 case ISD::ATOMIC_SWAP:
7792 case ISD::ATOMIC_LOAD_ADD:
7793 case ISD::ATOMIC_LOAD_SUB:
7794 case ISD::ATOMIC_LOAD_AND:
7795 case ISD::ATOMIC_LOAD_OR:
7796 case ISD::ATOMIC_LOAD_XOR:
7797 case ISD::ATOMIC_LOAD_NAND:
7798 case ISD::ATOMIC_LOAD_MIN:
7799 case ISD::ATOMIC_LOAD_MAX:
7800 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00007801 case ISD::ATOMIC_LOAD_UMAX:
7802 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00007803 case AMDGPUISD::ATOMIC_DEC:
7804 case AMDGPUISD::ATOMIC_LOAD_FADD:
7805 case AMDGPUISD::ATOMIC_LOAD_FMIN:
7806 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007807 if (DCI.isBeforeLegalize())
7808 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007809 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007810 case ISD::AND:
7811 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007812 case ISD::OR:
7813 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007814 case ISD::XOR:
7815 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007816 case ISD::ZERO_EXTEND:
7817 return performZeroExtendCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007818 case AMDGPUISD::FP_CLASS:
7819 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00007820 case ISD::FCANONICALIZE:
7821 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007822 case AMDGPUISD::RCP:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007823 return performRcpCombine(N, DCI);
7824 case AMDGPUISD::FRACT:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007825 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00007826 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007827 case AMDGPUISD::RSQ_LEGACY:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007828 case AMDGPUISD::RCP_IFLAG:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007829 case AMDGPUISD::RSQ_CLAMP:
7830 case AMDGPUISD::LDEXP: {
7831 SDValue Src = N->getOperand(0);
7832 if (Src.isUndef())
7833 return Src;
7834 break;
7835 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007836 case ISD::SINT_TO_FP:
7837 case ISD::UINT_TO_FP:
7838 return performUCharToFloatCombine(N, DCI);
7839 case AMDGPUISD::CVT_F32_UBYTE0:
7840 case AMDGPUISD::CVT_F32_UBYTE1:
7841 case AMDGPUISD::CVT_F32_UBYTE2:
7842 case AMDGPUISD::CVT_F32_UBYTE3:
7843 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007844 case AMDGPUISD::FMED3:
7845 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00007846 case AMDGPUISD::CVT_PKRTZ_F16_F32:
7847 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00007848 case AMDGPUISD::CLAMP:
7849 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00007850 case ISD::SCALAR_TO_VECTOR: {
7851 SelectionDAG &DAG = DCI.DAG;
7852 EVT VT = N->getValueType(0);
7853
7854 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
7855 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
7856 SDLoc SL(N);
7857 SDValue Src = N->getOperand(0);
7858 EVT EltVT = Src.getValueType();
7859 if (EltVT == MVT::f16)
7860 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
7861
7862 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
7863 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
7864 }
7865
7866 break;
7867 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007868 case ISD::EXTRACT_VECTOR_ELT:
7869 return performExtractVectorEltCombine(N, DCI);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007870 case ISD::BUILD_VECTOR:
7871 return performBuildVectorCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007872 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00007873 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00007874}
Christian Konigd910b7d2013-02-26 17:52:16 +00007875
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007876/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00007877static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00007878 switch (Idx) {
7879 default: return 0;
7880 case AMDGPU::sub0: return 0;
7881 case AMDGPU::sub1: return 1;
7882 case AMDGPU::sub2: return 2;
7883 case AMDGPU::sub3: return 3;
7884 }
7885}
7886
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007887/// Adjust the writemask of MIMG instructions
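/// If only some components of the result are used, shrink the dmask to the
/// used components and switch to an equivalent opcode returning fewer values.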
Matt Arsenault68f05052017-12-04 22:18:27 +00007888SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
7889 SelectionDAG &DAG) const {
Nicolai Haehnlef2674312018-06-21 13:36:01 +00007890 unsigned Opcode = Node->getMachineOpcode();
7891
7892 // Subtract 1 because the vdata output is not a MachineSDNode operand.
7893 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
7894 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
7895 return Node; // not implemented for D16
7896
Matt Arsenault68f05052017-12-04 22:18:27 +00007897 SDNode *Users[4] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00007898 unsigned Lane = 0;
Nicolai Haehnlef2674312018-06-21 13:36:01 +00007899 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007900 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00007901 unsigned NewDmask = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00007902 bool HasChain = Node->getNumValues() > 1;
7903
7904 if (OldDmask == 0) {
7905 // These are folded out, but on the chance it happens don't assert.
7906 return Node;
7907 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00007908
7909 // Try to figure out the used register components
7910 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
7911 I != E; ++I) {
7912
Matt Arsenault93e65ea2017-02-22 21:16:41 +00007913 // Don't look at users of the chain.
7914 if (I.getUse().getResNo() != 0)
7915 continue;
7916
Christian Konig8e06e2a2013-04-10 08:39:08 +00007917 // Abort if we can't understand the usage
7918 if (!I->isMachineOpcode() ||
7919 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +00007920 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007921
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +00007922 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +00007923 // Note that subregs are packed, i.e. Lane==0 is the first bit set
7924 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
7925 // set, etc.
Christian Konig8b1ed282013-04-10 08:39:16 +00007926 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00007927
Tom Stellard54774e52013-10-23 02:53:47 +00007928 // Set which texture component corresponds to the lane.
7929 unsigned Comp;
7930 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
Tom Stellard03a5c082013-10-23 03:50:25 +00007931 Comp = countTrailingZeros(Dmask);
Tom Stellard54774e52013-10-23 02:53:47 +00007932 Dmask &= ~(1 << Comp);
7933 }
7934
Christian Konig8e06e2a2013-04-10 08:39:08 +00007935 // Abort if we have more than one user per component
7936 if (Users[Lane])
Matt Arsenault68f05052017-12-04 22:18:27 +00007937 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007938
7939 Users[Lane] = *I;
Tom Stellard54774e52013-10-23 02:53:47 +00007940 NewDmask |= 1 << Comp;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007941 }
7942
Tom Stellard54774e52013-10-23 02:53:47 +00007943 // Abort if there's no change
7944 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +00007945 return Node;
7946
7947 unsigned BitsSet = countPopulation(NewDmask);
7948
Nicolai Haehnle0ab200b2018-06-21 13:36:44 +00007949 int NewOpcode = AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), BitsSet);
Matt Arsenault68f05052017-12-04 22:18:27 +00007950 assert(NewOpcode != -1 &&
7951 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
7952 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +00007953
7954 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +00007955 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007956 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007957 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007958 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +00007959
Matt Arsenault68f05052017-12-04 22:18:27 +00007960 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
7961
Matt Arsenault856777d2017-12-08 20:00:57 +00007962 MVT ResultVT = BitsSet == 1 ?
7963 SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
7964 SDVTList NewVTList = HasChain ?
7965 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
7966
Matt Arsenault68f05052017-12-04 22:18:27 +00007967
7968 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
7969 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +00007970
Matt Arsenault856777d2017-12-08 20:00:57 +00007971 if (HasChain) {
7972 // Update chain.
7973 NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
7974 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
7975 }
Matt Arsenault68f05052017-12-04 22:18:27 +00007976
7977 if (BitsSet == 1) {
7978 assert(Node->hasNUsesOfValue(1, 0));
7979 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
7980 SDLoc(Node), Users[Lane]->getValueType(0),
7981 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +00007982 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +00007983 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +00007984 }
7985
Christian Konig8e06e2a2013-04-10 08:39:08 +00007986 // Update the users of the node with the new indices
7987 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00007988 SDNode *User = Users[i];
7989 if (!User)
7990 continue;
7991
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007992 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
Matt Arsenault68f05052017-12-04 22:18:27 +00007993 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
Christian Konig8e06e2a2013-04-10 08:39:08 +00007994
7995 switch (Idx) {
7996 default: break;
7997 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
7998 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
7999 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
8000 }
8001 }
Matt Arsenault68f05052017-12-04 22:18:27 +00008002
8003 DAG.RemoveDeadNode(Node);
8004 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008005}
8006
Tom Stellardc98ee202015-07-16 19:40:07 +00008007static bool isFrameIndexOp(SDValue Op) {
8008 if (Op.getOpcode() == ISD::AssertZext)
8009 Op = Op.getOperand(0);
8010
8011 return isa<FrameIndexSDNode>(Op);
8012}
8013
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008014/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +00008015/// with frame index operands.
8016/// LLVM assumes that inputs to these instructions are registers.
Matt Arsenault0d0d6c22017-04-12 21:58:23 +00008017SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
8018 SelectionDAG &DAG) const {
8019 if (Node->getOpcode() == ISD::CopyToReg) {
8020 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
8021 SDValue SrcVal = Node->getOperand(2);
8022
8023 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
8024 // to try understanding copies to physical registers.
8025 if (SrcVal.getValueType() == MVT::i1 &&
8026 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
8027 SDLoc SL(Node);
8028 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
8029 SDValue VReg = DAG.getRegister(
8030 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
8031
8032 SDNode *Glued = Node->getGluedNode();
8033 SDValue ToVReg
8034 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
8035 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
8036 SDValue ToResultReg
8037 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
8038 VReg, ToVReg.getValue(1));
8039 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
8040 DAG.RemoveDeadNode(Node);
8041 return ToResultReg.getNode();
8042 }
8043 }
Tom Stellard8dd392e2014-10-09 18:09:15 +00008044
8045 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +00008046 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00008047 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00008048 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00008049 continue;
8050 }
8051
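// Materialize the frame index into a register with S_MOV_B32, since these
// generic instructions expect register operands.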
Tom Stellard3457a842014-10-09 19:06:00 +00008052 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00008053 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00008054 Node->getOperand(i).getValueType(),
8055 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00008056 }
8057
Mark Searles4e3d6162017-10-16 23:38:53 +00008058 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00008059}
8060
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008061/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +00008062/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +00008063SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
8064 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008065 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00008066 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00008067
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00008068 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Nicolai Haehnlef2674312018-06-21 13:36:01 +00008069 !TII->isGather4(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +00008070 return adjustWritemask(Node, DAG);
8071 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00008072
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00008073 if (Opcode == AMDGPU::INSERT_SUBREG ||
8074 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00008075 legalizeTargetIndependentNode(Node, DAG);
8076 return Node;
8077 }
Matt Arsenault206f8262017-08-01 20:49:41 +00008078
8079 switch (Opcode) {
8080 case AMDGPU::V_DIV_SCALE_F32:
8081 case AMDGPU::V_DIV_SCALE_F64: {
8082 // Satisfy the operand register constraint when one of the inputs is
8083 // undefined. Ordinarily each undef value will have its own implicit_def of
8084 // a vreg, so force these to use a single register.
8085 SDValue Src0 = Node->getOperand(0);
8086 SDValue Src1 = Node->getOperand(1);
8087 SDValue Src2 = Node->getOperand(2);
8088
8089 if ((Src0.isMachineOpcode() &&
8090 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
8091 (Src0 == Src1 || Src0 == Src2))
8092 break;
8093
8094 MVT VT = Src0.getValueType().getSimpleVT();
8095 const TargetRegisterClass *RC = getRegClassFor(VT);
8096
8097 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
8098 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
8099
8100 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
8101 UndefReg, Src0, SDValue());
8102
8103 // src0 must be the same register as src1 or src2, even if the value is
8104 // undefined, so make sure we don't violate this constraint.
8105 if (Src0.isMachineOpcode() &&
8106 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
8107 if (Src1.isMachineOpcode() &&
8108 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
8109 Src0 = Src1;
8110 else if (Src2.isMachineOpcode() &&
8111 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
8112 Src0 = Src2;
8113 else {
8114 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
8115 Src0 = UndefReg;
8116 Src1 = UndefReg;
8117 }
8118 } else
8119 break;
8120
8121 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
8122 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
8123 Ops.push_back(Node->getOperand(I));
8124
8125 Ops.push_back(ImpDef.getValue(1));
8126 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
8127 }
8128 default:
8129 break;
8130 }
8131
Tom Stellard654d6692015-01-08 15:08:17 +00008132 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008133}
Christian Konig8b1ed282013-04-10 08:39:16 +00008134
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008135/// Assign the register class depending on the number of
Christian Konig8b1ed282013-04-10 08:39:16 +00008136/// bits set in the writemask
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008137void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00008138 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008139 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008140
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008141 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00008142
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008143 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00008144 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008145 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00008146 return;
8147 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00008148
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008149 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008150 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008151 if (NoRetAtomicOp != -1) {
8152 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008153 MI.setDesc(TII->get(NoRetAtomicOp));
8154 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00008155 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008156 }
8157
Tom Stellard354a43c2016-04-01 18:27:37 +00008158 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
8159 // instruction, because the return type of these instructions is a vec2 of
8160 // the memory type, so it can be tied to the input operand.
8161 // This means these instructions always have a use, so we need to add a
8162 // special case to check if the atomic has only one extract_subreg use,
8163 // which itself has no uses.
8164 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00008165 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00008166 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
8167 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008168 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00008169
8170 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008171 MI.setDesc(TII->get(NoRetAtomicOp));
8172 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00008173
8174 // If we only remove the def operand from the atomic instruction, the
8175 // extract_subreg will be left with a use of a vreg without a def.
8176 // So we need to insert an implicit_def to avoid machine verifier
8177 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008178 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00008179 TII->get(AMDGPU::IMPLICIT_DEF), Def);
8180 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008181 return;
8182 }
Christian Konig8b1ed282013-04-10 08:39:16 +00008183}
Tom Stellard0518ff82013-06-03 17:39:58 +00008184
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008185static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
8186 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008187 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00008188 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
8189}
8190
8191MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008192 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00008193 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008194 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00008195
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008196 // Build the subregister half containing the constants before building the
8197 // full 128-bit register. If we are building multiple resource descriptors,
8198 // this will allow CSEing of the 2-component register.
8199 const SDValue Ops0[] = {
8200 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
8201 buildSMovImm32(DAG, DL, 0),
8202 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
8203 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
8204 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
8205 };
Matt Arsenault485defe2014-11-05 19:01:17 +00008206
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008207 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
8208 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00008209
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008210 // Combine the constants and the pointer.
8211 const SDValue Ops1[] = {
8212 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
8213 Ptr,
8214 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
8215 SubRegHi,
8216 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
8217 };
Matt Arsenault485defe2014-11-05 19:01:17 +00008218
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008219 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00008220}
8221
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008222/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00008223/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
8224/// of the resource descriptor) to create an offset, which is added to
8225/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008226MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
8227 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008228 uint64_t RsrcDword2And3) const {
8229 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
8230 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
8231 if (RsrcDword1) {
8232 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008233 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
8234 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008235 }
8236
8237 SDValue DataLo = buildSMovImm32(DAG, DL,
8238 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
8239 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
8240
8241 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008242 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008243 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008244 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008245 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008246 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008247 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008248 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008249 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008250 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008251 };
8252
8253 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
8254}
8255
Tom Stellardd7e6f132015-04-08 01:09:26 +00008256//===----------------------------------------------------------------------===//
8257// SI Inline Assembly Support
8258//===----------------------------------------------------------------------===//
8259
8260std::pair<unsigned, const TargetRegisterClass *>
8261SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00008262 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00008263 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008264 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008265 if (Constraint.size() == 1) {
8266 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008267 default:
8268 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008269 case 's':
8270 case 'r':
8271 switch (VT.getSizeInBits()) {
8272 default:
8273 return std::make_pair(0U, nullptr);
8274 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00008275 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008276 RC = &AMDGPU::SReg_32_XM0RegClass;
8277 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008278 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008279 RC = &AMDGPU::SGPR_64RegClass;
8280 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008281 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008282 RC = &AMDGPU::SReg_128RegClass;
8283 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008284 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008285 RC = &AMDGPU::SReg_256RegClass;
8286 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +00008287 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008288 RC = &AMDGPU::SReg_512RegClass;
8289 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008290 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008291 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008292 case 'v':
8293 switch (VT.getSizeInBits()) {
8294 default:
8295 return std::make_pair(0U, nullptr);
8296 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00008297 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008298 RC = &AMDGPU::VGPR_32RegClass;
8299 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008300 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008301 RC = &AMDGPU::VReg_64RegClass;
8302 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008303 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008304 RC = &AMDGPU::VReg_96RegClass;
8305 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008306 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008307 RC = &AMDGPU::VReg_128RegClass;
8308 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008309 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008310 RC = &AMDGPU::VReg_256RegClass;
8311 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008312 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008313 RC = &AMDGPU::VReg_512RegClass;
8314 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008315 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008316 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +00008317 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008318 // We actually support i128, i16 and f16 as inline parameters
8319 // even if they are not reported as legal
8320 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
8321 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
8322 return std::make_pair(0U, RC);
  }

  if (Constraint.size() > 1) {
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
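
// Note: a minimal illustration of how this hook and getConstraintType below
// fit together (hypothetical user code, not part of this file). For the
// single-character 'v' constraint, getConstraintType answers C_RegisterClass
// and the switch above picks a VGPR class from the operand's width, e.g.
// VGPR_32 for a 32-bit operand:
//
//   int X;
//   __asm__ volatile("v_mov_b32 %0, 0" : "=v"(X));  // 32 bits -> VGPR_32
//
// An 's' constraint on a 64-bit operand would map to SGPR_64 instead. The
// mnemonic is only illustrative; these hooks choose register classes, not
// instructions.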

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions use fixed registers for stack access, so only entry
    // functions need their private memory registers reserved here.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  // We have to assume the SP is needed in case there are calls in the function
  // during lowering, since calls are only detected after the function is
  // lowered. We are about to reserve registers, so don't reserve a stack
  // pointer if we are not actually going to use one.
  bool NeedSP = !Info->isEntryFunction() ||
                MFI.hasVarSizedObjects() ||
                MFI.hasCalls();

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
    Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
    assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                               Info->getStackPtrOffsetReg()));
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
  }

  MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
  MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
  MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                     Info->getScratchWaveOffsetReg());

  Info->limitOccupancy(MF);

  TargetLoweringBase::finalizeLowering(MF);
}
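
// Expository note (added summary, not part of the original comments):
// AMDGPU::SP_REG, FP_REG, PRIVATE_RSRC_REG and SCRATCH_WAVE_OFFSET_REG act as
// placeholders while the function is being lowered, before the concrete
// registers are known. Once lowering is finished, the replaceRegWith calls
// above rewrite every use of a placeholder with the register recorded in
// SIMachineFunctionInfo; e.g. if reservedStackPtrOffsetReg(MF) chose SGPR32
// (an illustrative value only), all uses of SP_REG would become SGPR32.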

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  if (getSubtarget()->enableHugePrivateBuffer())
    return;

  // Technically it may be possible to have a dispatch with a single workitem
  // that uses the full private memory size, but that's not really useful. We
  // can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
}
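
// Worked example (illustrative numbers only): if AssumeFrameIndexHighZeroBits
// were 15, setHighBits(15) would report that a 32-bit frame-index address fits
// in the low 17 bits (below 128 KiB), so in particular the sign bit is known
// clear and MUBUF vaddr address arithmetic cannot overflow into it.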

bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
                                                  FunctionLoweringInfo *FLI,
                                                  DivergenceAnalysis *DA) const {
  switch (N->getOpcode()) {
  case ISD::Register:
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = nullptr;
    if (N->getOpcode() == ISD::Register)
      R = dyn_cast<RegisterSDNode>(N);
    else
      R = dyn_cast<RegisterSDNode>(N->getOperand(1));
    if (R) {
      const MachineFunction *MF = FLI->MF;
      const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
      const MachineRegisterInfo &MRI = MF->getRegInfo();
      const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
      unsigned Reg = R->getReg();
      if (TRI.isPhysicalRegister(Reg))
        return TRI.isVGPR(MRI, Reg);

      if (MRI.isLiveIn(Reg)) {
        // Live-in VGPRs, such as the workitem IDs (workitem.id.x/y/z) or any
        // other VGPR formal argument, are divergent.
        if (TRI.isVGPR(MRI, Reg))
          return true;
        // Formal arguments of non-entry functions are conservatively
        // considered divergent.
        if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
          return true;
      }
      return !DA || DA->isDivergent(FLI->getValueFromVirtualReg(Reg));
    }
    break;
  }
  case ISD::LOAD: {
    // Loads from the private (scratch) address space can produce a different
    // value in every lane, so treat them as divergent.
    const LoadSDNode *L = cast<LoadSDNode>(N);
    if (L->getMemOperand()->getAddrSpace() ==
        Subtarget->getAMDGPUAS().PRIVATE_ADDRESS)
      return true;
    break;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have already been
  // lowered to AMDGPUISD nodes, so we need to check those as well.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}
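
// Illustration (hypothetical IR, not from this file) of what "source of
// divergence" means here:
//
//   %tid = call i32 @llvm.amdgcn.workitem.id.x()  ; differs per lane
//   %v   = load i32, i32* %p                      ; private (scratch) pointer
//
// The workitem-id intrinsic and a load from the private address space may give
// every lane a different value, so both are reported divergent; a value copied
// out of an SGPR, by contrast, is uniform across the wave.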