//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);

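// Returns the first scalar register that has not yet been allocated by the
// calling-convention state; asserts if every SGPR is already taken.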
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

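  // Node types for which we want to try target-specific DAG combining.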
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3)
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this is still OK to use when denormals
// are enabled, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

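// For non-kernel calling conventions, vector arguments are broken into 32-bit
// pieces (or packed pairs of 16-bit elements when 16-bit instructions are
// available) instead of using the default type legalization.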
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 &&
        Subtarget->has16BitInsts() &&
        isPowerOf2_32(VT.getVectorNumElements()))
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    // FIXME: Fails to break down as we want with v3.
    if (Size == 16 && Subtarget->has16BitInsts() && isPowerOf2_32(NumElts))
      return VT.getVectorNumElements() / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts() && isPowerOf2_32(NumElts)) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

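// Describe the memory behavior of target intrinsics so the DAG builder can
// attach MachineMemOperands to the resulting nodes.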
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }

  default:
    return false;
  }
}

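// For the DS atomic intrinsics handled above, expose the pointer operand so
// that addressing-mode matching can try to fold offsets into the access.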
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffer < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or  2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

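// Limit the width of merged stores by address space: up to 128 bits for
// global/flat, the maximum private element size for scratch, and 64 bits for
// local (LDS) memory.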
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
1073 // If we have an uniform constant load, it still requires using a slow
1074 // buffer instruction if unaligned.
1075 if (IsFast) {
Matt Arsenault923712b2018-02-09 16:57:57 +00001076 *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS ||
1077 AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ?
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001078 (Align % 4 == 0) : true;
1079 }
1080
1081 return true;
1082 }
1083
Tom Stellard33e64c62015-02-04 20:49:52 +00001084 // Smaller than dword value must be aligned.
Tom Stellard33e64c62015-02-04 20:49:52 +00001085 if (VT.bitsLT(MVT::i32))
1086 return false;
1087
Matt Arsenault1018c892014-04-24 17:08:26 +00001088 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1089 // byte-address are ignored, thus forcing Dword alignment.
Tom Stellarde812f2f2014-07-21 15:45:06 +00001090 // This applies to private, global, and constant memory.
Matt Arsenault1018c892014-04-24 17:08:26 +00001091 if (IsFast)
1092 *IsFast = true;
Tom Stellardc6b299c2015-02-02 18:02:28 +00001093
1094 return VT.bitsGT(MVT::i32) && Align % 4 == 0;
Tom Stellard0125f2a2013-06-25 02:39:35 +00001095}
1096
Matt Arsenault46645fa2014-07-28 17:49:26 +00001097EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
1098 unsigned SrcAlign, bool IsMemset,
1099 bool ZeroMemset,
1100 bool MemcpyStrSrc,
1101 MachineFunction &MF) const {
1102 // FIXME: Should account for address space here.
1103
1104 // The default fallback uses the private pointer size as a guess for a type to
1105 // use. Make sure we switch these to 64-bit accesses.
1106
1107 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1108 return MVT::v4i32;
1109
1110 if (Size >= 8 && DstAlign >= 4)
1111 return MVT::v2i32;
1112
1113 // Use the default.
1114 return MVT::Other;
1115}
1116
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001117static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
1118 return AS == AMDGPUASI.GLOBAL_ADDRESS ||
1119 AS == AMDGPUASI.FLAT_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00001120 AS == AMDGPUASI.CONSTANT_ADDRESS ||
1121 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT;
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001122}
1123
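// Address space casts within the flat/global/constant set leave the pointer
// value unchanged, so no conversion code is needed for them.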
1124bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1125 unsigned DestAS) const {
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001126 return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
1127 isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001128}
1129
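// Returns true if the load's pointer value was marked with "amdgpu.noclobber"
// metadata by an earlier analysis pass, i.e. no clobbering store was detected.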
Alexander Timofeev18009562016-12-08 17:28:47 +00001130bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1131 const MemSDNode *MemNode = cast<MemSDNode>(N);
1132 const Value *Ptr = MemNode->getMemOperand()->getValue();
Matt Arsenault0a0c8712018-03-27 18:39:45 +00001133 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
Alexander Timofeev18009562016-12-08 17:28:47 +00001134 return I && I->getMetadata("amdgpu.noclobber");
1135}
1136
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001137bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
1138 unsigned DestAS) const {
1139 // Flat -> private/local is a simple truncate.
1140 // Flat -> global is a no-op.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001141 if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001142 return true;
1143
1144 return isNoopAddrSpaceCast(SrcAS, DestAS);
1145}
1146
Tom Stellarda6f24c62015-12-15 20:55:55 +00001147bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1148 const MemSDNode *MemNode = cast<MemSDNode>(N);
Tom Stellarda6f24c62015-12-15 20:55:55 +00001149
Matt Arsenaultbcf7bec2018-02-09 16:57:48 +00001150 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
Tom Stellarda6f24c62015-12-15 20:55:55 +00001151}
1152
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001153TargetLoweringBase::LegalizeTypeAction
1154SITargetLowering::getPreferredVectorAction(EVT VT) const {
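  // Prefer splitting multi-element vectors with 16-bit or smaller elements;
  // other vector types use the default legalization action.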
1155 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1156 return TypeSplitVector;
1157
1158 return TargetLoweringBase::getPreferredVectorAction(VT);
Tom Stellardd86003e2013-08-14 23:25:00 +00001159}
Tom Stellard0125f2a2013-06-25 02:39:35 +00001160
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001161bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1162 Type *Ty) const {
Matt Arsenault749035b2016-07-30 01:40:36 +00001163 // FIXME: Could be smarter if called for vector constants.
1164 return true;
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001165}
1166
Tom Stellard2e045bb2016-01-20 00:13:22 +00001167bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001168 if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1169 switch (Op) {
1170 case ISD::LOAD:
1171 case ISD::STORE:
Tom Stellard2e045bb2016-01-20 00:13:22 +00001172
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001173 // These operations are done with 32-bit instructions anyway.
1174 case ISD::AND:
1175 case ISD::OR:
1176 case ISD::XOR:
1177 case ISD::SELECT:
1178 // TODO: Extensions?
1179 return true;
1180 default:
1181 return false;
1182 }
1183 }
Konstantin Zhuravlyove14df4b2016-09-28 20:05:39 +00001184
Tom Stellard2e045bb2016-01-20 00:13:22 +00001185 // SimplifySetCC uses this function to determine whether or not it should
1186 // create setcc with i1 operands. We don't have instructions for i1 setcc.
1187 if (VT == MVT::i1 && Op == ISD::SETCC)
1188 return false;
1189
1190 return TargetLowering::isTypeDesirableForOp(Op, VT);
1191}
1192
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001193SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1194 const SDLoc &SL,
1195 SDValue Chain,
1196 uint64_t Offset) const {
Mehdi Aminia749f2a2015-07-09 02:09:52 +00001197 const DataLayout &DL = DAG.getDataLayout();
Tom Stellardec2e43c2014-09-22 15:35:29 +00001198 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001199 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1200
1201 const ArgDescriptor *InputPtrReg;
1202 const TargetRegisterClass *RC;
1203
1204 std::tie(InputPtrReg, RC)
1205 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Tom Stellard94593ee2013-06-03 17:40:18 +00001206
Matt Arsenault86033ca2014-07-28 17:31:39 +00001207 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001208 MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
Matt Arsenaulta0269b62015-06-01 21:58:24 +00001209 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001210 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1211
Matt Arsenault2fb9ccf2018-05-29 17:42:38 +00001212 return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
Jan Veselyfea814d2016-06-21 20:46:20 +00001213}
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001214
Matt Arsenault9166ce82017-07-28 15:52:08 +00001215SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1216 const SDLoc &SL) const {
Matt Arsenault75e71922018-06-28 10:18:55 +00001217 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1218 FIRST_IMPLICIT);
Matt Arsenault9166ce82017-07-28 15:52:08 +00001219 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1220}
1221
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001222SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1223 const SDLoc &SL, SDValue Val,
1224 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001225 const ISD::InputArg *Arg) const {
Matt Arsenault6dca5422017-01-09 18:52:39 +00001226 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1227 VT.bitsLT(MemVT)) {
1228 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1229 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1230 }
1231
Tom Stellardbc6c5232016-10-17 16:21:45 +00001232 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001233 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001234 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001235 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001236 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001237 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001238
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001239 return Val;
1240}
1241
1242SDValue SITargetLowering::lowerKernargMemParameter(
1243 SelectionDAG &DAG, EVT VT, EVT MemVT,
1244 const SDLoc &SL, SDValue Chain,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001245 uint64_t Offset, unsigned Align, bool Signed,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001246 const ISD::InputArg *Arg) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001247 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1248 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
1249 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1250
Matt Arsenault90083d32018-06-07 09:54:49 +00001251 // Try to avoid using an extload by loading earlier than the argument address,
1252 // and extracting the relevant bits. The load should hopefully be merged with
1253 // the previous argument.
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001254 if (MemVT.getStoreSize() < 4 && Align < 4) {
1255 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
Matt Arsenault90083d32018-06-07 09:54:49 +00001256 int64_t AlignDownOffset = alignDown(Offset, 4);
1257 int64_t OffsetDiff = Offset - AlignDownOffset;
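    // For example, a 2-byte argument at Offset == 6 loads the dword at offset 4
    // and shifts it right by OffsetDiff * 8 == 16 bits before truncating to MemVT.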
1258
1259 EVT IntVT = MemVT.changeTypeToInteger();
1260
1261 // TODO: If we passed in the base kernel offset we could have a better
1262 // alignment than 4, but we don't really need it.
1263 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1264 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1265 MachineMemOperand::MODereferenceable |
1266 MachineMemOperand::MOInvariant);
1267
1268 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1269 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1270
1271 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1272 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1273 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1274
1275
1276 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1277 }
1278
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001279 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1280 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001281 MachineMemOperand::MODereferenceable |
1282 MachineMemOperand::MOInvariant);
1283
1284 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001285 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001286}
1287
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001288SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1289 const SDLoc &SL, SDValue Chain,
1290 const ISD::InputArg &Arg) const {
1291 MachineFunction &MF = DAG.getMachineFunction();
1292 MachineFrameInfo &MFI = MF.getFrameInfo();
1293
1294 if (Arg.Flags.isByVal()) {
1295 unsigned Size = Arg.Flags.getByValSize();
1296 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1297 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1298 }
1299
1300 unsigned ArgOffset = VA.getLocMemOffset();
1301 unsigned ArgSize = VA.getValVT().getStoreSize();
1302
1303 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1304
1305 // Create load nodes to retrieve arguments from the stack.
1306 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1307 SDValue ArgValue;
1308
1309 // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1310 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1311 MVT MemVT = VA.getValVT();
1312
1313 switch (VA.getLocInfo()) {
1314 default:
1315 break;
1316 case CCValAssign::BCvt:
1317 MemVT = VA.getLocVT();
1318 break;
1319 case CCValAssign::SExt:
1320 ExtType = ISD::SEXTLOAD;
1321 break;
1322 case CCValAssign::ZExt:
1323 ExtType = ISD::ZEXTLOAD;
1324 break;
1325 case CCValAssign::AExt:
1326 ExtType = ISD::EXTLOAD;
1327 break;
1328 }
1329
1330 ArgValue = DAG.getExtLoad(
1331 ExtType, SL, VA.getLocVT(), Chain, FIN,
1332 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1333 MemVT);
1334 return ArgValue;
1335}
1336
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001337SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1338 const SIMachineFunctionInfo &MFI,
1339 EVT VT,
1340 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1341 const ArgDescriptor *Reg;
1342 const TargetRegisterClass *RC;
1343
1344 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1345 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1346}
1347
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001348static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1349 CallingConv::ID CallConv,
1350 ArrayRef<ISD::InputArg> Ins,
1351 BitVector &Skipped,
1352 FunctionType *FType,
1353 SIMachineFunctionInfo *Info) {
1354 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001355 const ISD::InputArg *Arg = &Ins[I];
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001356
Matt Arsenault55ab9212018-08-01 19:57:34 +00001357 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1358 "vector type argument should have been split");
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001359
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001360 // First check if it's a PS input addr.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001361 if (CallConv == CallingConv::AMDGPU_PS &&
1362 !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001363
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001364 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1365
1366 // Inconveniently only the first part of the split is marked as isSplit,
1367 // so skip to the end. We only want to increment PSInputNum once for the
1368 // entire split argument.
1369 if (Arg->Flags.isSplit()) {
1370 while (!Arg->Flags.isSplitEnd()) {
1371 assert(!Arg->VT.isVector() &&
1372 "unexpected vector split in ps argument type");
1373 if (!SkipArg)
1374 Splits.push_back(*Arg);
1375 Arg = &Ins[++I];
1376 }
1377 }
1378
1379 if (SkipArg) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001380 // We can safely skip PS inputs.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001381 Skipped.set(Arg->getOrigArgIndex());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001382 ++PSInputNum;
1383 continue;
1384 }
1385
1386 Info->markPSInputAllocated(PSInputNum);
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001387 if (Arg->Used)
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001388 Info->markPSInputEnabled(PSInputNum);
1389
1390 ++PSInputNum;
1391 }
1392
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001393 Splits.push_back(*Arg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001394 }
1395}
1396
1397// Allocate special inputs passed in VGPRs.
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001398static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1399 MachineFunction &MF,
1400 const SIRegisterInfo &TRI,
1401 SIMachineFunctionInfo &Info) {
1402 if (Info.hasWorkItemIDX()) {
1403 unsigned Reg = AMDGPU::VGPR0;
1404 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001405
1406 CCInfo.AllocateReg(Reg);
1407 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1408 }
1409
1410 if (Info.hasWorkItemIDY()) {
1411 unsigned Reg = AMDGPU::VGPR1;
1412 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1413
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001414 CCInfo.AllocateReg(Reg);
1415 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1416 }
1417
1418 if (Info.hasWorkItemIDZ()) {
1419 unsigned Reg = AMDGPU::VGPR2;
1420 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1421
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001422 CCInfo.AllocateReg(Reg);
1423 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1424 }
1425}
1426
1427// Try to allocate a VGPR at the end of the argument list, or if no argument
1428// VGPRs are left, allocate a stack slot.
1429static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1430 ArrayRef<MCPhysReg> ArgVGPRs
1431 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1432 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1433 if (RegIdx == ArgVGPRs.size()) {
1434 // Spill to stack required.
1435 int64_t Offset = CCInfo.AllocateStack(4, 4);
1436
1437 return ArgDescriptor::createStack(Offset);
1438 }
1439
1440 unsigned Reg = ArgVGPRs[RegIdx];
1441 Reg = CCInfo.AllocateReg(Reg);
1442 assert(Reg != AMDGPU::NoRegister);
1443
1444 MachineFunction &MF = CCInfo.getMachineFunction();
1445 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1446 return ArgDescriptor::createRegister(Reg);
1447}
1448
1449static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1450 const TargetRegisterClass *RC,
1451 unsigned NumArgRegs) {
1452 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1453 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1454 if (RegIdx == ArgSGPRs.size())
1455 report_fatal_error("ran out of SGPRs for arguments");
1456
1457 unsigned Reg = ArgSGPRs[RegIdx];
1458 Reg = CCInfo.AllocateReg(Reg);
1459 assert(Reg != AMDGPU::NoRegister);
1460
1461 MachineFunction &MF = CCInfo.getMachineFunction();
1462 MF.addLiveIn(Reg, RC);
1463 return ArgDescriptor::createRegister(Reg);
1464}
1465
1466static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1467 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1468}
1469
1470static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1471 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1472}
1473
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001474static void allocateSpecialInputVGPRs(CCState &CCInfo,
1475 MachineFunction &MF,
1476 const SIRegisterInfo &TRI,
1477 SIMachineFunctionInfo &Info) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001478 if (Info.hasWorkItemIDX())
1479 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001480
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001481 if (Info.hasWorkItemIDY())
1482 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001483
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001484 if (Info.hasWorkItemIDZ())
1485 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1486}
1487
1488static void allocateSpecialInputSGPRs(CCState &CCInfo,
1489 MachineFunction &MF,
1490 const SIRegisterInfo &TRI,
1491 SIMachineFunctionInfo &Info) {
1492 auto &ArgInfo = Info.getArgInfo();
1493
1494 // TODO: Unify handling with private memory pointers.
1495
1496 if (Info.hasDispatchPtr())
1497 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1498
1499 if (Info.hasQueuePtr())
1500 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1501
1502 if (Info.hasKernargSegmentPtr())
1503 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1504
1505 if (Info.hasDispatchID())
1506 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1507
1508 // flat_scratch_init is not applicable for non-kernel functions.
1509
1510 if (Info.hasWorkGroupIDX())
1511 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1512
1513 if (Info.hasWorkGroupIDY())
1514 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1515
1516 if (Info.hasWorkGroupIDZ())
1517 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
Matt Arsenault817c2532017-08-03 23:12:44 +00001518
1519 if (Info.hasImplicitArgPtr())
1520 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001521}
1522
1523// Allocate special inputs passed in user SGPRs.
1524static void allocateHSAUserSGPRs(CCState &CCInfo,
1525 MachineFunction &MF,
1526 const SIRegisterInfo &TRI,
1527 SIMachineFunctionInfo &Info) {
Matt Arsenault10fc0622017-06-26 03:01:31 +00001528 if (Info.hasImplicitBufferPtr()) {
1529 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1530 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1531 CCInfo.AllocateReg(ImplicitBufferPtrReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001532 }
1533
1534 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1535 if (Info.hasPrivateSegmentBuffer()) {
1536 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1537 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1538 CCInfo.AllocateReg(PrivateSegmentBufferReg);
1539 }
1540
1541 if (Info.hasDispatchPtr()) {
1542 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1543 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1544 CCInfo.AllocateReg(DispatchPtrReg);
1545 }
1546
1547 if (Info.hasQueuePtr()) {
1548 unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1549 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1550 CCInfo.AllocateReg(QueuePtrReg);
1551 }
1552
1553 if (Info.hasKernargSegmentPtr()) {
1554 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1555 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1556 CCInfo.AllocateReg(InputPtrReg);
1557 }
1558
1559 if (Info.hasDispatchID()) {
1560 unsigned DispatchIDReg = Info.addDispatchID(TRI);
1561 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1562 CCInfo.AllocateReg(DispatchIDReg);
1563 }
1564
1565 if (Info.hasFlatScratchInit()) {
1566 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1567 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1568 CCInfo.AllocateReg(FlatScratchInitReg);
1569 }
1570
1571 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1572 // these from the dispatch pointer.
1573}
1574
1575// Allocate special input registers that are initialized per-wave.
1576static void allocateSystemSGPRs(CCState &CCInfo,
1577 MachineFunction &MF,
1578 SIMachineFunctionInfo &Info,
Marek Olsak584d2c02017-05-04 22:25:20 +00001579 CallingConv::ID CallConv,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001580 bool IsShader) {
1581 if (Info.hasWorkGroupIDX()) {
1582 unsigned Reg = Info.addWorkGroupIDX();
1583 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1584 CCInfo.AllocateReg(Reg);
1585 }
1586
1587 if (Info.hasWorkGroupIDY()) {
1588 unsigned Reg = Info.addWorkGroupIDY();
1589 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1590 CCInfo.AllocateReg(Reg);
1591 }
1592
1593 if (Info.hasWorkGroupIDZ()) {
1594 unsigned Reg = Info.addWorkGroupIDZ();
1595 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1596 CCInfo.AllocateReg(Reg);
1597 }
1598
1599 if (Info.hasWorkGroupInfo()) {
1600 unsigned Reg = Info.addWorkGroupInfo();
1601 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1602 CCInfo.AllocateReg(Reg);
1603 }
1604
1605 if (Info.hasPrivateSegmentWaveByteOffset()) {
1606 // Scratch wave offset passed in system SGPR.
1607 unsigned PrivateSegmentWaveByteOffsetReg;
1608
1609 if (IsShader) {
Marek Olsak584d2c02017-05-04 22:25:20 +00001610 PrivateSegmentWaveByteOffsetReg =
1611 Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1612
1613 // This is true if the scratch wave byte offset doesn't have a fixed
1614 // location.
1615 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1616 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1617 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1618 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001619 } else
1620 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1621
1622 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1623 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1624 }
1625}
1626
1627static void reservePrivateMemoryRegs(const TargetMachine &TM,
1628 MachineFunction &MF,
1629 const SIRegisterInfo &TRI,
Matt Arsenault1cc47f82017-07-18 16:44:56 +00001630 SIMachineFunctionInfo &Info) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001631 // Now that we've figured out where the scratch register inputs are, see if
1632 // we should reserve the arguments and use them directly.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001633 MachineFrameInfo &MFI = MF.getFrameInfo();
1634 bool HasStackObjects = MFI.hasStackObjects();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001635
1636 // Record that we know we have non-spill stack objects so we don't need to
1637 // check all stack objects later.
1638 if (HasStackObjects)
1639 Info.setHasNonSpillStackObjects(true);
1640
1641 // Everything live out of a block is spilled with fast regalloc, so it's
1642 // almost certain that spilling will be required.
1643 if (TM.getOptLevel() == CodeGenOpt::None)
1644 HasStackObjects = true;
1645
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001646 // For now assume stack access is needed in any callee functions, so we need
1647 // to pass in the scratch registers.
1648 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1649
Tom Stellard5bfbae52018-07-11 20:59:01 +00001650 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001651 if (ST.isAmdCodeObjectV2(MF.getFunction())) {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001652 if (RequiresStackAccess) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001653 // If we have stack objects, we unquestionably need the private buffer
1654 // resource. For the Code Object V2 ABI, this will be the first 4 user
1655 // SGPR inputs. We can reserve those and use them directly.
1656
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001657 unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1658 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001659 Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1660
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001661 if (MFI.hasCalls()) {
1662 // If we have calls, we need to keep the frame register in a register
1663 // that won't be clobbered by a call, so ensure it is copied somewhere.
1664
1665 // This is not a problem for the scratch wave offset, because the same
1666 // registers are reserved in all functions.
1667
1668 // FIXME: Nothing is really ensuring this is a call preserved register,
1669 // it's just selected from the end so it happens to be.
1670 unsigned ReservedOffsetReg
1671 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1672 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1673 } else {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001674 unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1675 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001676 Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1677 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001678 } else {
1679 unsigned ReservedBufferReg
1680 = TRI.reservedPrivateSegmentBufferReg(MF);
1681 unsigned ReservedOffsetReg
1682 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1683
1684 // We tentatively reserve the last registers (skipping the last two
1685 // which may contain VCC). After register allocation, we'll replace
1686 // these with the ones immediately after those which were really
1687 // allocated. In the prologue copies will be inserted from the argument
1688 // to these reserved registers.
1689 Info.setScratchRSrcReg(ReservedBufferReg);
1690 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1691 }
1692 } else {
1693 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1694
1695 // Without HSA, relocations are used for the scratch pointer and the
1696 // buffer resource setup is always inserted in the prologue. Scratch wave
1697 // offset is still in an input SGPR.
1698 Info.setScratchRSrcReg(ReservedBufferReg);
1699
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001700 if (HasStackObjects && !MFI.hasCalls()) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001701 unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1702 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001703 Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1704 } else {
1705 unsigned ReservedOffsetReg
1706 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1707 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1708 }
1709 }
1710}
1711
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001712bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1713 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1714 return !Info->isEntryFunction();
1715}
1716
1717void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1718
1719}
1720
1721void SITargetLowering::insertCopiesSplitCSR(
1722 MachineBasicBlock *Entry,
1723 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1724 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1725
1726 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1727 if (!IStart)
1728 return;
1729
1730 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1731 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1732 MachineBasicBlock::iterator MBBI = Entry->begin();
1733 for (const MCPhysReg *I = IStart; *I; ++I) {
1734 const TargetRegisterClass *RC = nullptr;
1735 if (AMDGPU::SReg_64RegClass.contains(*I))
1736 RC = &AMDGPU::SGPR_64RegClass;
1737 else if (AMDGPU::SReg_32RegClass.contains(*I))
1738 RC = &AMDGPU::SGPR_32RegClass;
1739 else
1740 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1741
1742 unsigned NewVR = MRI->createVirtualRegister(RC);
1743 // Create copy from CSR to a virtual register.
1744 Entry->addLiveIn(*I);
1745 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1746 .addReg(*I);
1747
1748 // Insert the copy-back instructions right before the terminator.
1749 for (auto *Exit : Exits)
1750 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1751 TII->get(TargetOpcode::COPY), *I)
1752 .addReg(NewVR);
1753 }
1754}
1755
Christian Konig2c8f6d52013-03-07 09:03:52 +00001756SDValue SITargetLowering::LowerFormalArguments(
Eric Christopher7792e322015-01-30 23:24:40 +00001757 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001758 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1759 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001760 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001761
1762 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001763 const Function &Fn = MF.getFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00001764 FunctionType *FType = MF.getFunction().getFunctionType();
Christian Konig99ee0f42013-03-07 09:04:14 +00001765 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Tom Stellard5bfbae52018-07-11 20:59:01 +00001766 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001767
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +00001768 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00001769 DiagnosticInfoUnsupported NoGraphicsHSA(
Matthias Braunf1caa282017-12-15 22:22:58 +00001770 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
Matt Arsenaultd48da142015-11-02 23:23:02 +00001771 DAG.getContext()->diagnose(NoGraphicsHSA);
Diana Picus81bc3172016-05-26 15:24:55 +00001772 return DAG.getEntryNode();
Matt Arsenaultd48da142015-11-02 23:23:02 +00001773 }
1774
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00001775 // Create stack objects that are used for emitting the debugger prologue if
1776 // the "amdgpu-debugger-emit-prologue" attribute was specified.
1777 if (ST.debuggerEmitPrologue())
1778 createDebuggerPrologueStackObjects(MF);
1779
Christian Konig2c8f6d52013-03-07 09:03:52 +00001780 SmallVector<ISD::InputArg, 16> Splits;
Christian Konig2c8f6d52013-03-07 09:03:52 +00001781 SmallVector<CCValAssign, 16> ArgLocs;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001782 BitVector Skipped(Ins.size());
Eric Christopherb5217502014-08-06 18:45:26 +00001783 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1784 *DAG.getContext());
Christian Konig2c8f6d52013-03-07 09:03:52 +00001785
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001786 bool IsShader = AMDGPU::isShader(CallConv);
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +00001787 bool IsKernel = AMDGPU::isKernel(CallConv);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001788 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
Christian Konig99ee0f42013-03-07 09:04:14 +00001789
Matt Arsenaultd1867c02017-08-02 00:59:51 +00001790 if (!IsEntryFunc) {
1791 // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1792 // this when allocating argument fixed offsets.
1793 CCInfo.AllocateStack(4, 4);
1794 }
1795
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001796 if (IsShader) {
1797 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1798
1799 // At least one interpolation mode must be enabled or else the GPU will
1800 // hang.
1801 //
1802 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1803 // set PSInputAddr, the user wants to enable some bits after the compilation
1804 // based on run-time states. Since we can't know what the final PSInputEna
1805 // will look like, we shouldn't do anything here and the user should take
1806 // responsibility for the correct programming.
1807 //
1808 // Otherwise, the following restrictions apply:
1809 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1810 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1811 // enabled too.
Tim Renoufc8ffffe2017-10-12 16:16:41 +00001812 if (CallConv == CallingConv::AMDGPU_PS) {
1813 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1814 ((Info->getPSInputAddr() & 0xF) == 0 &&
1815 Info->isPSInputAllocated(11))) {
1816 CCInfo.AllocateReg(AMDGPU::VGPR0);
1817 CCInfo.AllocateReg(AMDGPU::VGPR1);
1818 Info->markPSInputAllocated(0);
1819 Info->markPSInputEnabled(0);
1820 }
1821 if (Subtarget->isAmdPalOS()) {
1822 // For isAmdPalOS, the user does not enable some bits after compilation
1823 // based on run-time states; the register values being generated here are
1824 // the final ones set in hardware. Therefore we need to apply the
1825 // workaround to PSInputAddr and PSInputEnable together. (The case where
1826 // a bit is set in PSInputAddr but not PSInputEnable is where the
1827 // frontend set up an input arg for a particular interpolation mode, but
1828 // nothing uses that input arg. Really we should have an earlier pass
1829 // that removes such an arg.)
1830 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1831 if ((PsInputBits & 0x7F) == 0 ||
1832 ((PsInputBits & 0xF) == 0 &&
1833 (PsInputBits >> 11 & 1)))
1834 Info->markPSInputEnabled(
1835 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1836 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001837 }
1838
Tom Stellard2f3f9852017-01-25 01:25:13 +00001839 assert(!Info->hasDispatchPtr() &&
Tom Stellardf110f8f2016-04-14 16:27:03 +00001840 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1841 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1842 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1843 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1844 !Info->hasWorkItemIDZ());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001845 } else if (IsKernel) {
1846 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001847 } else {
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001848 Splits.append(Ins.begin(), Ins.end());
Tom Stellardaf775432013-10-23 00:44:32 +00001849 }
1850
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001851 if (IsEntryFunc) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001852 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001853 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
Tom Stellard2f3f9852017-01-25 01:25:13 +00001854 }
1855
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001856 if (IsKernel) {
Tom Stellardbbeb45a2016-09-16 21:53:00 +00001857 analyzeFormalArgumentsCompute(CCInfo, Ins);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001858 } else {
1859 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1860 CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1861 }
Christian Konig2c8f6d52013-03-07 09:03:52 +00001862
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001863 SmallVector<SDValue, 16> Chains;
1864
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001865 // FIXME: This is the minimum kernel argument alignment. We should improve
1866 // this to the maximum alignment of the arguments.
1867 //
1868 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
1869 // kern arg offset.
1870 const unsigned KernelArgBaseAlign = 16;
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001871
1872 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
Christian Konigb7be72d2013-05-17 09:46:48 +00001873 const ISD::InputArg &Arg = Ins[i];
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001874 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
Christian Konigb7be72d2013-05-17 09:46:48 +00001875 InVals.push_back(DAG.getUNDEF(Arg.VT));
Christian Konig99ee0f42013-03-07 09:04:14 +00001876 continue;
1877 }
1878
Christian Konig2c8f6d52013-03-07 09:03:52 +00001879 CCValAssign &VA = ArgLocs[ArgIdx++];
Craig Topper7f416c82014-11-16 21:17:18 +00001880 MVT VT = VA.getLocVT();
Tom Stellarded882c22013-06-03 17:40:11 +00001881
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001882 if (IsEntryFunc && VA.isMemLoc()) {
Tom Stellardaf775432013-10-23 00:44:32 +00001883 VT = Ins[i].VT;
Tom Stellardbbeb45a2016-09-16 21:53:00 +00001884 EVT MemVT = VA.getLocVT();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001885
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001886 const uint64_t Offset = VA.getLocMemOffset();
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001887 unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
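      // MinAlign yields the largest power of two dividing both values, e.g. an
      // argument at byte offset 4 is treated as 4-byte aligned, while one at
      // offset 0 keeps the full 16-byte base alignment.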
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001888
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001889 SDValue Arg = lowerKernargMemParameter(
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001890 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001891 Chains.push_back(Arg.getValue(1));
Tom Stellardca7ecf32014-08-22 18:49:31 +00001892
Craig Toppere3dcce92015-08-01 22:20:21 +00001893 auto *ParamTy =
Andrew Trick05938a52015-02-16 18:10:47 +00001894 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
Tom Stellard5bfbae52018-07-11 20:59:01 +00001895 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001896 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
Tom Stellardca7ecf32014-08-22 18:49:31 +00001897 // On SI local pointers are just offsets into LDS, so they are always
1898 // less than 16-bits. On CI and newer they could potentially be
1899 // real pointers, so we can't guarantee their size.
1900 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1901 DAG.getValueType(MVT::i16));
1902 }
1903
Tom Stellarded882c22013-06-03 17:40:11 +00001904 InVals.push_back(Arg);
1905 continue;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001906 } else if (!IsEntryFunc && VA.isMemLoc()) {
1907 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
1908 InVals.push_back(Val);
1909 if (!Arg.Flags.isByVal())
1910 Chains.push_back(Val.getValue(1));
1911 continue;
Tom Stellarded882c22013-06-03 17:40:11 +00001912 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001913
Christian Konig2c8f6d52013-03-07 09:03:52 +00001914 assert(VA.isRegLoc() && "Parameter must be in a register!");
1915
1916 unsigned Reg = VA.getLocReg();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001917 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
Matt Arsenaultb3463552017-07-15 05:52:59 +00001918 EVT ValVT = VA.getValVT();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001919
1920 Reg = MF.addLiveIn(Reg, RC);
1921 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1922
Matt Arsenault45b98182017-11-15 00:45:43 +00001923 if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
1924 // The return object should be reasonably addressable.
1925
1926 // FIXME: This helps when the return is a real sret. If it is an
1927 // automatically inserted sret (i.e. CanLowerReturn returns false), an
1928 // extra copy is inserted in SelectionDAGBuilder which obscures this.
1929 unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
1930 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1931 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
1932 }
1933
Matt Arsenaultb3463552017-07-15 05:52:59 +00001934 // If this is an 8 or 16-bit value, it is really passed promoted
1935 // to 32 bits. Insert an assert[sz]ext to capture this, then
1936 // truncate to the right size.
1937 switch (VA.getLocInfo()) {
1938 case CCValAssign::Full:
1939 break;
1940 case CCValAssign::BCvt:
1941 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
1942 break;
1943 case CCValAssign::SExt:
1944 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
1945 DAG.getValueType(ValVT));
1946 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1947 break;
1948 case CCValAssign::ZExt:
1949 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1950 DAG.getValueType(ValVT));
1951 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1952 break;
1953 case CCValAssign::AExt:
1954 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1955 break;
1956 default:
1957 llvm_unreachable("Unknown loc info!");
1958 }
1959
Christian Konig2c8f6d52013-03-07 09:03:52 +00001960 InVals.push_back(Val);
1961 }
Tom Stellarde99fb652015-01-20 19:33:04 +00001962
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001963 if (!IsEntryFunc) {
1964 // Special inputs come after user arguments.
1965 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
1966 }
1967
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001968 // Start adding system SGPRs.
1969 if (IsEntryFunc) {
1970 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001971 } else {
1972 CCInfo.AllocateReg(Info->getScratchRSrcReg());
1973 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
1974 CCInfo.AllocateReg(Info->getFrameOffsetReg());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001975 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001976 }
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001977
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001978 auto &ArgUsageInfo =
1979 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001980 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001981
Matt Arsenault71bcbd42017-08-11 20:42:08 +00001982 unsigned StackArgSize = CCInfo.getNextStackOffset();
1983 Info->setBytesInStackArgArea(StackArgSize);
1984
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001985 return Chains.empty() ? Chain :
1986 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
Christian Konig2c8f6d52013-03-07 09:03:52 +00001987}
1988
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001989// TODO: If return values can't fit in registers, we should return as many as
1990// possible in registers before passing on stack.
1991bool SITargetLowering::CanLowerReturn(
1992 CallingConv::ID CallConv,
1993 MachineFunction &MF, bool IsVarArg,
1994 const SmallVectorImpl<ISD::OutputArg> &Outs,
1995 LLVMContext &Context) const {
1996 // Replacing returns with sret/stack usage doesn't make sense for shaders.
1997 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
1998 // for shaders. Vector types should be explicitly handled by CC.
1999 if (AMDGPU::isEntryFunctionCC(CallConv))
2000 return true;
2001
2002 SmallVector<CCValAssign, 16> RVLocs;
2003 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2004 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2005}
2006
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002007SDValue
2008SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2009 bool isVarArg,
2010 const SmallVectorImpl<ISD::OutputArg> &Outs,
2011 const SmallVectorImpl<SDValue> &OutVals,
2012 const SDLoc &DL, SelectionDAG &DAG) const {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002013 MachineFunction &MF = DAG.getMachineFunction();
2014 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2015
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002016 if (AMDGPU::isKernel(CallConv)) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002017 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2018 OutVals, DL, DAG);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002019 }
2020
2021 bool IsShader = AMDGPU::isShader(CallConv);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002022
Matt Arsenault55ab9212018-08-01 19:57:34 +00002023 Info->setIfReturnsVoid(Outs.empty());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002024 bool IsWaveEnd = Info->returnsVoid() && IsShader;
Marek Olsak8e9cc632016-01-13 17:23:09 +00002025
Marek Olsak8a0f3352016-01-13 17:23:04 +00002026 // CCValAssign - represent the assignment of the return value to a location.
2027 SmallVector<CCValAssign, 48> RVLocs;
Matt Arsenault55ab9212018-08-01 19:57:34 +00002028 SmallVector<ISD::OutputArg, 48> Splits;
Marek Olsak8a0f3352016-01-13 17:23:04 +00002029
2030 // CCState - Info about the registers and stack slots.
2031 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2032 *DAG.getContext());
2033
2034 // Analyze outgoing return values.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002035 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
Marek Olsak8a0f3352016-01-13 17:23:04 +00002036
2037 SDValue Flag;
2038 SmallVector<SDValue, 48> RetOps;
2039 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2040
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002041 // Add return address for callable functions.
2042 if (!Info->isEntryFunction()) {
2043 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2044 SDValue ReturnAddrReg = CreateLiveInRegister(
2045 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2046
2047 // FIXME: Should be able to use a vreg here, but need a way to prevent it
2048 // from being allocated to a CSR.
2049
2050 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2051 MVT::i64);
2052
2053 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2054 Flag = Chain.getValue(1);
2055
2056 RetOps.push_back(PhysReturnAddrReg);
2057 }
2058
Marek Olsak8a0f3352016-01-13 17:23:04 +00002059 // Copy the result values into the output registers.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002060 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2061 ++I, ++RealRVLocIdx) {
2062 CCValAssign &VA = RVLocs[I];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002063 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002064 // TODO: Partially return in registers if return values don't fit.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002065 SDValue Arg = OutVals[RealRVLocIdx];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002066
2067 // Copied from other backends.
2068 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002069 case CCValAssign::Full:
2070 break;
2071 case CCValAssign::BCvt:
2072 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2073 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002074 case CCValAssign::SExt:
2075 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2076 break;
2077 case CCValAssign::ZExt:
2078 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2079 break;
2080 case CCValAssign::AExt:
2081 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2082 break;
2083 default:
2084 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002085 }
2086
2087 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2088 Flag = Chain.getValue(1);
2089 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2090 }
2091
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002092 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002093 if (!Info->isEntryFunction()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002094 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002095 const MCPhysReg *I =
2096 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2097 if (I) {
2098 for (; *I; ++I) {
2099 if (AMDGPU::SReg_64RegClass.contains(*I))
2100 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2101 else if (AMDGPU::SReg_32RegClass.contains(*I))
2102 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2103 else
2104 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2105 }
2106 }
2107 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002108
Marek Olsak8a0f3352016-01-13 17:23:04 +00002109 // Update chain and glue.
2110 RetOps[0] = Chain;
2111 if (Flag.getNode())
2112 RetOps.push_back(Flag);
2113
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002114 unsigned Opc = AMDGPUISD::ENDPGM;
2115 if (!IsWaveEnd)
2116 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002117 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002118}
2119
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002120SDValue SITargetLowering::LowerCallResult(
2121 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2122 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2123 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2124 SDValue ThisVal) const {
2125 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2126
2127 // Assign locations to each value returned by this call.
2128 SmallVector<CCValAssign, 16> RVLocs;
2129 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2130 *DAG.getContext());
2131 CCInfo.AnalyzeCallResult(Ins, RetCC);
2132
2133 // Copy all of the result registers out of their specified physreg.
2134 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2135 CCValAssign VA = RVLocs[i];
2136 SDValue Val;
2137
2138 if (VA.isRegLoc()) {
2139 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2140 Chain = Val.getValue(1);
2141 InFlag = Val.getValue(2);
2142 } else if (VA.isMemLoc()) {
2143 report_fatal_error("TODO: return values in memory");
2144 } else
2145 llvm_unreachable("unknown argument location type");
2146
2147 switch (VA.getLocInfo()) {
2148 case CCValAssign::Full:
2149 break;
2150 case CCValAssign::BCvt:
2151 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2152 break;
2153 case CCValAssign::ZExt:
2154 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2155 DAG.getValueType(VA.getValVT()));
2156 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2157 break;
2158 case CCValAssign::SExt:
2159 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2160 DAG.getValueType(VA.getValVT()));
2161 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2162 break;
2163 case CCValAssign::AExt:
2164 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2165 break;
2166 default:
2167 llvm_unreachable("Unknown loc info!");
2168 }
2169
2170 InVals.push_back(Val);
2171 }
2172
2173 return Chain;
2174}
2175
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002176// Add code to pass special inputs required depending on used features separate
2177// from the explicit user arguments present in the IR.
2178void SITargetLowering::passSpecialInputs(
2179 CallLoweringInfo &CLI,
2180 const SIMachineFunctionInfo &Info,
2181 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2182 SmallVectorImpl<SDValue> &MemOpChains,
2183 SDValue Chain,
2184 SDValue StackPtr) const {
2185 // If we don't have a call site, this was a call inserted by
2186 // legalization. These can never use special inputs.
2187 if (!CLI.CS)
2188 return;
2189
2190 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002191 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002192
2193 SelectionDAG &DAG = CLI.DAG;
2194 const SDLoc &DL = CLI.DL;
2195
Tom Stellardc5a154d2018-06-28 23:47:12 +00002196 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002197
2198 auto &ArgUsageInfo =
2199 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2200 const AMDGPUFunctionArgInfo &CalleeArgInfo
2201 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2202
2203 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2204
2205 // TODO: Unify with private memory register handling. This is complicated by
2206 // the fact that at least in kernels, the input argument is not necessarily
2207 // in the same location as the input.
2208 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2209 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2210 AMDGPUFunctionArgInfo::QUEUE_PTR,
2211 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2212 AMDGPUFunctionArgInfo::DISPATCH_ID,
2213 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2214 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2215 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2216 AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2217 AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
Matt Arsenault817c2532017-08-03 23:12:44 +00002218 AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2219 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002220 };
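  // For each special input the callee needs, forward the caller's incoming
  // value (or compute the implicit argument pointer) either in the register the
  // callee expects or in its assigned stack slot.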
2221
2222 for (auto InputID : InputRegs) {
2223 const ArgDescriptor *OutgoingArg;
2224 const TargetRegisterClass *ArgRC;
2225
2226 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2227 if (!OutgoingArg)
2228 continue;
2229
2230 const ArgDescriptor *IncomingArg;
2231 const TargetRegisterClass *IncomingArgRC;
2232 std::tie(IncomingArg, IncomingArgRC)
2233 = CallerArgInfo.getPreloadedValue(InputID);
2234 assert(IncomingArgRC == ArgRC);
2235
2236 // All special arguments are ints for now.
2237 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
Matt Arsenault817c2532017-08-03 23:12:44 +00002238 SDValue InputReg;
2239
2240 if (IncomingArg) {
2241 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2242 } else {
2243 // The implicit arg ptr is special because it doesn't have a corresponding
2244 // input for kernels, and is computed from the kernarg segment pointer.
2245 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2246 InputReg = getImplicitArgPtr(DAG, DL);
2247 }
2248
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002249 if (OutgoingArg->isRegister()) {
2250 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2251 } else {
2252 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
2253 InputReg,
2254 OutgoingArg->getStackOffset());
2255 MemOpChains.push_back(ArgStore);
2256 }
2257 }
2258}
2259
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002260static bool canGuaranteeTCO(CallingConv::ID CC) {
2261 return CC == CallingConv::Fast;
2262}
2263
2264/// Return true if we might ever do TCO for calls with this calling convention.
2265static bool mayTailCallThisCC(CallingConv::ID CC) {
2266 switch (CC) {
2267 case CallingConv::C:
2268 return true;
2269 default:
2270 return canGuaranteeTCO(CC);
2271 }
2272}
2273
2274bool SITargetLowering::isEligibleForTailCallOptimization(
2275 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2276 const SmallVectorImpl<ISD::OutputArg> &Outs,
2277 const SmallVectorImpl<SDValue> &OutVals,
2278 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2279 if (!mayTailCallThisCC(CalleeCC))
2280 return false;
2281
2282 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002283 const Function &CallerF = MF.getFunction();
2284 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002285 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2286 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2287
2288  // Kernels aren't callable, and don't have a live-in return address, so it
2289 // doesn't make sense to do a tail call with entry functions.
2290 if (!CallerPreserved)
2291 return false;
2292
2293 bool CCMatch = CallerCC == CalleeCC;
2294
2295 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2296 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2297 return true;
2298 return false;
2299 }
2300
2301 // TODO: Can we handle var args?
2302 if (IsVarArg)
2303 return false;
2304
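  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call, so don't attempt a tail call in that
  // case.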
Matthias Braunf1caa282017-12-15 22:22:58 +00002305 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002306 if (Arg.hasByValAttr())
2307 return false;
2308 }
2309
2310 LLVMContext &Ctx = *DAG.getContext();
2311
2312 // Check that the call results are passed in the same way.
2313 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2314 CCAssignFnForCall(CalleeCC, IsVarArg),
2315 CCAssignFnForCall(CallerCC, IsVarArg)))
2316 return false;
2317
2318 // The callee has to preserve all registers the caller needs to preserve.
2319 if (!CCMatch) {
2320 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2321 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2322 return false;
2323 }
2324
2325 // Nothing more to check if the callee is taking no arguments.
2326 if (Outs.empty())
2327 return true;
2328
2329 SmallVector<CCValAssign, 16> ArgLocs;
2330 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2331
2332 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2333
2334 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2335 // If the stack arguments for this call do not fit into our own save area then
2336  // the call cannot be made a tail call.
2337 // TODO: Is this really necessary?
2338 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2339 return false;
2340
2341 const MachineRegisterInfo &MRI = MF.getRegInfo();
2342 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2343}
2344
2345bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2346 if (!CI->isTailCall())
2347 return false;
2348
2349 const Function *ParentFn = CI->getParent()->getParent();
2350 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2351 return false;
2352
2353 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2354 return (Attr.getValueAsString() != "true");
2355}
2356
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002357// The wave scratch offset register is used as the global base pointer.
2358SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2359 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002360 SelectionDAG &DAG = CLI.DAG;
2361 const SDLoc &DL = CLI.DL;
2362 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2363 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2364 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2365 SDValue Chain = CLI.Chain;
2366 SDValue Callee = CLI.Callee;
2367 bool &IsTailCall = CLI.IsTailCall;
2368 CallingConv::ID CallConv = CLI.CallConv;
2369 bool IsVarArg = CLI.IsVarArg;
2370 bool IsSibCall = false;
2371 bool IsThisReturn = false;
2372 MachineFunction &MF = DAG.getMachineFunction();
2373
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002374 if (IsVarArg) {
2375 return lowerUnhandledCall(CLI, InVals,
2376 "unsupported call to variadic function ");
2377 }
2378
Matt Arsenault935f3b72018-08-08 16:58:39 +00002379 if (!CLI.CS.getInstruction())
2380 report_fatal_error("unsupported libcall legalization");
2381
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002382 if (!CLI.CS.getCalledFunction()) {
2383 return lowerUnhandledCall(CLI, InVals,
2384 "unsupported indirect call to function ");
2385 }
2386
2387 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2388 return lowerUnhandledCall(CLI, InVals,
2389 "unsupported required tail call to function ");
2390 }
2391
Matt Arsenault1fb90132018-06-28 10:18:36 +00002392 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2393 // Note the issue is with the CC of the calling function, not of the call
2394 // itself.
2395 return lowerUnhandledCall(CLI, InVals,
2396 "unsupported call from graphics shader of function ");
2397 }
2398
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002399 // The first 4 bytes are reserved for the callee's emergency stack slot.
2400 const unsigned CalleeUsableStackOffset = 4;
2401
2402 if (IsTailCall) {
2403 IsTailCall = isEligibleForTailCallOptimization(
2404 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2405 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2406 report_fatal_error("failed to perform tail call elimination on a call "
2407 "site marked musttail");
2408 }
2409
2410 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2411
2412 // A sibling call is one where we're under the usual C ABI and not planning
2413 // to change that but can still do a tail call:
2414 if (!TailCallOpt && IsTailCall)
2415 IsSibCall = true;
2416
2417 if (IsTailCall)
2418 ++NumTailCalls;
2419 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002420
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002421 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
Yaxun Liu1ac16612017-11-06 13:01:33 +00002422 // FIXME: Remove this hack for function pointer types after removing
2423 // support of old address space mapping. In the new address space
2424 // mapping the pointer in default address space is 64 bit, therefore
2425 // does not need this hack.
2426 if (Callee.getValueType() == MVT::i32) {
2427 const GlobalValue *GV = GA->getGlobal();
2428 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2429 GA->getTargetFlags());
2430 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002431 }
Yaxun Liu1ac16612017-11-06 13:01:33 +00002432 assert(Callee.getValueType() == MVT::i64);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002433
2434 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2435
2436 // Analyze operands of the call, assigning locations to each operand.
2437 SmallVector<CCValAssign, 16> ArgLocs;
2438 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2439 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2440 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2441
2442 // Get a count of how many bytes are to be pushed on the stack.
2443 unsigned NumBytes = CCInfo.getNextStackOffset();
2444
2445 if (IsSibCall) {
2446 // Since we're not changing the ABI to make this a tail call, the memory
2447 // operands are already available in the caller's incoming argument space.
2448 NumBytes = 0;
2449 }
2450
2451 // FPDiff is the byte offset of the call's argument area from the callee's.
2452 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2453 // by this amount for a tail call. In a sibling call it must be 0 because the
2454 // caller will deallocate the entire stack and the callee still expects its
2455 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002456 int32_t FPDiff = 0;
2457 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002458 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2459
Matt Arsenault6efd0822017-09-14 17:14:57 +00002460 SDValue CallerSavedFP;
2461
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002462 // Adjust the stack pointer for the new arguments...
2463 // These operations are automatically eliminated by the prolog/epilog pass
2464 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002465 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002466
2467 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2468
2469 // In the HSA case, this should be an identity copy.
2470 SDValue ScratchRSrcReg
2471 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2472 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2473
2474    // TODO: Don't hardcode these registers; get them from the callee function.
2475 SDValue ScratchWaveOffsetReg
2476 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2477 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault6efd0822017-09-14 17:14:57 +00002478
2479 if (!Info->isEntryFunction()) {
2480 // Avoid clobbering this function's FP value. In the current convention
2481      // the callee will overwrite this, so do save/restore around the call site.
2482 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2483 Info->getFrameOffsetReg(), MVT::i32);
2484 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002485 }
2486
2487 // Stack pointer relative accesses are done by changing the offset SGPR. This
2488 // is just the VGPR offset component.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002489 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002490
2491 SmallVector<SDValue, 8> MemOpChains;
2492 MVT PtrVT = MVT::i32;
2493
2494 // Walk the register/memloc assignments, inserting copies/loads.
2495 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2496 ++i, ++realArgIdx) {
2497 CCValAssign &VA = ArgLocs[i];
2498 SDValue Arg = OutVals[realArgIdx];
2499
2500 // Promote the value if needed.
2501 switch (VA.getLocInfo()) {
2502 case CCValAssign::Full:
2503 break;
2504 case CCValAssign::BCvt:
2505 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2506 break;
2507 case CCValAssign::ZExt:
2508 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2509 break;
2510 case CCValAssign::SExt:
2511 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2512 break;
2513 case CCValAssign::AExt:
2514 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2515 break;
2516 case CCValAssign::FPExt:
2517 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2518 break;
2519 default:
2520 llvm_unreachable("Unknown loc info!");
2521 }
2522
2523 if (VA.isRegLoc()) {
2524 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2525 } else {
2526 assert(VA.isMemLoc());
2527
2528 SDValue DstAddr;
2529 MachinePointerInfo DstInfo;
2530
2531 unsigned LocMemOffset = VA.getLocMemOffset();
2532 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002533
2534 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002535
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002536 if (IsTailCall) {
2537 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2538 unsigned OpSize = Flags.isByVal() ?
2539 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002540
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002541 Offset = Offset + FPDiff;
2542 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2543
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002544 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2545 StackPtr);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002546 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2547
2548 // Make sure any stack arguments overlapping with where we're storing
2549 // are loaded before this eventual operation. Otherwise they'll be
2550 // clobbered.
2551
2552 // FIXME: Why is this really necessary? This seems to just result in a
2553 // lot of code to copy the stack and write them back to the same
2554 // locations, which are supposed to be immutable?
2555 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2556 } else {
2557 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002558 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2559 }
2560
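      // Byval aggregates are copied into the outgoing stack slot with an inline
      // memcpy; all other stack-assigned arguments become plain stores.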
2561 if (Outs[i].Flags.isByVal()) {
2562 SDValue SizeNode =
2563 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2564 SDValue Cpy = DAG.getMemcpy(
2565 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2566 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002567 /*isTailCall = */ false, DstInfo,
2568 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2569 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002570
2571 MemOpChains.push_back(Cpy);
2572 } else {
2573 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2574 MemOpChains.push_back(Store);
2575 }
2576 }
2577 }
2578
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002579 // Copy special input registers after user input arguments.
2580 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2581
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002582 if (!MemOpChains.empty())
2583 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2584
2585 // Build a sequence of copy-to-reg nodes chained together with token chain
2586 // and flag operands which copy the outgoing args into the appropriate regs.
2587 SDValue InFlag;
2588 for (auto &RegToPass : RegsToPass) {
2589 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2590 RegToPass.second, InFlag);
2591 InFlag = Chain.getValue(1);
2592 }
2593
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002594
2595 SDValue PhysReturnAddrReg;
2596 if (IsTailCall) {
2597 // Since the return is being combined with the call, we need to pass on the
2598 // return address.
2599
2600 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2601 SDValue ReturnAddrReg = CreateLiveInRegister(
2602 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2603
2604 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2605 MVT::i64);
2606 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2607 InFlag = Chain.getValue(1);
2608 }
2609
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002610 // We don't usually want to end the call-sequence here because we would tidy
2611  // the frame up *after* the call. However, in the ABI-changing tail-call case
2612 // we've carefully laid out the parameters so that when sp is reset they'll be
2613 // in the correct location.
2614 if (IsTailCall && !IsSibCall) {
2615 Chain = DAG.getCALLSEQ_END(Chain,
2616 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2617 DAG.getTargetConstant(0, DL, MVT::i32),
2618 InFlag, DL);
2619 InFlag = Chain.getValue(1);
2620 }
2621
2622 std::vector<SDValue> Ops;
2623 Ops.push_back(Chain);
2624 Ops.push_back(Callee);
2625
2626 if (IsTailCall) {
2627 // Each tail call may have to adjust the stack by a different amount, so
2628 // this information must travel along with the operation for eventual
2629 // consumption by emitEpilogue.
2630 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002631
2632 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002633 }
2634
2635 // Add argument registers to the end of the list so that they are known live
2636 // into the call.
2637 for (auto &RegToPass : RegsToPass) {
2638 Ops.push_back(DAG.getRegister(RegToPass.first,
2639 RegToPass.second.getValueType()));
2640 }
2641
2642 // Add a register mask operand representing the call-preserved registers.
2643
Tom Stellardc5a154d2018-06-28 23:47:12 +00002644 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002645 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2646 assert(Mask && "Missing call preserved mask for calling convention");
2647 Ops.push_back(DAG.getRegisterMask(Mask));
2648
2649 if (InFlag.getNode())
2650 Ops.push_back(InFlag);
2651
2652 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2653
2654  // If we're doing a tail call, use a TC_RETURN here rather than an
2655 // actual call instruction.
2656 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002657 MFI.setHasTailCall();
2658 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002659 }
2660
2661 // Returns a chain and a flag for retval copy to use.
2662 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2663 Chain = Call.getValue(0);
2664 InFlag = Call.getValue(1);
2665
Matt Arsenault6efd0822017-09-14 17:14:57 +00002666 if (CallerSavedFP) {
2667 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2668 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2669 InFlag = Chain.getValue(1);
2670 }
2671
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002672 uint64_t CalleePopBytes = NumBytes;
2673 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002674 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2675 InFlag, DL);
2676 if (!Ins.empty())
2677 InFlag = Chain.getValue(1);
2678
2679 // Handle result values, copying them out of physregs into vregs that we
2680 // return.
2681 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2682 InVals, IsThisReturn,
2683 IsThisReturn ? OutVals[0] : SDValue());
2684}
2685
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002686unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2687 SelectionDAG &DAG) const {
2688 unsigned Reg = StringSwitch<unsigned>(RegName)
2689 .Case("m0", AMDGPU::M0)
2690 .Case("exec", AMDGPU::EXEC)
2691 .Case("exec_lo", AMDGPU::EXEC_LO)
2692 .Case("exec_hi", AMDGPU::EXEC_HI)
2693 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2694 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2695 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2696 .Default(AMDGPU::NoRegister);
2697
2698 if (Reg == AMDGPU::NoRegister) {
2699 report_fatal_error(Twine("invalid register name \""
2700 + StringRef(RegName) + "\"."));
2701
2702 }
2703
Tom Stellard5bfbae52018-07-11 20:59:01 +00002704 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002705 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2706 report_fatal_error(Twine("invalid register \""
2707 + StringRef(RegName) + "\" for subtarget."));
2708 }
2709
2710 switch (Reg) {
2711 case AMDGPU::M0:
2712 case AMDGPU::EXEC_LO:
2713 case AMDGPU::EXEC_HI:
2714 case AMDGPU::FLAT_SCR_LO:
2715 case AMDGPU::FLAT_SCR_HI:
2716 if (VT.getSizeInBits() == 32)
2717 return Reg;
2718 break;
2719 case AMDGPU::EXEC:
2720 case AMDGPU::FLAT_SCR:
2721 if (VT.getSizeInBits() == 64)
2722 return Reg;
2723 break;
2724 default:
2725 llvm_unreachable("missing register type checking");
2726 }
2727
2728 report_fatal_error(Twine("invalid type for register \""
2729 + StringRef(RegName) + "\"."));
2730}
2731
Matt Arsenault786724a2016-07-12 21:41:32 +00002732// If kill is not the last instruction, split the block so kill is always a
2733// proper terminator.
2734MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2735 MachineBasicBlock *BB) const {
2736 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2737
2738 MachineBasicBlock::iterator SplitPoint(&MI);
2739 ++SplitPoint;
2740
2741 if (SplitPoint == BB->end()) {
2742 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002743 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002744 return BB;
2745 }
2746
2747 MachineFunction *MF = BB->getParent();
2748 MachineBasicBlock *SplitBB
2749 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2750
Matt Arsenault786724a2016-07-12 21:41:32 +00002751 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2752 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2753
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002754 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002755 BB->addSuccessor(SplitBB);
2756
Marek Olsakce76ea02017-10-24 10:27:13 +00002757 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002758 return SplitBB;
2759}
2760
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002761// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2762// wavefront. If the value is uniform and just happens to be in a VGPR, this
2763// will only do one iteration. In the worst case, this will loop 64 times.
2764//
2765// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002766static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2767 const SIInstrInfo *TII,
2768 MachineRegisterInfo &MRI,
2769 MachineBasicBlock &OrigBB,
2770 MachineBasicBlock &LoopBB,
2771 const DebugLoc &DL,
2772 const MachineOperand &IdxReg,
2773 unsigned InitReg,
2774 unsigned ResultReg,
2775 unsigned PhiReg,
2776 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002777 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002778 bool UseGPRIdxMode,
2779 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002780 MachineBasicBlock::iterator I = LoopBB.begin();
2781
2782 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2783 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2784 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2785 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2786
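  // The partially-built result and the saved-exec temporary are threaded
  // through the loop with PHI nodes.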
2787 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2788 .addReg(InitReg)
2789 .addMBB(&OrigBB)
2790 .addReg(ResultReg)
2791 .addMBB(&LoopBB);
2792
2793 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2794 .addReg(InitSaveExecReg)
2795 .addMBB(&OrigBB)
2796 .addReg(NewExec)
2797 .addMBB(&LoopBB);
2798
2799 // Read the next variant <- also loop target.
2800 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2801 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2802
2803 // Compare the just read M0 value to all possible Idx values.
2804 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2805 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002806 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002807
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002808 // Update EXEC, save the original EXEC value to VCC.
2809 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2810 .addReg(CondReg, RegState::Kill);
2811
2812 MRI.setSimpleHint(NewExec, CondReg);
2813
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002814 if (UseGPRIdxMode) {
2815 unsigned IdxReg;
2816 if (Offset == 0) {
2817 IdxReg = CurrentIdxReg;
2818 } else {
2819 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2820 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2821 .addReg(CurrentIdxReg, RegState::Kill)
2822 .addImm(Offset);
2823 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002824 unsigned IdxMode = IsIndirectSrc ?
2825 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2826 MachineInstr *SetOn =
2827 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2828 .addReg(IdxReg, RegState::Kill)
2829 .addImm(IdxMode);
2830 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002831 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002832    // Move the index from the SGPR into M0
2833 if (Offset == 0) {
2834 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2835 .addReg(CurrentIdxReg, RegState::Kill);
2836 } else {
2837 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2838 .addReg(CurrentIdxReg, RegState::Kill)
2839 .addImm(Offset);
2840 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002841 }
2842
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002843 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002844 MachineInstr *InsertPt =
2845 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002846 .addReg(AMDGPU::EXEC)
2847 .addReg(NewExec);
2848
2849 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2850 // s_cbranch_scc0?
2851
2852 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2853 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2854 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002855
2856 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002857}
2858
2859// This has slightly sub-optimal regalloc when the source vector is killed by
2860// the read. The register allocator does not understand that the kill is
2861 // per-workitem, so the vector is kept alive for the whole loop and we end up
2862 // not re-using a subregister from it, using 1 more VGPR than necessary. The
2863 // extra VGPR was not needed when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002864static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2865 MachineBasicBlock &MBB,
2866 MachineInstr &MI,
2867 unsigned InitResultReg,
2868 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002869 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002870 bool UseGPRIdxMode,
2871 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002872 MachineFunction *MF = MBB.getParent();
2873 MachineRegisterInfo &MRI = MF->getRegInfo();
2874 const DebugLoc &DL = MI.getDebugLoc();
2875 MachineBasicBlock::iterator I(&MI);
2876
2877 unsigned DstReg = MI.getOperand(0).getReg();
Matt Arsenault301162c2017-11-15 21:51:43 +00002878 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2879 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002880
2881 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2882
2883 // Save the EXEC mask
2884 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2885 .addReg(AMDGPU::EXEC);
2886
2887 // To insert the loop we need to split the block. Move everything after this
2888 // point to a new block, and insert a new empty block between the two.
2889 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2890 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2891 MachineFunction::iterator MBBI(MBB);
2892 ++MBBI;
2893
2894 MF->insert(MBBI, LoopBB);
2895 MF->insert(MBBI, RemainderBB);
2896
2897 LoopBB->addSuccessor(LoopBB);
2898 LoopBB->addSuccessor(RemainderBB);
2899
2900 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002901 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002902 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2903
2904 MBB.addSuccessor(LoopBB);
2905
2906 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2907
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002908 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2909 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002910 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002911
2912 MachineBasicBlock::iterator First = RemainderBB->begin();
2913 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2914 .addReg(SaveExec);
2915
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002916 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002917}
2918
2919// Returns subreg index, offset
2920static std::pair<unsigned, int>
2921computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2922 const TargetRegisterClass *SuperRC,
2923 unsigned VecReg,
2924 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002925 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002926
2927 // Skip out of bounds offsets, or else we would end up using an undefined
2928 // register.
2929 if (Offset >= NumElts || Offset < 0)
2930 return std::make_pair(AMDGPU::sub0, Offset);
2931
2932 return std::make_pair(AMDGPU::sub0 + Offset, 0);
2933}
2934
2935// Return true if the index is an SGPR and was set.
2936static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2937 MachineRegisterInfo &MRI,
2938 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002939 int Offset,
2940 bool UseGPRIdxMode,
2941 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002942 MachineBasicBlock *MBB = MI.getParent();
2943 const DebugLoc &DL = MI.getDebugLoc();
2944 MachineBasicBlock::iterator I(&MI);
2945
2946 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2947 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2948
2949 assert(Idx->getReg() != AMDGPU::NoRegister);
2950
2951 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2952 return false;
2953
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002954 if (UseGPRIdxMode) {
2955 unsigned IdxMode = IsIndirectSrc ?
2956 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2957 if (Offset == 0) {
2958 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00002959 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2960 .add(*Idx)
2961 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002962
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002963 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002964 } else {
2965 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2966 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00002967 .add(*Idx)
2968 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002969 MachineInstr *SetOn =
2970 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2971 .addReg(Tmp, RegState::Kill)
2972 .addImm(IdxMode);
2973
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002974 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002975 }
2976
2977 return true;
2978 }
2979
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002980 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002981 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2982 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002983 } else {
2984 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002985 .add(*Idx)
2986 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002987 }
2988
2989 return true;
2990}
2991
2992// Control flow needs to be inserted if indexing with a VGPR.
2993static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2994 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00002995 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002996 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002997 const SIRegisterInfo &TRI = TII->getRegisterInfo();
2998 MachineFunction *MF = MBB.getParent();
2999 MachineRegisterInfo &MRI = MF->getRegInfo();
3000
3001 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003002 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003003 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3004
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003005 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003006
3007 unsigned SubReg;
3008 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003009 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003010
Marek Olsake22fdb92017-03-21 17:00:32 +00003011 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003012
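  // If the index is uniform (already in an SGPR), a single indexed move is
  // enough; otherwise fall through and emit the waterfall loop below.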
3013 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003014 MachineBasicBlock::iterator I(&MI);
3015 const DebugLoc &DL = MI.getDebugLoc();
3016
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003017 if (UseGPRIdxMode) {
3018 // TODO: Look at the uses to avoid the copy. This may require rescheduling
3019 // to avoid interfering with other uses, so probably requires a new
3020 // optimization pass.
3021 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003022 .addReg(SrcReg, RegState::Undef, SubReg)
3023 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003024 .addReg(AMDGPU::M0, RegState::Implicit);
3025 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3026 } else {
3027 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003028 .addReg(SrcReg, RegState::Undef, SubReg)
3029 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003030 }
3031
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003032 MI.eraseFromParent();
3033
3034 return &MBB;
3035 }
3036
3037 const DebugLoc &DL = MI.getDebugLoc();
3038 MachineBasicBlock::iterator I(&MI);
3039
3040 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3041 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3042
3043 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3044
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003045 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3046 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003047 MachineBasicBlock *LoopBB = InsPt->getParent();
3048
3049 if (UseGPRIdxMode) {
3050 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003051 .addReg(SrcReg, RegState::Undef, SubReg)
3052 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003053 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003054 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003055 } else {
3056 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003057 .addReg(SrcReg, RegState::Undef, SubReg)
3058 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003059 }
3060
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003061 MI.eraseFromParent();
3062
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003063 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003064}
3065
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003066static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3067 const TargetRegisterClass *VecRC) {
3068 switch (TRI.getRegSizeInBits(*VecRC)) {
3069 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003070 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003071 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003072 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003073 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003074 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003075 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003076 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003077 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003078 return AMDGPU::V_MOVRELD_B32_V16;
3079 default:
3080 llvm_unreachable("unsupported size for MOVRELD pseudos");
3081 }
3082}
3083
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003084static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3085 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003086 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003087 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003088 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3089 MachineFunction *MF = MBB.getParent();
3090 MachineRegisterInfo &MRI = MF->getRegInfo();
3091
3092 unsigned Dst = MI.getOperand(0).getReg();
3093 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3094 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3095 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3096 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3097 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3098
3099 // This can be an immediate, but will be folded later.
3100 assert(Val->getReg());
3101
3102 unsigned SubReg;
3103 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3104 SrcVec->getReg(),
3105 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00003106 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003107
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003108 if (Idx->getReg() == AMDGPU::NoRegister) {
3109 MachineBasicBlock::iterator I(&MI);
3110 const DebugLoc &DL = MI.getDebugLoc();
3111
3112 assert(Offset == 0);
3113
3114 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00003115 .add(*SrcVec)
3116 .add(*Val)
3117 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003118
3119 MI.eraseFromParent();
3120 return &MBB;
3121 }
3122
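  // With a uniform (SGPR) index the insert is a single indexed write; otherwise
  // emit the waterfall loop below.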
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003123 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003124 MachineBasicBlock::iterator I(&MI);
3125 const DebugLoc &DL = MI.getDebugLoc();
3126
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003127 if (UseGPRIdxMode) {
3128 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003129 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3130 .add(*Val)
3131 .addReg(Dst, RegState::ImplicitDefine)
3132 .addReg(SrcVec->getReg(), RegState::Implicit)
3133 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003134
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003135 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3136 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003137 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003138
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003139 BuildMI(MBB, I, DL, MovRelDesc)
3140 .addReg(Dst, RegState::Define)
3141 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00003142 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003143 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003144 }
3145
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003146 MI.eraseFromParent();
3147 return &MBB;
3148 }
3149
3150 if (Val->isReg())
3151 MRI.clearKillFlags(Val->getReg());
3152
3153 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003154
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003155 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3156
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003157 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003158 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003159 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003160
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003161 if (UseGPRIdxMode) {
3162 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003163 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3164 .add(*Val) // src0
3165 .addReg(Dst, RegState::ImplicitDefine)
3166 .addReg(PhiReg, RegState::Implicit)
3167 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003168 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003169 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003170 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003171
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003172 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3173 .addReg(Dst, RegState::Define)
3174 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003175 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003176 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003177 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003178
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003179 MI.eraseFromParent();
3180
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003181 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003182}
3183
Matt Arsenault786724a2016-07-12 21:41:32 +00003184MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3185 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00003186
3187 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3188 MachineFunction *MF = BB->getParent();
3189 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3190
3191 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00003192 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3193 report_fatal_error("missing mem operand from MIMG instruction");
3194 }
Tom Stellard244891d2016-12-20 15:52:17 +00003195 // Add a memoperand for mimg instructions so that they aren't assumed to
3196    // be ordered memory instructions.
3197
Tom Stellard244891d2016-12-20 15:52:17 +00003198 return BB;
3199 }
3200
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003201 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003202 case AMDGPU::S_ADD_U64_PSEUDO:
3203 case AMDGPU::S_SUB_U64_PSEUDO: {
3204 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3205 const DebugLoc &DL = MI.getDebugLoc();
3206
3207 MachineOperand &Dest = MI.getOperand(0);
3208 MachineOperand &Src0 = MI.getOperand(1);
3209 MachineOperand &Src1 = MI.getOperand(2);
3210
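    // Expand the 64-bit scalar add/sub into a 32-bit low half with carry out in
    // SCC and a 32-bit high half consuming that carry, then recombine the
    // halves with a REG_SEQUENCE.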
3211 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3212 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3213
3214 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3215 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3216 &AMDGPU::SReg_32_XM0RegClass);
3217 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3218 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3219 &AMDGPU::SReg_32_XM0RegClass);
3220
3221 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3222 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3223 &AMDGPU::SReg_32_XM0RegClass);
3224 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3225 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3226 &AMDGPU::SReg_32_XM0RegClass);
3227
3228 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3229
3230 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3231 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3232 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3233 .add(Src0Sub0)
3234 .add(Src1Sub0);
3235 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3236 .add(Src0Sub1)
3237 .add(Src1Sub1);
3238 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3239 .addReg(DestSub0)
3240 .addImm(AMDGPU::sub0)
3241 .addReg(DestSub1)
3242 .addImm(AMDGPU::sub1);
3243 MI.eraseFromParent();
3244 return BB;
3245 }
3246 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003247 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003248 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003249 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003250 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003251 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003252 }
Marek Olsak2d825902017-04-28 20:21:58 +00003253 case AMDGPU::SI_INIT_EXEC:
3254 // This should be before all vector instructions.
3255 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3256 AMDGPU::EXEC)
3257 .addImm(MI.getOperand(0).getImm());
3258 MI.eraseFromParent();
3259 return BB;
3260
3261 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3262 // Extract the thread count from an SGPR input and set EXEC accordingly.
3263 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3264 //
3265 // S_BFE_U32 count, input, {shift, 7}
3266 // S_BFM_B64 exec, count, 0
3267 // S_CMP_EQ_U32 count, 64
3268 // S_CMOV_B64 exec, -1
3269 MachineInstr *FirstMI = &*BB->begin();
3270 MachineRegisterInfo &MRI = MF->getRegInfo();
3271 unsigned InputReg = MI.getOperand(0).getReg();
3272 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3273 bool Found = false;
3274
3275 // Move the COPY of the input reg to the beginning, so that we can use it.
3276 for (auto I = BB->begin(); I != &MI; I++) {
3277 if (I->getOpcode() != TargetOpcode::COPY ||
3278 I->getOperand(0).getReg() != InputReg)
3279 continue;
3280
3281 if (I == FirstMI) {
3282 FirstMI = &*++BB->begin();
3283 } else {
3284 I->removeFromParent();
3285 BB->insert(FirstMI, &*I);
3286 }
3287 Found = true;
3288 break;
3289 }
3290 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003291 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003292
3293 // This should be before all vector instructions.
3294 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3295 .addReg(InputReg)
3296 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3297 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3298 AMDGPU::EXEC)
3299 .addReg(CountReg)
3300 .addImm(0);
3301 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3302 .addReg(CountReg, RegState::Kill)
3303 .addImm(64);
3304 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3305 AMDGPU::EXEC)
3306 .addImm(-1);
3307 MI.eraseFromParent();
3308 return BB;
3309 }
3310
Changpeng Fang01f60622016-03-15 17:28:44 +00003311 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003312 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003313 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003314 .add(MI.getOperand(0))
3315 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003316 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003317 return BB;
3318 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003319 case AMDGPU::SI_INDIRECT_SRC_V1:
3320 case AMDGPU::SI_INDIRECT_SRC_V2:
3321 case AMDGPU::SI_INDIRECT_SRC_V4:
3322 case AMDGPU::SI_INDIRECT_SRC_V8:
3323 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003324 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003325 case AMDGPU::SI_INDIRECT_DST_V1:
3326 case AMDGPU::SI_INDIRECT_DST_V2:
3327 case AMDGPU::SI_INDIRECT_DST_V4:
3328 case AMDGPU::SI_INDIRECT_DST_V8:
3329 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003330 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003331 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3332 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003333 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003334 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3335 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003336
3337 unsigned Dst = MI.getOperand(0).getReg();
3338 unsigned Src0 = MI.getOperand(1).getReg();
3339 unsigned Src1 = MI.getOperand(2).getReg();
3340 const DebugLoc &DL = MI.getDebugLoc();
3341 unsigned SrcCond = MI.getOperand(3).getReg();
3342
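    // There is no 64-bit VALU select; emit two 32-bit V_CNDMASK_B32s over the
    // sub0/sub1 halves using a shared copy of the condition, then rebuild the
    // 64-bit result with a REG_SEQUENCE.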
3343 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3344 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003345 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenault22e41792016-08-27 01:00:37 +00003346
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003347 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3348 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003349 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3350 .addReg(Src0, 0, AMDGPU::sub0)
3351 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003352 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003353 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3354 .addReg(Src0, 0, AMDGPU::sub1)
3355 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003356 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003357
3358 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3359 .addReg(DstLo)
3360 .addImm(AMDGPU::sub0)
3361 .addReg(DstHi)
3362 .addImm(AMDGPU::sub1);
3363 MI.eraseFromParent();
3364 return BB;
3365 }
Matt Arsenault327188a2016-12-15 21:57:11 +00003366 case AMDGPU::SI_BR_UNDEF: {
3367 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3368 const DebugLoc &DL = MI.getDebugLoc();
3369 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003370 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003371 Br->getOperand(1).setIsUndef(true); // read undef SCC
3372 MI.eraseFromParent();
3373 return BB;
3374 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003375 case AMDGPU::ADJCALLSTACKUP:
3376 case AMDGPU::ADJCALLSTACKDOWN: {
3377 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3378 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003379
3380 // Add an implicit use of the frame offset reg to prevent the restore copy
3381    // inserted after the call from being reordered after stack operations in
3382    // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003383 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003384 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3385 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003386 return BB;
3387 }
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003388 case AMDGPU::SI_CALL_ISEL:
3389 case AMDGPU::SI_TCRETURN_ISEL: {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003390 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3391 const DebugLoc &DL = MI.getDebugLoc();
3392 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003393
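    // The _ISEL pseudos only carry the callee in a register; recover the global
    // value from the defining SI_PC_ADD_REL_OFFSET so the real SI_CALL or
    // SI_TCRETURN also carries an explicit global address operand.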
3394 MachineRegisterInfo &MRI = MF->getRegInfo();
3395 unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3396 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3397 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3398
3399 const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3400
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003401 MachineInstrBuilder MIB;
3402 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3403 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3404 .add(MI.getOperand(0))
3405 .addGlobalAddress(G);
3406 } else {
3407 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3408 .add(MI.getOperand(0))
3409 .addGlobalAddress(G);
3410
3411 // There is an additional imm operand for tcreturn, but it should be in the
3412 // right place already.
3413 }
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003414
3415 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003416 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003417
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003418 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003419 MI.eraseFromParent();
3420 return BB;
3421 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003422 default:
3423 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003424 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003425}
3426
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003427bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3428 return isTypeLegal(VT.getScalarType());
3429}
3430
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003431bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3432 // This currently forces unfolding various combinations of fsub into fma with
3433 // free fneg'd operands. As long as we have fast FMA (controlled by
3434 // isFMAFasterThanFMulAndFAdd), we should perform these.
3435
3436 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3437 // most of these combines appear to be cycle neutral but save on instruction
3438 // count / code size.
3439 return true;
3440}
3441
Mehdi Amini44ede332015-07-09 02:09:04 +00003442EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3443 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003444 if (!VT.isVector()) {
3445 return MVT::i1;
3446 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003447 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003448}
3449
Matt Arsenault94163282016-12-22 16:36:25 +00003450MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3451  // TODO: Should i16 always be used if legal? For now it would force VALU
3452 // shifts.
3453 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003454}
3455
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003456// Answering this is somewhat tricky and depends on the specific device, as
3457// devices have different rates for fma or all f64 operations.
3458//
3459// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3460// regardless of which device (although the number of cycles differs between
3461// devices), so it is always profitable for f64.
3462//
3463// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3464// only on full rate devices. Normally, we should prefer selecting v_mad_f32
3465// which we can always do even without fused FP ops since it returns the same
3466// result as the separate operations and since it is always full
3467// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3468// however does not support denormals, so we do report fma as faster if we have
3469// a fast fma device and require denormals.
3470//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003471bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3472 VT = VT.getScalarType();
3473
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003474 switch (VT.getSimpleVT().SimpleTy) {
Matt Arsenault0084adc2018-04-30 19:08:16 +00003475 case MVT::f32: {
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003476 // This is as fast on some subtargets. However, we always have full rate f32
3477    // mad available, which returns the same result as the separate operations and
Matt Arsenault8d630032015-02-20 22:10:41 +00003478 // which we should prefer over fma. We can't use this if we want to support
3479 // denormals, so only report this in these cases.
Matt Arsenault0084adc2018-04-30 19:08:16 +00003480 if (Subtarget->hasFP32Denormals())
3481 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3482
3483 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3484 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3485 }
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003486 case MVT::f64:
3487 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003488 case MVT::f16:
3489 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003490 default:
3491 break;
3492 }
3493
3494 return false;
3495}
3496
Tom Stellard75aadc22012-12-11 21:25:42 +00003497//===----------------------------------------------------------------------===//
3498// Custom DAG Lowering Operations
3499//===----------------------------------------------------------------------===//
3500
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003501// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3502// wider vector type is legal.
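// For example, a sketch for the only type handled here (v4f16): an FNEG of
// v4f16 is split into two v2f16 FNEGs on the low and high halves, and the
// results are rejoined with CONCAT_VECTORS instead of being scalarized into
// four f16 operations.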
3503SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3504 SelectionDAG &DAG) const {
3505 unsigned Opc = Op.getOpcode();
3506 EVT VT = Op.getValueType();
3507 assert(VT == MVT::v4f16);
3508
3509 SDValue Lo, Hi;
3510 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3511
3512 SDLoc SL(Op);
3513 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3514 Op->getFlags());
3515 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3516 Op->getFlags());
3517
3518 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3519}
3520
3521// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3522// wider vector type is legal.
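// For example, a sketch for the types handled here (v4i16/v4f16): an ADD of
// two v4i16 values is split into two v2i16 ADDs on the low and high halves,
// then rejoined with CONCAT_VECTORS.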
3523SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3524 SelectionDAG &DAG) const {
3525 unsigned Opc = Op.getOpcode();
3526 EVT VT = Op.getValueType();
3527 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3528
3529 SDValue Lo0, Hi0;
3530 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3531 SDValue Lo1, Hi1;
3532 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3533
3534 SDLoc SL(Op);
3535
3536 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3537 Op->getFlags());
3538 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3539 Op->getFlags());
3540
3541 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3542}
3543
Tom Stellard75aadc22012-12-11 21:25:42 +00003544SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3545 switch (Op.getOpcode()) {
3546 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003547 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003548 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003549 SDValue Result = LowerLOAD(Op, DAG);
3550 assert((!Result.getNode() ||
3551 Result.getNode()->getNumValues() == 2) &&
3552 "Load should return a value and a chain");
3553 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003554 }
Tom Stellardaf775432013-10-23 00:44:32 +00003555
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003556 case ISD::FSIN:
3557 case ISD::FCOS:
3558 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003559 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003560 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003561 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003562 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003563 case ISD::GlobalAddress: {
3564 MachineFunction &MF = DAG.getMachineFunction();
3565 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3566 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003567 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003568 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003569 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003570 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003571 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003572 case ISD::INSERT_VECTOR_ELT:
3573 return lowerINSERT_VECTOR_ELT(Op, DAG);
3574 case ISD::EXTRACT_VECTOR_ELT:
3575 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Matt Arsenault67a98152018-05-16 11:47:30 +00003576 case ISD::BUILD_VECTOR:
3577 return lowerBUILD_VECTOR(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003578 case ISD::FP_ROUND:
3579 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003580 case ISD::TRAP:
Matt Arsenault3e025382017-04-24 17:49:13 +00003581 return lowerTRAP(Op, DAG);
Tony Tye43259df2018-05-16 16:19:34 +00003582 case ISD::DEBUGTRAP:
3583 return lowerDEBUGTRAP(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003584 case ISD::FABS:
3585 case ISD::FNEG:
Matt Arsenault36cdcfa2018-08-02 13:43:42 +00003586 case ISD::FCANONICALIZE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003587 return splitUnaryVectorOp(Op, DAG);
3588 case ISD::SHL:
3589 case ISD::SRA:
3590 case ISD::SRL:
3591 case ISD::ADD:
3592 case ISD::SUB:
3593 case ISD::MUL:
3594 case ISD::SMIN:
3595 case ISD::SMAX:
3596 case ISD::UMIN:
3597 case ISD::UMAX:
3598 case ISD::FMINNUM:
3599 case ISD::FMAXNUM:
3600 case ISD::FADD:
3601 case ISD::FMUL:
3602 return splitBinaryVectorOp(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003603 }
3604 return SDValue();
3605}
3606
Matt Arsenault1349a042018-05-22 06:32:10 +00003607static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3608 const SDLoc &DL,
3609 SelectionDAG &DAG, bool Unpacked) {
3610 if (!LoadVT.isVector())
3611 return Result;
3612
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003613 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3614 // Truncate to v2i16/v4i16.
3615 EVT IntLoadVT = LoadVT.changeTypeToInteger();
Matt Arsenault1349a042018-05-22 06:32:10 +00003616
3617 // Work around the legalizer not scalarizing the truncate after vector op
3618 // legalization by not creating an intermediate vector trunc.
3619 SmallVector<SDValue, 4> Elts;
3620 DAG.ExtractVectorElements(Result, Elts);
3621 for (SDValue &Elt : Elts)
3622 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3623
3624 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3625
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003626 // Bitcast to original type (v2f16/v4f16).
Matt Arsenault1349a042018-05-22 06:32:10 +00003627 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003628 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003629
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003630 // Cast back to the original packed type.
3631 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3632}
3633
Matt Arsenault1349a042018-05-22 06:32:10 +00003634SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3635 MemSDNode *M,
3636 SelectionDAG &DAG,
Tim Renouf366a49d2018-08-02 23:33:01 +00003637 ArrayRef<SDValue> Ops,
Matt Arsenault1349a042018-05-22 06:32:10 +00003638 bool IsIntrinsic) const {
3639 SDLoc DL(M);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003640
3641 bool Unpacked = Subtarget->hasUnpackedD16VMem();
Matt Arsenault1349a042018-05-22 06:32:10 +00003642 EVT LoadVT = M->getValueType(0);
3643
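  // On subtargets that report hasUnpackedD16VMem(), a d16 load returns each
  // component in its own dword, so a v2f16/v4f16 result comes back as
  // v2i32/v4i32 here and is truncated and re-packed afterwards by
  // adjustLoadValueTypeImpl.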
Matt Arsenault1349a042018-05-22 06:32:10 +00003644 EVT EquivLoadVT = LoadVT;
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003645 if (Unpacked && LoadVT.isVector()) {
3646 EquivLoadVT = LoadVT.isVector() ?
3647 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3648 LoadVT.getVectorNumElements()) : LoadVT;
Matt Arsenault1349a042018-05-22 06:32:10 +00003649 }
3650
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003651 // Change from v4f16/v2f16 to EquivLoadVT.
3652 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3653
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003654 SDValue Load
3655 = DAG.getMemIntrinsicNode(
3656 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3657 VTList, Ops, M->getMemoryVT(),
3658 M->getMemOperand());
3659 if (!Unpacked) // Just adjusted the opcode.
3660 return Load;
Changpeng Fang4737e892018-01-18 22:08:53 +00003661
Matt Arsenault1349a042018-05-22 06:32:10 +00003662 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
Changpeng Fang4737e892018-01-18 22:08:53 +00003663
Matt Arsenault1349a042018-05-22 06:32:10 +00003664 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003665}
3666
Matt Arsenault3aef8092017-01-23 23:09:58 +00003667void SITargetLowering::ReplaceNodeResults(SDNode *N,
3668 SmallVectorImpl<SDValue> &Results,
3669 SelectionDAG &DAG) const {
3670 switch (N->getOpcode()) {
3671 case ISD::INSERT_VECTOR_ELT: {
3672 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3673 Results.push_back(Res);
3674 return;
3675 }
3676 case ISD::EXTRACT_VECTOR_ELT: {
3677 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3678 Results.push_back(Res);
3679 return;
3680 }
Matt Arsenault1f17c662017-02-22 00:27:34 +00003681 case ISD::INTRINSIC_WO_CHAIN: {
3682 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Marek Olsak13e47412018-01-31 20:18:04 +00003683 switch (IID) {
3684 case Intrinsic::amdgcn_cvt_pkrtz: {
Matt Arsenault1f17c662017-02-22 00:27:34 +00003685 SDValue Src0 = N->getOperand(1);
3686 SDValue Src1 = N->getOperand(2);
3687 SDLoc SL(N);
3688 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3689 Src0, Src1);
Matt Arsenault1f17c662017-02-22 00:27:34 +00003690 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3691 return;
3692 }
Marek Olsak13e47412018-01-31 20:18:04 +00003693 case Intrinsic::amdgcn_cvt_pknorm_i16:
3694 case Intrinsic::amdgcn_cvt_pknorm_u16:
3695 case Intrinsic::amdgcn_cvt_pk_i16:
3696 case Intrinsic::amdgcn_cvt_pk_u16: {
3697 SDValue Src0 = N->getOperand(1);
3698 SDValue Src1 = N->getOperand(2);
3699 SDLoc SL(N);
3700 unsigned Opcode;
3701
3702 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3703 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3704 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3705 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3706 else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3707 Opcode = AMDGPUISD::CVT_PK_I16_I32;
3708 else
3709 Opcode = AMDGPUISD::CVT_PK_U16_U32;
3710
Matt Arsenault709374d2018-08-01 20:13:58 +00003711 EVT VT = N->getValueType(0);
3712 if (isTypeLegal(VT))
3713 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3714 else {
3715 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3716 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3717 }
Marek Olsak13e47412018-01-31 20:18:04 +00003718 return;
3719 }
3720 }
Simon Pilgrimd362d272017-07-08 19:50:03 +00003721 break;
Matt Arsenault1f17c662017-02-22 00:27:34 +00003722 }
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003723 case ISD::INTRINSIC_W_CHAIN: {
Matt Arsenault1349a042018-05-22 06:32:10 +00003724 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003725 Results.push_back(Res);
Matt Arsenault1349a042018-05-22 06:32:10 +00003726 Results.push_back(Res.getValue(1));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003727 return;
3728 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003729
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003730 break;
3731 }
Matt Arsenault4a486232017-04-19 20:53:07 +00003732 case ISD::SELECT: {
3733 SDLoc SL(N);
3734 EVT VT = N->getValueType(0);
3735 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3736 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3737 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3738
3739 EVT SelectVT = NewVT;
3740 if (NewVT.bitsLT(MVT::i32)) {
3741 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3742 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3743 SelectVT = MVT::i32;
3744 }
3745
3746 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3747 N->getOperand(0), LHS, RHS);
3748
3749 if (NewVT != SelectVT)
3750 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3751 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3752 return;
3753 }
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003754 case ISD::FNEG: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003755 if (N->getValueType(0) != MVT::v2f16)
3756 break;
3757
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003758 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003759 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3760
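    // Flip the sign bit of both f16 halves by xor'ing the bitcast i32 with
    // 0x8000 in each 16-bit lane.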
3761 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3762 BC,
3763 DAG.getConstant(0x80008000, SL, MVT::i32));
3764 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3765 return;
3766 }
3767 case ISD::FABS: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003768 if (N->getValueType(0) != MVT::v2f16)
3769 break;
3770
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003771 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003772 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3773
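    // Clear the sign bit of both f16 halves by and'ing the bitcast i32 with
    // 0x7fff in each 16-bit lane.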
3774 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3775 BC,
3776 DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3777 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3778 return;
3779 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00003780 default:
3781 break;
3782 }
3783}
3784
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00003785/// Helper function for LowerBRCOND
Tom Stellardf8794352012-12-19 22:10:31 +00003786static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00003787
Tom Stellardf8794352012-12-19 22:10:31 +00003788 SDNode *Parent = Value.getNode();
3789 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3790 I != E; ++I) {
3791
3792 if (I.getUse().get() != Value)
3793 continue;
3794
3795 if (I->getOpcode() == Opcode)
3796 return *I;
3797 }
Craig Topper062a2ba2014-04-25 05:30:21 +00003798 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003799}
3800
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003801unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
Matt Arsenault6408c912016-09-16 22:11:18 +00003802 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3803 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003804 case Intrinsic::amdgcn_if:
3805 return AMDGPUISD::IF;
3806 case Intrinsic::amdgcn_else:
3807 return AMDGPUISD::ELSE;
3808 case Intrinsic::amdgcn_loop:
3809 return AMDGPUISD::LOOP;
3810 case Intrinsic::amdgcn_end_cf:
3811 llvm_unreachable("should not occur");
Matt Arsenault6408c912016-09-16 22:11:18 +00003812 default:
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003813 return 0;
Matt Arsenault6408c912016-09-16 22:11:18 +00003814 }
Tom Stellardbc4497b2016-02-12 23:45:29 +00003815 }
Matt Arsenault6408c912016-09-16 22:11:18 +00003816
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003817 // break, if_break, else_break are all only used as inputs to loop, not
3818 // directly as branch conditions.
3819 return 0;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003820}
3821
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003822void SITargetLowering::createDebuggerPrologueStackObjects(
3823 MachineFunction &MF) const {
3824 // Create stack objects that are used for emitting debugger prologue.
3825 //
3826 // Debugger prologue writes work group IDs and work item IDs to scratch memory
3827 // at a fixed location in the following format:
3828 // offset 0: work group ID x
3829 // offset 4: work group ID y
3830 // offset 8: work group ID z
3831 // offset 16: work item ID x
3832 // offset 20: work item ID y
3833 // offset 24: work item ID z
3834 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3835 int ObjectIdx = 0;
3836
3837 // For each dimension:
3838 for (unsigned i = 0; i < 3; ++i) {
3839 // Create fixed stack object for work group ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003840 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003841 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3842 // Create fixed stack object for work item ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003843 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003844 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3845 }
3846}
3847
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003848bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3849 const Triple &TT = getTargetMachine().getTargetTriple();
Matt Arsenault923712b2018-02-09 16:57:57 +00003850 return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3851 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003852 AMDGPU::shouldEmitConstantsToTextSection(TT);
3853}
3854
3855bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00003856 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00003857 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3858 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003859 !shouldEmitFixup(GV) &&
3860 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3861}
3862
3863bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3864 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3865}
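// Taken together, the three predicates above pick the relocation strategy
// used by LowerGlobalAddress below: constants emitted into the text section
// get a direct fixup, globals that cannot be assumed DSO-local are reached
// through the GOT, and everything else uses a plain pc-relative relocation.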
3866
Tom Stellardf8794352012-12-19 22:10:31 +00003867/// This transforms the control flow intrinsics to get the branch destination as
3868/// the last parameter, and also switches the branch target with BR if the need arises.
3869SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3870 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00003871 SDLoc DL(BRCOND);
Tom Stellardf8794352012-12-19 22:10:31 +00003872
3873 SDNode *Intr = BRCOND.getOperand(1).getNode();
3874 SDValue Target = BRCOND.getOperand(2);
Craig Topper062a2ba2014-04-25 05:30:21 +00003875 SDNode *BR = nullptr;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003876 SDNode *SetCC = nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003877
3878 if (Intr->getOpcode() == ISD::SETCC) {
3879 // As long as we negate the condition everything is fine
Tom Stellardbc4497b2016-02-12 23:45:29 +00003880 SetCC = Intr;
Tom Stellardf8794352012-12-19 22:10:31 +00003881 Intr = SetCC->getOperand(0).getNode();
3882
3883 } else {
3884 // Get the target from BR if we don't negate the condition
3885 BR = findUser(BRCOND, ISD::BR);
3886 Target = BR->getOperand(1);
3887 }
3888
Matt Arsenault6408c912016-09-16 22:11:18 +00003889 // FIXME: This changes the types of the intrinsics instead of introducing new
3890 // nodes with the correct types.
3891 // e.g. llvm.amdgcn.loop
3892
3893 // e.g. i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3894 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3895
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003896 unsigned CFNode = isCFIntrinsic(Intr);
3897 if (CFNode == 0) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00003898 // This is a uniform branch so we don't need to legalize.
3899 return BRCOND;
3900 }
3901
Matt Arsenault6408c912016-09-16 22:11:18 +00003902 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
3903 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
3904
Tom Stellardbc4497b2016-02-12 23:45:29 +00003905 assert(!SetCC ||
3906 (SetCC->getConstantOperandVal(1) == 1 &&
Tom Stellardbc4497b2016-02-12 23:45:29 +00003907 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
3908 ISD::SETNE));
Tom Stellardf8794352012-12-19 22:10:31 +00003909
Tom Stellardf8794352012-12-19 22:10:31 +00003910 // operands of the new intrinsic call
3911 SmallVector<SDValue, 4> Ops;
Matt Arsenault6408c912016-09-16 22:11:18 +00003912 if (HaveChain)
3913 Ops.push_back(BRCOND.getOperand(0));
3914
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003915 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Tom Stellardf8794352012-12-19 22:10:31 +00003916 Ops.push_back(Target);
3917
Matt Arsenault6408c912016-09-16 22:11:18 +00003918 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
3919
Tom Stellardf8794352012-12-19 22:10:31 +00003920 // build the new intrinsic call
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003921 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00003922
Matt Arsenault6408c912016-09-16 22:11:18 +00003923 if (!HaveChain) {
3924 SDValue Ops[] = {
3925 SDValue(Result, 0),
3926 BRCOND.getOperand(0)
3927 };
3928
3929 Result = DAG.getMergeValues(Ops, DL).getNode();
3930 }
3931
Tom Stellardf8794352012-12-19 22:10:31 +00003932 if (BR) {
3933 // Give the branch instruction our target
3934 SDValue Ops[] = {
3935 BR->getOperand(0),
3936 BRCOND.getOperand(2)
3937 };
Chandler Carruth356665a2014-08-01 22:09:43 +00003938 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
3939 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
3940 BR = NewBR.getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00003941 }
3942
3943 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
3944
3945 // Copy the intrinsic results to registers
3946 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
3947 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
3948 if (!CopyToReg)
3949 continue;
3950
3951 Chain = DAG.getCopyToReg(
3952 Chain, DL,
3953 CopyToReg->getOperand(1),
3954 SDValue(Result, i - 1),
3955 SDValue());
3956
3957 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
3958 }
3959
3960 // Remove the old intrinsic from the chain
3961 DAG.ReplaceAllUsesOfValueWith(
3962 SDValue(Intr, Intr->getNumValues() - 1),
3963 Intr->getOperand(0));
3964
3965 return Chain;
Tom Stellard75aadc22012-12-11 21:25:42 +00003966}
3967
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003968SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
3969 SDValue Op,
3970 const SDLoc &DL,
3971 EVT VT) const {
3972 return Op.getValueType().bitsLE(VT) ?
3973 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
3974 DAG.getNode(ISD::FTRUNC, DL, VT, Op);
3975}
3976
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003977SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultafe614c2016-11-18 18:33:36 +00003978 assert(Op.getValueType() == MVT::f16 &&
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003979 "Do not know how to custom lower FP_ROUND for non-f16 type");
3980
Matt Arsenaultafe614c2016-11-18 18:33:36 +00003981 SDValue Src = Op.getOperand(0);
3982 EVT SrcVT = Src.getValueType();
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003983 if (SrcVT != MVT::f64)
3984 return Op;
3985
3986 SDLoc DL(Op);
Matt Arsenaultafe614c2016-11-18 18:33:36 +00003987
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003988 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
3989 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
Mandeep Singh Grang5e1697e2017-06-06 05:08:36 +00003990 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003991}
3992
Matt Arsenault3e025382017-04-24 17:49:13 +00003993SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
3994 SDLoc SL(Op);
Matt Arsenault3e025382017-04-24 17:49:13 +00003995 SDValue Chain = Op.getOperand(0);
3996
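  // Without an enabled HSA trap handler there is nowhere meaningful to trap
  // to, so llvm.trap is lowered to s_endpgm and simply terminates the wave.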
Tom Stellard5bfbae52018-07-11 20:59:01 +00003997 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00003998 !Subtarget->isTrapHandlerEnabled())
Matt Arsenault3e025382017-04-24 17:49:13 +00003999 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
Tony Tye43259df2018-05-16 16:19:34 +00004000
4001 MachineFunction &MF = DAG.getMachineFunction();
4002 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4003 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4004 assert(UserSGPR != AMDGPU::NoRegister);
4005 SDValue QueuePtr = CreateLiveInRegister(
4006 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4007 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4008 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4009 QueuePtr, SDValue());
4010 SDValue Ops[] = {
4011 ToReg,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004012 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
Tony Tye43259df2018-05-16 16:19:34 +00004013 SGPR01,
4014 ToReg.getValue(1)
4015 };
4016 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4017}
4018
4019SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4020 SDLoc SL(Op);
4021 SDValue Chain = Op.getOperand(0);
4022 MachineFunction &MF = DAG.getMachineFunction();
4023
Tom Stellard5bfbae52018-07-11 20:59:01 +00004024 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004025 !Subtarget->isTrapHandlerEnabled()) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004026 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
Matt Arsenault3e025382017-04-24 17:49:13 +00004027 "debugtrap handler not supported",
4028 Op.getDebugLoc(),
4029 DS_Warning);
Matthias Braunf1caa282017-12-15 22:22:58 +00004030 LLVMContext &Ctx = MF.getFunction().getContext();
Matt Arsenault3e025382017-04-24 17:49:13 +00004031 Ctx.diagnose(NoTrap);
4032 return Chain;
4033 }
Matt Arsenault3e025382017-04-24 17:49:13 +00004034
Tony Tye43259df2018-05-16 16:19:34 +00004035 SDValue Ops[] = {
4036 Chain,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004037 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
Tony Tye43259df2018-05-16 16:19:34 +00004038 };
4039 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
Matt Arsenault3e025382017-04-24 17:49:13 +00004040}
4041
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004042SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004043 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004044 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4045 if (Subtarget->hasApertureRegs()) {
4046 unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
4047 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4048 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4049 unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
4050 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4051 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4052 unsigned Encoding =
4053 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4054 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4055 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
Matt Arsenaulte823d922017-02-18 18:29:53 +00004056
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004057 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4058 SDValue ApertureReg = SDValue(
4059 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4060 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4061 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004062 }
4063
Matt Arsenault99c14522016-04-25 19:27:24 +00004064 MachineFunction &MF = DAG.getMachineFunction();
4065 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004066 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4067 assert(UserSGPR != AMDGPU::NoRegister);
4068
Matt Arsenault99c14522016-04-25 19:27:24 +00004069 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004070 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004071
4072 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4073 // private_segment_aperture_base_hi.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004074 uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004075
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004076 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004077
4078 // TODO: Use custom target PseudoSourceValue.
4079 // TODO: We should use the value from the IR intrinsic call, but it might not
4080 // be available and how do we get it?
4081 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004082 AMDGPUASI.CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004083
4084 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004085 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004086 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004087 MachineMemOperand::MODereferenceable |
4088 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004089}
4090
4091SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4092 SelectionDAG &DAG) const {
4093 SDLoc SL(Op);
4094 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4095
4096 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004097 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4098
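  // An addrspacecast must map the source null pointer to the destination null
  // pointer, so both directions below compare against the appropriate null
  // value and select between the converted pointer and the target segment's
  // null constant.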
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004099 const AMDGPUTargetMachine &TM =
4100 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4101
Matt Arsenault99c14522016-04-25 19:27:24 +00004102 // flat -> local/private
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004103 if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004104 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004105
4106 if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
4107 DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004108 unsigned NullVal = TM.getNullPointerValue(DestAS);
4109 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004110 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4111 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4112
4113 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4114 NonNull, Ptr, SegmentNullPtr);
4115 }
4116 }
4117
4118 // local/private -> flat
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004119 if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004120 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004121
4122 if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
4123 SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004124 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4125 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004126
Matt Arsenault99c14522016-04-25 19:27:24 +00004127 SDValue NonNull
4128 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4129
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004130 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004131 SDValue CvtPtr
4132 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4133
4134 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4135 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4136 FlatNullPtr);
4137 }
4138 }
4139
4140 // global <-> flat are no-ops and never emitted.
4141
4142 const MachineFunction &MF = DAG.getMachineFunction();
4143 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004144 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004145 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4146
4147 return DAG.getUNDEF(ASC->getValueType(0));
4148}
4149
Matt Arsenault3aef8092017-01-23 23:09:58 +00004150SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4151 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004152 SDValue Vec = Op.getOperand(0);
4153 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004154 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004155 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004156 EVT EltVT = VecVT.getVectorElementType();
4157 unsigned VecSize = VecVT.getSizeInBits();
4158 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004159
Matt Arsenault9224c002018-06-05 19:52:46 +00004160
4161 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004162
4163 unsigned NumElts = VecVT.getVectorNumElements();
4164 SDLoc SL(Op);
4165 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4166
Matt Arsenault9224c002018-06-05 19:52:46 +00004167 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004168 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4169
4170 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4171 DAG.getConstant(0, SL, MVT::i32));
4172 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4173 DAG.getConstant(1, SL, MVT::i32));
4174
4175 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4176 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4177
4178 unsigned Idx = KIdx->getZExtValue();
4179 bool InsertLo = Idx < 2;
4180 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4181 InsertLo ? LoVec : HiVec,
4182 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4183 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4184
4185 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4186
4187 SDValue Concat = InsertLo ?
4188 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4189 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4190
4191 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4192 }
4193
Matt Arsenault3aef8092017-01-23 23:09:58 +00004194 if (isa<ConstantSDNode>(Idx))
4195 return SDValue();
4196
Matt Arsenault9224c002018-06-05 19:52:46 +00004197 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004198
Matt Arsenault3aef8092017-01-23 23:09:58 +00004199 // Avoid stack access for dynamic indexing.
Matt Arsenault9224c002018-06-05 19:52:46 +00004200 SDValue Val = InsVal;
4201 if (InsVal.getValueType() == MVT::f16)
4202 Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004203
4204 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Matt Arsenault67a98152018-05-16 11:47:30 +00004205 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Val);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004206
Matt Arsenault9224c002018-06-05 19:52:46 +00004207 assert(isPowerOf2_32(EltSize));
4208 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4209
Matt Arsenault3aef8092017-01-23 23:09:58 +00004210 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004211 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004212
Matt Arsenault67a98152018-05-16 11:47:30 +00004213 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4214 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4215 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004216 ScaledIdx);
4217
Matt Arsenault67a98152018-05-16 11:47:30 +00004218 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4219 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4220 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004221
Matt Arsenault67a98152018-05-16 11:47:30 +00004222 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4223 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004224}
4225
4226SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4227 SelectionDAG &DAG) const {
4228 SDLoc SL(Op);
4229
4230 EVT ResultVT = Op.getValueType();
4231 SDValue Vec = Op.getOperand(0);
4232 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004233 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004234 unsigned VecSize = VecVT.getSizeInBits();
4235 EVT EltVT = VecVT.getVectorElementType();
4236 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004237
Matt Arsenault98f29462017-05-17 20:30:58 +00004238 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4239
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004240 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004241 // source modifiers before obscuring the value with bit operations.
4242
4243 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4244 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4245 return Combined;
4246
Matt Arsenault9224c002018-06-05 19:52:46 +00004247 unsigned EltSize = EltVT.getSizeInBits();
4248 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004249
Matt Arsenault9224c002018-06-05 19:52:46 +00004250 MVT IntVT = MVT::getIntegerVT(VecSize);
4251 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4252
4253 // Convert vector index to bit-index (* EltSize)
4254 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004255
Matt Arsenault67a98152018-05-16 11:47:30 +00004256 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4257 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004258
Matt Arsenault67a98152018-05-16 11:47:30 +00004259 if (ResultVT == MVT::f16) {
4260 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4261 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4262 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004263
Matt Arsenault67a98152018-05-16 11:47:30 +00004264 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4265}
4266
4267SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4268 SelectionDAG &DAG) const {
4269 SDLoc SL(Op);
4270 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004271
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004272 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4273 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4274
4275 // Turn into pair of packed build_vectors.
4276 // TODO: Special case for constants that can be materialized with s_mov_b64.
4277 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4278 { Op.getOperand(0), Op.getOperand(1) });
4279 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4280 { Op.getOperand(2), Op.getOperand(3) });
4281
4282 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4283 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4284
4285 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4286 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4287 }
4288
Matt Arsenault1349a042018-05-22 06:32:10 +00004289 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault67a98152018-05-16 11:47:30 +00004290
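  // Pack the two 16-bit elements into one 32-bit scalar as lo | (hi << 16),
  // then bitcast the result back to the requested v2i16/v2f16 type.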
Matt Arsenault1349a042018-05-22 06:32:10 +00004291 SDValue Lo = Op.getOperand(0);
4292 SDValue Hi = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004293
Matt Arsenault1349a042018-05-22 06:32:10 +00004294 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4295 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault67a98152018-05-16 11:47:30 +00004296
Matt Arsenault1349a042018-05-22 06:32:10 +00004297 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4298 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4299
4300 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4301 DAG.getConstant(16, SL, MVT::i32));
4302
4303 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4304
4305 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004306}
4307
Tom Stellard418beb72016-07-13 14:23:33 +00004308bool
4309SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4310 // We can fold offsets for anything that doesn't require a GOT relocation.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004311 return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00004312 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
4313 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004314 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004315}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004316
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004317static SDValue
4318buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4319 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4320 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004321 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4322 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004323 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004324 // For constant address space:
4325 // s_getpc_b64 s[0:1]
4326 // s_add_u32 s0, s0, $symbol
4327 // s_addc_u32 s1, s1, 0
4328 //
4329 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4330 // a fixup or relocation is emitted to replace $symbol with a literal
4331 // constant, which is a pc-relative offset from the encoding of the $symbol
4332 // operand to the global variable.
4333 //
4334 // For global address space:
4335 // s_getpc_b64 s[0:1]
4336 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4337 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4338 //
4339 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4340 // fixups or relocations are emitted to replace $symbol@*@lo and
4341 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4342 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4343 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004344 //
4345 // What we want here is an offset from the value returned by s_getpc
4346 // (which is the address of the s_add_u32 instruction) to the global
4347 // variable, but since the encoding of $symbol starts 4 bytes after the start
4348 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4349 // small. This requires us to add 4 to the global variable offset in order to
4350 // compute the correct address.
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004351 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4352 GAFlags);
4353 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4354 GAFlags == SIInstrInfo::MO_NONE ?
4355 GAFlags : GAFlags + 1);
4356 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004357}
4358
Tom Stellard418beb72016-07-13 14:23:33 +00004359SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4360 SDValue Op,
4361 SelectionDAG &DAG) const {
4362 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004363 const GlobalValue *GV = GSD->getGlobal();
Tom Stellard418beb72016-07-13 14:23:33 +00004364
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004365 if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
Matt Arsenault923712b2018-02-09 16:57:57 +00004366 GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004367 GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
4368 // FIXME: It isn't correct to rely on the type of the pointer. This should
4369 // be removed when address space 0 is 64-bit.
4370 !GV->getType()->getElementType()->isFunctionTy())
Tom Stellard418beb72016-07-13 14:23:33 +00004371 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4372
4373 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00004374 EVT PtrVT = Op.getValueType();
4375
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004376 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00004377 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004378 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004379 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4380 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004381
4382 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004383 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004384
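  // In the GOT case the pc-relative sequence materializes the address of a
  // GOT entry; the global's real address is then loaded from that entry below
  // as an invariant, dereferenceable constant-address-space load.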
4385 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004386 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00004387 const DataLayout &DataLayout = DAG.getDataLayout();
4388 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
4389 // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
4390 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
4391
Justin Lebar9c375812016-07-15 18:27:10 +00004392 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00004393 MachineMemOperand::MODereferenceable |
4394 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00004395}
4396
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004397SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4398 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004399 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4400 // the destination register.
4401 //
Tom Stellardfc92e772015-05-12 14:18:14 +00004402 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4403 // so we will end up with redundant moves to m0.
4404 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004405 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4406
4407 // A Null SDValue creates a glue result.
4408 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4409 V, Chain);
4410 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00004411}
4412
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004413SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4414 SDValue Op,
4415 MVT VT,
4416 unsigned Offset) const {
4417 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004418 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004419 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004420 // The local size values will have the hi 16-bits as zero.
4421 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4422 DAG.getValueType(VT));
4423}
4424
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004425static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4426 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004427 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004428 "non-hsa intrinsic with hsa target",
4429 DL.getDebugLoc());
4430 DAG.getContext()->diagnose(BadIntrin);
4431 return DAG.getUNDEF(VT);
4432}
4433
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004434static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4435 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004436 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004437 "intrinsic not supported on subtarget",
4438 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00004439 DAG.getContext()->diagnose(BadIntrin);
4440 return DAG.getUNDEF(VT);
4441}
4442
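// Assemble up to 16 dword values into a single f32/v2f32/v4f32/v8f32/v16f32
// node, bitcasting each element to f32 and padding any unused lanes with
// undef. lowerImage below uses this to build the VAddr operand of MIMG
// instructions.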
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004443static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
4444 ArrayRef<SDValue> Elts) {
4445 assert(!Elts.empty());
4446 MVT Type;
4447 unsigned NumElts;
4448
4449 if (Elts.size() == 1) {
4450 Type = MVT::f32;
4451 NumElts = 1;
4452 } else if (Elts.size() == 2) {
4453 Type = MVT::v2f32;
4454 NumElts = 2;
4455 } else if (Elts.size() <= 4) {
4456 Type = MVT::v4f32;
4457 NumElts = 4;
4458 } else if (Elts.size() <= 8) {
4459 Type = MVT::v8f32;
4460 NumElts = 8;
4461 } else {
4462 assert(Elts.size() <= 16);
4463 Type = MVT::v16f32;
4464 NumElts = 16;
4465 }
4466
4467 SmallVector<SDValue, 16> VecElts(NumElts);
4468 for (unsigned i = 0; i < Elts.size(); ++i) {
4469 SDValue Elt = Elts[i];
4470 if (Elt.getValueType() != MVT::f32)
4471 Elt = DAG.getBitcast(MVT::f32, Elt);
4472 VecElts[i] = Elt;
4473 }
4474 for (unsigned i = Elts.size(); i < NumElts; ++i)
4475 VecElts[i] = DAG.getUNDEF(MVT::f32);
4476
4477 if (NumElts == 1)
4478 return VecElts[0];
4479 return DAG.getBuildVector(Type, DL, VecElts);
4480}
4481
4482static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
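// Decode a cache-policy immediate for image intrinsics: bit 0 is glc and
// bit 1 is slc. Returns true only if every set bit was consumed, i.e. the
// policy contains no unknown bits.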
4483 SDValue *GLC, SDValue *SLC) {
4484 auto CachePolicyConst = dyn_cast<ConstantSDNode>(CachePolicy.getNode());
4485 if (!CachePolicyConst)
4486 return false;
4487
4488 uint64_t Value = CachePolicyConst->getZExtValue();
4489 SDLoc DL(CachePolicy);
4490 if (GLC) {
4491 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4492 Value &= ~(uint64_t)0x1;
4493 }
4494 if (SLC) {
4495 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4496 Value &= ~(uint64_t)0x2;
4497 }
4498
4499 return Value == 0;
4500}
4501
4502SDValue SITargetLowering::lowerImage(SDValue Op,
4503 const AMDGPU::ImageDimIntrinsicInfo *Intr,
4504 SelectionDAG &DAG) const {
4505 SDLoc DL(Op);
4506 MachineFunction &MF = DAG.getMachineFunction();
4507 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4508 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
4509 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004510 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
4511 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
4512 unsigned IntrOpcode = Intr->BaseOpcode;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004513
4514 SmallVector<EVT, 2> ResultTypes(Op->value_begin(), Op->value_end());
4515 bool IsD16 = false;
4516 SDValue VData;
4517 int NumVDataDwords;
4518 unsigned AddrIdx; // Index of first address argument
4519 unsigned DMask;
4520
4521 if (BaseOpcode->Atomic) {
4522 VData = Op.getOperand(2);
4523
4524 bool Is64Bit = VData.getValueType() == MVT::i64;
4525 if (BaseOpcode->AtomicX2) {
4526 SDValue VData2 = Op.getOperand(3);
4527 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
4528 {VData, VData2});
4529 if (Is64Bit)
4530 VData = DAG.getBitcast(MVT::v4i32, VData);
4531
4532 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
4533 DMask = Is64Bit ? 0xf : 0x3;
4534 NumVDataDwords = Is64Bit ? 4 : 2;
4535 AddrIdx = 4;
4536 } else {
4537 DMask = Is64Bit ? 0x3 : 0x1;
4538 NumVDataDwords = Is64Bit ? 2 : 1;
4539 AddrIdx = 3;
4540 }
4541 } else {
4542 unsigned DMaskIdx;
4543
4544 if (BaseOpcode->Store) {
4545 VData = Op.getOperand(2);
4546
4547 MVT StoreVT = VData.getSimpleValueType();
4548 if (StoreVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004549 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004550 !BaseOpcode->HasD16)
4551 return Op; // D16 is unsupported for this instruction
4552
4553 IsD16 = true;
4554 VData = handleD16VData(VData, DAG);
4555 }
4556
4557 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
4558 DMaskIdx = 3;
4559 } else {
4560 MVT LoadVT = Op.getSimpleValueType();
4561 if (LoadVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004562 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004563 !BaseOpcode->HasD16)
4564 return Op; // D16 is unsupported for this instruction
4565
4566 IsD16 = true;
4567 if (LoadVT.isVector() && Subtarget->hasUnpackedD16VMem())
4568 ResultTypes[0] = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32;
4569 }
4570
4571 NumVDataDwords = (ResultTypes[0].getSizeInBits() + 31) / 32;
4572 DMaskIdx = isa<MemSDNode>(Op) ? 2 : 1;
4573 }
4574
4575 auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
4576 if (!DMaskConst)
4577 return Op;
4578
4579 AddrIdx = DMaskIdx + 1;
4580 DMask = DMaskConst->getZExtValue();
4581 if (!DMask && !BaseOpcode->Store) {
4582 // Eliminate no-op loads. Stores with dmask == 0 are *not* no-op: they
4583 // store the channels' default values.
4584 SDValue Undef = DAG.getUNDEF(Op.getValueType());
4585 if (isa<MemSDNode>(Op))
4586 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
4587 return Undef;
4588 }
4589 }
4590
4591 unsigned NumVAddrs = BaseOpcode->NumExtraArgs +
4592 (BaseOpcode->Gradients ? DimInfo->NumGradients : 0) +
4593 (BaseOpcode->Coordinates ? DimInfo->NumCoords : 0) +
4594 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
4595 SmallVector<SDValue, 4> VAddrs;
4596 for (unsigned i = 0; i < NumVAddrs; ++i)
4597 VAddrs.push_back(Op.getOperand(AddrIdx + i));
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004598
4599 // Optimize _L to _LZ when the lod argument is zero or negative
4600 if (LZMappingInfo) {
4601 if (auto ConstantLod =
4602 dyn_cast<ConstantFPSDNode>(VAddrs[NumVAddrs-1].getNode())) {
4603 if (ConstantLod->isZero() || ConstantLod->isNegative()) {
4604 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
4605 VAddrs.pop_back(); // remove 'lod'
4606 }
4607 }
4608 }
4609
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004610 SDValue VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
4611
4612 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
4613 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
4614 unsigned CtrlIdx; // Index of texfailctrl argument
4615 SDValue Unorm;
4616 if (!BaseOpcode->Sampler) {
4617 Unorm = True;
4618 CtrlIdx = AddrIdx + NumVAddrs + 1;
4619 } else {
4620 auto UnormConst =
4621 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
4622 if (!UnormConst)
4623 return Op;
4624
4625 Unorm = UnormConst->getZExtValue() ? True : False;
4626 CtrlIdx = AddrIdx + NumVAddrs + 3;
4627 }
4628
4629 SDValue TexFail = Op.getOperand(CtrlIdx);
4630 auto TexFailConst = dyn_cast<ConstantSDNode>(TexFail.getNode());
4631 if (!TexFailConst || TexFailConst->getZExtValue() != 0)
4632 return Op;
4633
4634 SDValue GLC;
4635 SDValue SLC;
4636 if (BaseOpcode->Atomic) {
4637 GLC = True; // TODO no-return optimization
4638 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC))
4639 return Op;
4640 } else {
4641 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC))
4642 return Op;
4643 }
4644
4645 SmallVector<SDValue, 14> Ops;
4646 if (BaseOpcode->Store || BaseOpcode->Atomic)
4647 Ops.push_back(VData); // vdata
4648 Ops.push_back(VAddr);
4649 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
4650 if (BaseOpcode->Sampler)
4651 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
4652 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
4653 Ops.push_back(Unorm);
4654 Ops.push_back(GLC);
4655 Ops.push_back(SLC);
4656 Ops.push_back(False); // r128
4657 Ops.push_back(False); // tfe
4658 Ops.push_back(False); // lwe
4659 Ops.push_back(DimInfo->DA ? True : False);
4660 if (BaseOpcode->HasD16)
4661 Ops.push_back(IsD16 ? True : False);
4662 if (isa<MemSDNode>(Op))
4663 Ops.push_back(Op.getOperand(0)); // chain
4664
4665 int NumVAddrDwords = VAddr.getValueType().getSizeInBits() / 32;
4666 int Opcode = -1;
4667
Tom Stellard5bfbae52018-07-11 20:59:01 +00004668 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004669 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004670 NumVDataDwords, NumVAddrDwords);
4671 if (Opcode == -1)
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004672 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004673 NumVDataDwords, NumVAddrDwords);
4674 assert(Opcode != -1);
4675
4676 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
4677 if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
4678 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
4679 *MemRefs = MemOp->getMemOperand();
4680 NewNode->setMemRefs(MemRefs, MemRefs + 1);
4681 }
4682
4683 if (BaseOpcode->AtomicX2) {
4684 SmallVector<SDValue, 1> Elt;
4685 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
4686 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
4687 } else if (IsD16 && !BaseOpcode->Store) {
4688 MVT LoadVT = Op.getSimpleValueType();
4689 SDValue Adjusted = adjustLoadValueTypeImpl(
4690 SDValue(NewNode, 0), LoadVT, DL, DAG, Subtarget->hasUnpackedD16VMem());
4691 return DAG.getMergeValues({Adjusted, SDValue(NewNode, 1)}, DL);
4692 }
4693
4694 return SDValue(NewNode, 0);
4695}
4696
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004697SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4698 SelectionDAG &DAG) const {
4699 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00004700 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004701
4702 EVT VT = Op.getValueType();
4703 SDLoc DL(Op);
4704 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4705
Sanjay Patela2607012015-09-16 16:31:21 +00004706 // TODO: Should this propagate fast-math-flags?
4707
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004708 switch (IntrinsicID) {
Tom Stellard2f3f9852017-01-25 01:25:13 +00004709 case Intrinsic::amdgcn_implicit_buffer_ptr: {
Matt Arsenaultceafc552018-05-29 17:42:50 +00004710 if (getSubtarget()->isAmdCodeObjectV2(MF.getFunction()))
Matt Arsenault10fc0622017-06-26 03:01:31 +00004711 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004712 return getPreloadedValue(DAG, *MFI, VT,
4713 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
Tom Stellard2f3f9852017-01-25 01:25:13 +00004714 }
Tom Stellard48f29f22015-11-26 00:43:29 +00004715 case Intrinsic::amdgcn_dispatch_ptr:
Matt Arsenault48ab5262016-04-25 19:27:18 +00004716 case Intrinsic::amdgcn_queue_ptr: {
Matt Arsenaultceafc552018-05-29 17:42:50 +00004717 if (!Subtarget->isAmdCodeObjectV2(MF.getFunction())) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00004718 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00004719 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
Oliver Stannard7e7d9832016-02-02 13:52:43 +00004720 DL.getDebugLoc());
Matt Arsenault800fecf2016-01-11 21:18:33 +00004721 DAG.getContext()->diagnose(BadIntrin);
4722 return DAG.getUNDEF(VT);
4723 }
4724
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004725 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
4726 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
4727 return getPreloadedValue(DAG, *MFI, VT, RegID);
Matt Arsenault48ab5262016-04-25 19:27:18 +00004728 }
Jan Veselyfea814d2016-06-21 20:46:20 +00004729 case Intrinsic::amdgcn_implicitarg_ptr: {
Matt Arsenault9166ce82017-07-28 15:52:08 +00004730 if (MFI->isEntryFunction())
4731 return getImplicitArgPtr(DAG, DL);
Matt Arsenault817c2532017-08-03 23:12:44 +00004732 return getPreloadedValue(DAG, *MFI, VT,
4733 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
Jan Veselyfea814d2016-06-21 20:46:20 +00004734 }
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00004735 case Intrinsic::amdgcn_kernarg_segment_ptr: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004736 return getPreloadedValue(DAG, *MFI, VT,
4737 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00004738 }
Matt Arsenault8d718dc2016-07-22 17:01:30 +00004739 case Intrinsic::amdgcn_dispatch_id: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004740 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
Matt Arsenault8d718dc2016-07-22 17:01:30 +00004741 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004742 case Intrinsic::amdgcn_rcp:
4743 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
4744 case Intrinsic::amdgcn_rsq:
4745 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00004746 case Intrinsic::amdgcn_rsq_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004747 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004748 return emitRemovedIntrinsicError(DAG, DL, VT);
4749
4750 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00004751 case Intrinsic::amdgcn_rcp_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00004752 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault32fc5272016-07-26 16:45:45 +00004753 return emitRemovedIntrinsicError(DAG, DL, VT);
4754 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
Matt Arsenault09b2c4a2016-07-15 21:26:52 +00004755 case Intrinsic::amdgcn_rsq_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004756 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault79963e82016-02-13 01:03:00 +00004757 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Tom Stellard48f29f22015-11-26 00:43:29 +00004758
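// v_rsq_clamp does not exist on VI+, so emulate it by clamping the plain
// rsq result to the largest and smallest finite floats.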
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004759 Type *Type = VT.getTypeForEVT(*DAG.getContext());
4760 APFloat Max = APFloat::getLargest(Type->getFltSemantics());
4761 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
4762
4763 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4764 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
4765 DAG.getConstantFP(Max, DL, VT));
4766 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
4767 DAG.getConstantFP(Min, DL, VT));
4768 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004769 case Intrinsic::r600_read_ngroups_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004770 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004771 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004772
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004773 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004774 SI::KernelInputOffsets::NGROUPS_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004775 case Intrinsic::r600_read_ngroups_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004776 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004777 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004778
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004779 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004780 SI::KernelInputOffsets::NGROUPS_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004781 case Intrinsic::r600_read_ngroups_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004782 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004783 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004784
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004785 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004786 SI::KernelInputOffsets::NGROUPS_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004787 case Intrinsic::r600_read_global_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004788 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004789 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004790
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004791 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004792 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004793 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004794 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004795 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004796
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004797 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004798 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004799 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004800 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004801 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004802
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004803 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004804 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004805 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004806 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004807 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004808
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004809 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4810 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004811 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004812 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004813 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004814
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004815 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4816 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004817 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004818 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004819 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004820
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004821 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4822 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00004823 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004824 case Intrinsic::r600_read_tgid_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004825 return getPreloadedValue(DAG, *MFI, VT,
4826 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Matt Arsenault43976df2016-01-30 04:25:19 +00004827 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004828 case Intrinsic::r600_read_tgid_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004829 return getPreloadedValue(DAG, *MFI, VT,
4830 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Matt Arsenault43976df2016-01-30 04:25:19 +00004831 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004832 case Intrinsic::r600_read_tgid_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004833 return getPreloadedValue(DAG, *MFI, VT,
4834 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
4835 case Intrinsic::amdgcn_workitem_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004836 case Intrinsic::r600_read_tidig_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004837 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4838 SDLoc(DAG.getEntryNode()),
4839 MFI->getArgInfo().WorkItemIDX);
Matt Arsenault43976df2016-01-30 04:25:19 +00004841 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004842 case Intrinsic::r600_read_tidig_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004843 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4844 SDLoc(DAG.getEntryNode()),
4845 MFI->getArgInfo().WorkItemIDY);
Matt Arsenault43976df2016-01-30 04:25:19 +00004846 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004847 case Intrinsic::r600_read_tidig_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004848 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4849 SDLoc(DAG.getEntryNode()),
4850 MFI->getArgInfo().WorkItemIDZ);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004851 case AMDGPUIntrinsic::SI_load_const: {
4852 SDValue Ops[] = {
4853 Op.getOperand(1),
4854 Op.getOperand(2)
4855 };
4856
4857 MachineMemOperand *MMO = MF.getMachineMemOperand(
Justin Lebaradbf09e2016-09-11 01:38:58 +00004858 MachinePointerInfo(),
4859 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
4860 MachineMemOperand::MOInvariant,
4861 VT.getStoreSize(), 4);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004862 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
4863 Op->getVTList(), Ops, VT, MMO);
4864 }
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004865 case Intrinsic::amdgcn_fdiv_fast:
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00004866 return lowerFDIV_FAST(Op, DAG);
Tom Stellard2187bb82016-12-06 23:52:13 +00004867 case Intrinsic::amdgcn_interp_mov: {
4868 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4869 SDValue Glue = M0.getValue(1);
4870 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
4871 Op.getOperand(2), Op.getOperand(3), Glue);
4872 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00004873 case Intrinsic::amdgcn_interp_p1: {
4874 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4875 SDValue Glue = M0.getValue(1);
4876 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
4877 Op.getOperand(2), Op.getOperand(3), Glue);
4878 }
4879 case Intrinsic::amdgcn_interp_p2: {
4880 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
4881 SDValue Glue = SDValue(M0.getNode(), 1);
4882 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
4883 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
4884 Glue);
4885 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004886 case Intrinsic::amdgcn_sin:
4887 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
4888
4889 case Intrinsic::amdgcn_cos:
4890 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
4891
4892 case Intrinsic::amdgcn_log_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004893 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004894 return SDValue();
4895
4896 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00004897 MF.getFunction(), "intrinsic not supported on subtarget",
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004898 DL.getDebugLoc());
4899 DAG.getContext()->diagnose(BadIntrin);
4900 return DAG.getUNDEF(VT);
4901 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004902 case Intrinsic::amdgcn_ldexp:
4903 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
4904 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00004905
4906 case Intrinsic::amdgcn_fract:
4907 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
4908
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004909 case Intrinsic::amdgcn_class:
4910 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
4911 Op.getOperand(1), Op.getOperand(2));
4912 case Intrinsic::amdgcn_div_fmas:
4913 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
4914 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4915 Op.getOperand(4));
4916
4917 case Intrinsic::amdgcn_div_fixup:
4918 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
4919 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4920
4921 case Intrinsic::amdgcn_trig_preop:
4922 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
4923 Op.getOperand(1), Op.getOperand(2));
4924 case Intrinsic::amdgcn_div_scale: {
4925 // 3rd parameter required to be a constant.
4926 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4927 if (!Param)
Matt Arsenault206f8262017-08-01 20:49:41 +00004928 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004929
4930 // Translate to the operands expected by the machine instruction: the first
4931 // operand must match whichever of numerator/denominator is being scaled.
4932 SDValue Numerator = Op.getOperand(1);
4933 SDValue Denominator = Op.getOperand(2);
4934
4935 // Note this order is the opposite of the machine instruction's operand order,
4936 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
4937 // intrinsic has the numerator as the first operand to match a normal
4938 // division operation.
4939
4940 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
4941
4942 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
4943 Denominator, Numerator);
4944 }
Wei Ding07e03712016-07-28 16:42:13 +00004945 case Intrinsic::amdgcn_icmp: {
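// The third operand is an integer ICmpInst predicate value; e.g. a constant
// 32 (ICMP_EQ) turns into an AMDGPUISD::SETCC with ISD::SETEQ.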
4946 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004947 if (!CD)
4948 return DAG.getUNDEF(VT);
Wei Ding07e03712016-07-28 16:42:13 +00004949
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004950 int CondCode = CD->getSExtValue();
Wei Ding07e03712016-07-28 16:42:13 +00004951 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004952 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00004953 return DAG.getUNDEF(VT);
4954
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00004955 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00004956 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4957 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4958 Op.getOperand(2), DAG.getCondCode(CCOpcode));
4959 }
4960 case Intrinsic::amdgcn_fcmp: {
4961 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004962 if (!CD)
4963 return DAG.getUNDEF(VT);
Wei Ding07e03712016-07-28 16:42:13 +00004964
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004965 int CondCode = CD->getSExtValue();
4966 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4967 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00004968 return DAG.getUNDEF(VT);
4969
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00004970 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00004971 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4972 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4973 Op.getOperand(2), DAG.getCondCode(CCOpcode));
4974 }
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00004975 case Intrinsic::amdgcn_fmed3:
4976 return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
4977 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Farhana Aleenc370d7b2018-07-16 18:19:59 +00004978 case Intrinsic::amdgcn_fdot2:
4979 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00004980 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4981 Op.getOperand(4));
Matt Arsenault32fc5272016-07-26 16:45:45 +00004982 case Intrinsic::amdgcn_fmul_legacy:
4983 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
4984 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00004985 case Intrinsic::amdgcn_sffbh:
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00004986 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaultf5262252017-02-22 23:04:58 +00004987 case Intrinsic::amdgcn_sbfe:
4988 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
4989 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4990 case Intrinsic::amdgcn_ubfe:
4991 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
4992 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Marek Olsak13e47412018-01-31 20:18:04 +00004993 case Intrinsic::amdgcn_cvt_pkrtz:
4994 case Intrinsic::amdgcn_cvt_pknorm_i16:
4995 case Intrinsic::amdgcn_cvt_pknorm_u16:
4996 case Intrinsic::amdgcn_cvt_pk_i16:
4997 case Intrinsic::amdgcn_cvt_pk_u16: {
4998 // FIXME: Stop adding cast if v2f16/v2i16 are legal.
Matt Arsenault1f17c662017-02-22 00:27:34 +00004999 EVT VT = Op.getValueType();
Marek Olsak13e47412018-01-31 20:18:04 +00005000 unsigned Opcode;
5001
5002 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5003 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5004 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5005 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5006 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5007 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5008 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5009 Opcode = AMDGPUISD::CVT_PK_I16_I32;
5010 else
5011 Opcode = AMDGPUISD::CVT_PK_U16_U32;
5012
Matt Arsenault709374d2018-08-01 20:13:58 +00005013 if (isTypeLegal(VT))
5014 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5015
Marek Olsak13e47412018-01-31 20:18:04 +00005016 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Matt Arsenault1f17c662017-02-22 00:27:34 +00005017 Op.getOperand(1), Op.getOperand(2));
5018 return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5019 }
Connor Abbott8c217d02017-08-04 18:36:49 +00005020 case Intrinsic::amdgcn_wqm: {
5021 SDValue Src = Op.getOperand(1);
5022 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
5023 0);
5024 }
Connor Abbott92638ab2017-08-04 18:36:52 +00005025 case Intrinsic::amdgcn_wwm: {
5026 SDValue Src = Op.getOperand(1);
5027 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
5028 0);
5029 }
Stanislav Mekhanoshindacda792018-06-26 20:04:19 +00005030 case Intrinsic::amdgcn_fmad_ftz:
5031 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5032 Op.getOperand(2), Op.getOperand(3));
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005033 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005034 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5035 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5036 return lowerImage(Op, ImageDimIntr, DAG);
5037
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005038 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005039 }
5040}
5041
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005042SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5043 SelectionDAG &DAG) const {
5044 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00005045 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00005046
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005047 switch (IntrID) {
5048 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005049 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005050 case Intrinsic::amdgcn_ds_fadd:
5051 case Intrinsic::amdgcn_ds_fmin:
5052 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005053 MemSDNode *M = cast<MemSDNode>(Op);
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005054 unsigned Opc;
5055 switch (IntrID) {
5056 case Intrinsic::amdgcn_atomic_inc:
5057 Opc = AMDGPUISD::ATOMIC_INC;
5058 break;
5059 case Intrinsic::amdgcn_atomic_dec:
5060 Opc = AMDGPUISD::ATOMIC_DEC;
5061 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005062 case Intrinsic::amdgcn_ds_fadd:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005063 Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
5064 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005065 case Intrinsic::amdgcn_ds_fmin:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005066 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
5067 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005068 case Intrinsic::amdgcn_ds_fmax:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005069 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
5070 break;
5071 default:
5072 llvm_unreachable("Unknown intrinsic!");
5073 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005074 SDValue Ops[] = {
5075 M->getOperand(0), // Chain
5076 M->getOperand(2), // Ptr
5077 M->getOperand(3) // Value
5078 };
5079
5080 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
5081 M->getMemoryVT(), M->getMemOperand());
5082 }
Tom Stellard6f9ef142016-12-20 17:19:44 +00005083 case Intrinsic::amdgcn_buffer_load:
5084 case Intrinsic::amdgcn_buffer_load_format: {
5085 SDValue Ops[] = {
5086 Op.getOperand(0), // Chain
5087 Op.getOperand(2), // rsrc
5088 Op.getOperand(3), // vindex
5089 Op.getOperand(4), // offset
5090 Op.getOperand(5), // glc
5091 Op.getOperand(6) // slc
5092 };
Tom Stellard6f9ef142016-12-20 17:19:44 +00005093
5094 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
5095 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5096 EVT VT = Op.getValueType();
5097 EVT IntVT = VT.changeTypeToInteger();
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005098 auto *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005099 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005100
Tim Renouf366a49d2018-08-02 23:33:01 +00005101 if (LoadVT.getScalarType() == MVT::f16)
5102 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5103 M, DAG, Ops);
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005104 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5105 M->getMemOperand());
Tom Stellard6f9ef142016-12-20 17:19:44 +00005106 }
David Stuttard70e8bc12017-06-22 16:29:22 +00005107 case Intrinsic::amdgcn_tbuffer_load: {
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005108 MemSDNode *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005109 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005110
David Stuttard70e8bc12017-06-22 16:29:22 +00005111 SDValue Ops[] = {
5112 Op.getOperand(0), // Chain
5113 Op.getOperand(2), // rsrc
5114 Op.getOperand(3), // vindex
5115 Op.getOperand(4), // voffset
5116 Op.getOperand(5), // soffset
5117 Op.getOperand(6), // offset
5118 Op.getOperand(7), // dfmt
5119 Op.getOperand(8), // nfmt
5120 Op.getOperand(9), // glc
5121 Op.getOperand(10) // slc
5122 };
5123
Tim Renouf366a49d2018-08-02 23:33:01 +00005124 if (LoadVT.getScalarType() == MVT::f16)
5125 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5126 M, DAG, Ops);
David Stuttard70e8bc12017-06-22 16:29:22 +00005127 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Matt Arsenault1349a042018-05-22 06:32:10 +00005128 Op->getVTList(), Ops, LoadVT,
5129 M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00005130 }
Marek Olsak5cec6412017-11-09 01:52:48 +00005131 case Intrinsic::amdgcn_buffer_atomic_swap:
5132 case Intrinsic::amdgcn_buffer_atomic_add:
5133 case Intrinsic::amdgcn_buffer_atomic_sub:
5134 case Intrinsic::amdgcn_buffer_atomic_smin:
5135 case Intrinsic::amdgcn_buffer_atomic_umin:
5136 case Intrinsic::amdgcn_buffer_atomic_smax:
5137 case Intrinsic::amdgcn_buffer_atomic_umax:
5138 case Intrinsic::amdgcn_buffer_atomic_and:
5139 case Intrinsic::amdgcn_buffer_atomic_or:
5140 case Intrinsic::amdgcn_buffer_atomic_xor: {
5141 SDValue Ops[] = {
5142 Op.getOperand(0), // Chain
5143 Op.getOperand(2), // vdata
5144 Op.getOperand(3), // rsrc
5145 Op.getOperand(4), // vindex
5146 Op.getOperand(5), // offset
5147 Op.getOperand(6) // slc
5148 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005149 EVT VT = Op.getValueType();
5150
5151 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005152 unsigned Opcode = 0;
5153
5154 switch (IntrID) {
5155 case Intrinsic::amdgcn_buffer_atomic_swap:
5156 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5157 break;
5158 case Intrinsic::amdgcn_buffer_atomic_add:
5159 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5160 break;
5161 case Intrinsic::amdgcn_buffer_atomic_sub:
5162 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5163 break;
5164 case Intrinsic::amdgcn_buffer_atomic_smin:
5165 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5166 break;
5167 case Intrinsic::amdgcn_buffer_atomic_umin:
5168 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5169 break;
5170 case Intrinsic::amdgcn_buffer_atomic_smax:
5171 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5172 break;
5173 case Intrinsic::amdgcn_buffer_atomic_umax:
5174 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5175 break;
5176 case Intrinsic::amdgcn_buffer_atomic_and:
5177 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5178 break;
5179 case Intrinsic::amdgcn_buffer_atomic_or:
5180 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5181 break;
5182 case Intrinsic::amdgcn_buffer_atomic_xor:
5183 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5184 break;
5185 default:
5186 llvm_unreachable("unhandled atomic opcode");
5187 }
5188
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005189 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5190 M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005191 }
5192
5193 case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
5194 SDValue Ops[] = {
5195 Op.getOperand(0), // Chain
5196 Op.getOperand(2), // src
5197 Op.getOperand(3), // cmp
5198 Op.getOperand(4), // rsrc
5199 Op.getOperand(5), // vindex
5200 Op.getOperand(6), // offset
5201 Op.getOperand(7) // slc
5202 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005203 EVT VT = Op.getValueType();
5204 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005205
5206 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005207 Op->getVTList(), Ops, VT, M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005208 }
5209
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005210 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005211 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5212 AMDGPU::getImageDimIntrinsicInfo(IntrID))
5213 return lowerImage(Op, ImageDimIntr, DAG);
Matt Arsenault1349a042018-05-22 06:32:10 +00005214
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005215 return SDValue();
5216 }
5217}
5218
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005219SDValue SITargetLowering::handleD16VData(SDValue VData,
5220 SelectionDAG &DAG) const {
5221 EVT StoreVT = VData.getValueType();
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005222
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005223 // No change for f16 and legal vector D16 types.
Matt Arsenault1349a042018-05-22 06:32:10 +00005224 if (!StoreVT.isVector())
5225 return VData;
5226
5227 SDLoc DL(VData);
5228 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
5229
5230 if (Subtarget->hasUnpackedD16VMem()) {
5231 // We need to unpack the packed data to store.
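// e.g. a v2f16 value is rewritten as two i32 elements, each holding one
// zero-extended half, to match the unpacked D16 memory layout.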
5232 EVT IntStoreVT = StoreVT.changeTypeToInteger();
5233 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
5234
5235 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
5236 StoreVT.getVectorNumElements());
5237 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
5238 return DAG.UnrollVectorOp(ZExt.getNode());
5239 }
5240
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005241 assert(isTypeLegal(StoreVT));
5242 return VData;
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005243}
5244
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005245SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
5246 SelectionDAG &DAG) const {
Tom Stellardfc92e772015-05-12 14:18:14 +00005247 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005248 SDValue Chain = Op.getOperand(0);
5249 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
David Stuttard70e8bc12017-06-22 16:29:22 +00005250 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005251
5252 switch (IntrinsicID) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00005253 case Intrinsic::amdgcn_exp: {
Matt Arsenault4165efd2017-01-17 07:26:53 +00005254 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
5255 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
5256 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
5257 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
5258
5259 const SDValue Ops[] = {
5260 Chain,
5261 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
5262 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
5263 Op.getOperand(4), // src0
5264 Op.getOperand(5), // src1
5265 Op.getOperand(6), // src2
5266 Op.getOperand(7), // src3
5267 DAG.getTargetConstant(0, DL, MVT::i1), // compr
5268 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
5269 };
5270
5271 unsigned Opc = Done->isNullValue() ?
5272 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
5273 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
5274 }
5275 case Intrinsic::amdgcn_exp_compr: {
5276 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
5277 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
5278 SDValue Src0 = Op.getOperand(4);
5279 SDValue Src1 = Op.getOperand(5);
5280 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
5281 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
5282
5283 SDValue Undef = DAG.getUNDEF(MVT::f32);
5284 const SDValue Ops[] = {
5285 Chain,
5286 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
5287 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
5288 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
5289 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
5290 Undef, // src2
5291 Undef, // src3
5292 DAG.getTargetConstant(1, DL, MVT::i1), // compr
5293 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
5294 };
5295
5296 unsigned Opc = Done->isNullValue() ?
5297 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
5298 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
5299 }
5300 case Intrinsic::amdgcn_s_sendmsg:
Matt Arsenaultd3e5cb72017-02-16 02:01:17 +00005301 case Intrinsic::amdgcn_s_sendmsghalt: {
5302 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
5303 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Tom Stellardfc92e772015-05-12 14:18:14 +00005304 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
5305 SDValue Glue = Chain.getValue(1);
Matt Arsenaulta78ca622017-02-15 22:17:09 +00005306 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Jan Veselyd48445d2017-01-04 18:06:55 +00005307 Op.getOperand(2), Glue);
5308 }
Marek Olsak2d825902017-04-28 20:21:58 +00005309 case Intrinsic::amdgcn_init_exec: {
5310 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
5311 Op.getOperand(2));
5312 }
5313 case Intrinsic::amdgcn_init_exec_from_input: {
5314 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
5315 Op.getOperand(2), Op.getOperand(3));
5316 }
Matt Arsenault00568682016-07-13 06:04:22 +00005317 case AMDGPUIntrinsic::AMDGPU_kill: {
Matt Arsenault03006fd2016-07-19 16:27:56 +00005318 SDValue Src = Op.getOperand(2);
5319 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
Matt Arsenault00568682016-07-13 06:04:22 +00005320 if (!K->isNegative())
5321 return Chain;
Matt Arsenault03006fd2016-07-19 16:27:56 +00005322
5323 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
5324 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
Matt Arsenault00568682016-07-13 06:04:22 +00005325 }
5326
Matt Arsenault03006fd2016-07-19 16:27:56 +00005327 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
5328 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
Matt Arsenault00568682016-07-13 06:04:22 +00005329 }
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005330 case Intrinsic::amdgcn_s_barrier: {
5331 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005332 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matthias Braunf1caa282017-12-15 22:22:58 +00005333 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005334 if (WGSize <= ST.getWavefrontSize())
5335 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
5336 Op.getOperand(0)), 0);
5337 }
5338 return SDValue();
5339 };
David Stuttard70e8bc12017-06-22 16:29:22 +00005340 case AMDGPUIntrinsic::SI_tbuffer_store: {
5341
5342 // Extract vindex and voffset from vaddr as appropriate
5343 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
5344 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
5345 SDValue VAddr = Op.getOperand(5);
5346
5347 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
5348
5349 assert(!(OffEn->isOne() && IdxEn->isOne()) &&
5350 "Legacy intrinsic doesn't support both offset and index - use new version");
5351
5352 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
5353 SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
5354
5355 // Deal with the vec-3 case
5356 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
5357 auto Opcode = NumChannels->getZExtValue() == 3 ?
5358 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
5359
5360 SDValue Ops[] = {
5361 Chain,
5362 Op.getOperand(3), // vdata
5363 Op.getOperand(2), // rsrc
5364 VIndex,
5365 VOffset,
5366 Op.getOperand(6), // soffset
5367 Op.getOperand(7), // inst_offset
5368 Op.getOperand(8), // dfmt
5369 Op.getOperand(9), // nfmt
5370 Op.getOperand(12), // glc
5371 Op.getOperand(13), // slc
5372 };
5373
David Stuttardf6779662017-06-22 17:15:49 +00005374 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
David Stuttard70e8bc12017-06-22 16:29:22 +00005375 "Value of tfe other than zero is unsupported");
5376
5377 EVT VT = Op.getOperand(3).getValueType();
5378 MachineMemOperand *MMO = MF.getMachineMemOperand(
5379 MachinePointerInfo(),
5380 MachineMemOperand::MOStore,
5381 VT.getStoreSize(), 4);
5382 return DAG.getMemIntrinsicNode(Opcode, DL,
5383 Op->getVTList(), Ops, VT, MMO);
5384 }
5385
5386 case Intrinsic::amdgcn_tbuffer_store: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005387 SDValue VData = Op.getOperand(2);
5388 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
5389 if (IsD16)
5390 VData = handleD16VData(VData, DAG);
David Stuttard70e8bc12017-06-22 16:29:22 +00005391 SDValue Ops[] = {
5392 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005393 VData, // vdata
David Stuttard70e8bc12017-06-22 16:29:22 +00005394 Op.getOperand(3), // rsrc
5395 Op.getOperand(4), // vindex
5396 Op.getOperand(5), // voffset
5397 Op.getOperand(6), // soffset
5398 Op.getOperand(7), // offset
5399 Op.getOperand(8), // dfmt
5400 Op.getOperand(9), // nfmt
5401 Op.getOperand(10), // glc
5402 Op.getOperand(11) // slc
5403 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005404 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
5405 AMDGPUISD::TBUFFER_STORE_FORMAT;
5406 MemSDNode *M = cast<MemSDNode>(Op);
5407 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5408 M->getMemoryVT(), M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00005409 }
5410
Marek Olsak5cec6412017-11-09 01:52:48 +00005411 case Intrinsic::amdgcn_buffer_store:
5412 case Intrinsic::amdgcn_buffer_store_format: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005413 SDValue VData = Op.getOperand(2);
5414 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
5415 if (IsD16)
5416 VData = handleD16VData(VData, DAG);
Marek Olsak5cec6412017-11-09 01:52:48 +00005417 SDValue Ops[] = {
5418 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005419 VData, // vdata
Marek Olsak5cec6412017-11-09 01:52:48 +00005420 Op.getOperand(3), // rsrc
5421 Op.getOperand(4), // vindex
5422 Op.getOperand(5), // offset
5423 Op.getOperand(6), // glc
5424 Op.getOperand(7) // slc
5425 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005426 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
5427 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
5428 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
5429 MemSDNode *M = cast<MemSDNode>(Op);
5430 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5431 M->getMemoryVT(), M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005432 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005433 default: {
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005434 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5435 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5436 return lowerImage(Op, ImageDimIntr, DAG);
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005437
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005438 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005439 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005440 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005441}
5442
Matt Arsenault90083d32018-06-07 09:54:49 +00005443static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
5444 ISD::LoadExtType ExtType, SDValue Op,
5445 const SDLoc &SL, EVT VT) {
5446 if (VT.bitsLT(Op.getValueType()))
5447 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
5448
5449 switch (ExtType) {
5450 case ISD::SEXTLOAD:
5451 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
5452 case ISD::ZEXTLOAD:
5453 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
5454 case ISD::EXTLOAD:
5455 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
5456 case ISD::NON_EXTLOAD:
5457 return Op;
5458 }
5459
5460 llvm_unreachable("invalid ext type");
5461}
5462
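// Widen sub-dword loads of uniform, constant-like memory to a full 32-bit
// load (making them candidates for scalar load selection), then truncate or
// extend the result back to the original type.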
5463SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
5464 SelectionDAG &DAG = DCI.DAG;
5465 if (Ld->getAlignment() < 4 || Ld->isDivergent())
5466 return SDValue();
5467
5468 // FIXME: Constant loads should all be marked invariant.
5469 unsigned AS = Ld->getAddressSpace();
5470 if (AS != AMDGPUASI.CONSTANT_ADDRESS &&
5471 AS != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
5472 (AS != AMDGPUASI.GLOBAL_ADDRESS || !Ld->isInvariant()))
5473 return SDValue();
5474
5475 // Don't do this early, since it may interfere with adjacent load merging for
5476 // illegal types. We can avoid losing alignment information for exotic types
5477 // pre-legalize.
5478 EVT MemVT = Ld->getMemoryVT();
5479 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
5480 MemVT.getSizeInBits() >= 32)
5481 return SDValue();
5482
5483 SDLoc SL(Ld);
5484
5485 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
5486 "unexpected vector extload");
5487
5488 // TODO: Drop only high part of range.
5489 SDValue Ptr = Ld->getBasePtr();
5490 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
5491 MVT::i32, SL, Ld->getChain(), Ptr,
5492 Ld->getOffset(),
5493 Ld->getPointerInfo(), MVT::i32,
5494 Ld->getAlignment(),
5495 Ld->getMemOperand()->getFlags(),
5496 Ld->getAAInfo(),
5497 nullptr); // Drop ranges
5498
5499 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
5500 if (MemVT.isFloatingPoint()) {
5501 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
5502 "unexpected fp extload");
5503 TruncVT = MemVT.changeTypeToInteger();
5504 }
5505
5506 SDValue Cvt = NewLoad;
5507 if (Ld->getExtensionType() == ISD::SEXTLOAD) {
5508 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
5509 DAG.getValueType(TruncVT));
5510 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
5511 Ld->getExtensionType() == ISD::NON_EXTLOAD) {
5512 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
5513 } else {
5514 assert(Ld->getExtensionType() == ISD::EXTLOAD);
5515 }
5516
5517 EVT VT = Ld->getValueType(0);
5518 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
5519
5520 DCI.AddToWorklist(Cvt.getNode());
5521
5522 // We may need to handle exotic cases, such as i16->i64 extloads, so insert
5523 // the appropriate extension from the 32-bit load.
5524 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
5525 DCI.AddToWorklist(Cvt.getNode());
5526
5527 // Handle conversion back to floating point if necessary.
5528 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
5529
5530 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
5531}
5532
Tom Stellard81d871d2013-11-13 23:36:50 +00005533SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
5534 SDLoc DL(Op);
5535 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00005536 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00005537 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00005538
Matt Arsenaulta1436412016-02-10 18:21:45 +00005539 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00005540 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
5541 return SDValue();
5542
Matt Arsenault6dfda962016-02-10 18:21:39 +00005543 // FIXME: Copied from PPC
5544 // First, load into 32 bits, then truncate to the 1- or 16-bit result.
5545
5546 SDValue Chain = Load->getChain();
5547 SDValue BasePtr = Load->getBasePtr();
5548 MachineMemOperand *MMO = Load->getMemOperand();
5549
Tom Stellard115a6152016-11-10 16:02:37 +00005550 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
5551
Matt Arsenault6dfda962016-02-10 18:21:39 +00005552 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00005553 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00005554
5555 SDValue Ops[] = {
Matt Arsenaulta1436412016-02-10 18:21:45 +00005556 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
Matt Arsenault6dfda962016-02-10 18:21:39 +00005557 NewLD.getValue(1)
5558 };
5559
5560 return DAG.getMergeValues(Ops, DL);
5561 }
Tom Stellard81d871d2013-11-13 23:36:50 +00005562
Matt Arsenaulta1436412016-02-10 18:21:45 +00005563 if (!MemVT.isVector())
5564 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00005565
Matt Arsenaulta1436412016-02-10 18:21:45 +00005566 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
5567 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00005568
Farhana Aleen89196642018-03-07 17:09:18 +00005569 unsigned Alignment = Load->getAlignment();
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005570 unsigned AS = Load->getAddressSpace();
5571 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
Farhana Aleen89196642018-03-07 17:09:18 +00005572 AS, Alignment)) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005573 SDValue Ops[2];
5574 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
5575 return DAG.getMergeValues(Ops, DL);
5576 }
5577
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005578 MachineFunction &MF = DAG.getMachineFunction();
5579 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5580 // If there is a possibility that flat instructions access scratch memory,
5581 // then we need to use the same legalization rules we use for private.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005582 if (AS == AMDGPUASI.FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005583 AS = MFI->hasFlatScratchInit() ?
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005584 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005585
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005586 unsigned NumElements = MemVT.getVectorNumElements();
Matt Arsenault6c041a32018-03-29 19:59:28 +00005587
Matt Arsenault923712b2018-02-09 16:57:57 +00005588 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5589 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
Matt Arsenault6c041a32018-03-29 19:59:28 +00005590 if (!Op->isDivergent() && Alignment >= 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00005591 return SDValue();
5592 // Non-uniform loads will be selected to MUBUF instructions, so they
Alexander Timofeev18009562016-12-08 17:28:47 +00005593 // have the same legalization requirements as global and private
Matt Arsenaulta1436412016-02-10 18:21:45 +00005594 // loads.
5595 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005596 }
Matt Arsenault6c041a32018-03-29 19:59:28 +00005597
Matt Arsenault923712b2018-02-09 16:57:57 +00005598 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5599 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
5600 AS == AMDGPUASI.GLOBAL_ADDRESS) {
Alexander Timofeev2e5eece2018-03-05 15:12:21 +00005601 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Farhana Aleen89196642018-03-07 17:09:18 +00005602 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
Matt Arsenault6c041a32018-03-29 19:59:28 +00005603 Alignment >= 4)
Alexander Timofeev18009562016-12-08 17:28:47 +00005604 return SDValue();
5605 // Non-uniform loads will be selected to MUBUF instructions, so they
5606 // have the same legalization requirements as global and private
5607 // loads.
5608 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005609 }
Matt Arsenault923712b2018-02-09 16:57:57 +00005610 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5611 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
5612 AS == AMDGPUASI.GLOBAL_ADDRESS ||
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005613 AS == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005614 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00005615 return SplitVectorLoad(Op, DAG);
5616 // v4 loads are supported for private and global memory.
5617 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005618 }
5619 if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005620 // Depending on the setting of the private_element_size field in the
5621 // resource descriptor, we can only make private accesses up to a certain
5622 // size.
5623 switch (Subtarget->getMaxPrivateElementSize()) {
5624 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00005625 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005626 case 8:
5627 if (NumElements > 2)
5628 return SplitVectorLoad(Op, DAG);
5629 return SDValue();
5630 case 16:
5631 // Same as global/flat
5632 if (NumElements > 4)
5633 return SplitVectorLoad(Op, DAG);
5634 return SDValue();
5635 default:
5636 llvm_unreachable("unsupported private_element_size");
5637 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005638 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
Farhana Aleena7cb3112018-03-09 17:41:39 +00005639 // Use ds_read_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00005640 if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
Farhana Aleena7cb3112018-03-09 17:41:39 +00005641 MemVT.getStoreSize() == 16)
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005642 return SDValue();
5643
Farhana Aleena7cb3112018-03-09 17:41:39 +00005644 if (NumElements > 2)
5645 return SplitVectorLoad(Op, DAG);
Tom Stellarde9373602014-01-22 19:24:14 +00005646 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005647 return SDValue();
Tom Stellard81d871d2013-11-13 23:36:50 +00005648}
5649
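// Lower a 64-bit select by bitcasting both value operands to v2i32 and
// selecting the low and high halves separately.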
Tom Stellard0ec134f2014-02-04 17:18:40 +00005650SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005651 EVT VT = Op.getValueType();
5652 assert(VT.getSizeInBits() == 64);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005653
5654 SDLoc DL(Op);
5655 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005656
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005657 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
5658 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005659
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00005660 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
5661 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
5662
5663 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
5664 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005665
5666 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
5667
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00005668 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
5669 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005670
5671 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
5672
Ahmed Bougacha128f8732016-04-26 21:15:30 +00005673 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005674 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005675}
5676
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005677// Catch division cases where we can use shortcuts with rcp and rsq
5678// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005679SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
5680 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005681 SDLoc SL(Op);
5682 SDValue LHS = Op.getOperand(0);
5683 SDValue RHS = Op.getOperand(1);
5684 EVT VT = Op.getValueType();
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005685 const SDNodeFlags Flags = Op->getFlags();
Michael Berg7acc81b2018-05-04 18:48:20 +00005686 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005687
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00005688 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
5689 return SDValue();
5690
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005691 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00005692 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
Matt Arsenault979902b2016-08-02 22:25:04 +00005693 if (CLHS->isExactlyValue(1.0)) {
5694 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
5695 // the CI documentation has a worst case error of 1 ulp.
5696 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
5697 // use it as long as we aren't trying to use denormals.
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005698 //
5699 // v_rcp_f16 and v_rsq_f16 DO support denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005700
Matt Arsenault979902b2016-08-02 22:25:04 +00005701 // 1.0 / sqrt(x) -> rsq(x)
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005702
Matt Arsenault979902b2016-08-02 22:25:04 +00005703 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
5704 // error seems really high at 2^29 ULP.
5705 if (RHS.getOpcode() == ISD::FSQRT)
5706 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
5707
5708 // 1.0 / x -> rcp(x)
5709 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
5710 }
5711
5712 // Same as for 1.0, but expand the sign out of the constant.
5713 if (CLHS->isExactlyValue(-1.0)) {
5714 // -1.0 / x -> rcp (fneg x)
5715 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
5716 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
5717 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005718 }
5719 }
5720
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005721 if (Unsafe) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005722 // Turn into multiply by the reciprocal.
5723 // x / y -> x * (1.0 / y)
5724 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005725 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005726 }
5727
5728 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005729}
5730
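// Helpers that rebuild FMUL/FMA as their *_W_CHAIN variants when a
// chain/glue result is supplied, so the f32 division expansion below can
// keep these operations ordered through the chain.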
Tom Stellard8485fa02016-12-07 02:42:15 +00005731static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5732 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
5733 if (GlueChain->getNumValues() <= 1) {
5734 return DAG.getNode(Opcode, SL, VT, A, B);
5735 }
5736
5737 assert(GlueChain->getNumValues() == 3);
5738
5739 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5740 switch (Opcode) {
5741 default: llvm_unreachable("no chain equivalent for opcode");
5742 case ISD::FMUL:
5743 Opcode = AMDGPUISD::FMUL_W_CHAIN;
5744 break;
5745 }
5746
5747 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
5748 GlueChain.getValue(2));
5749}
5750
5751static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5752 EVT VT, SDValue A, SDValue B, SDValue C,
5753 SDValue GlueChain) {
5754 if (GlueChain->getNumValues() <= 1) {
5755 return DAG.getNode(Opcode, SL, VT, A, B, C);
5756 }
5757
5758 assert(GlueChain->getNumValues() == 3);
5759
5760 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5761 switch (Opcode) {
5762 default: llvm_unreachable("no chain equivalent for opcode");
5763 case ISD::FMA:
5764 Opcode = AMDGPUISD::FMA_W_CHAIN;
5765 break;
5766 }
5767
5768 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
5769 GlueChain.getValue(2));
5770}
5771
Matt Arsenault4052a572016-12-22 03:05:41 +00005772SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005773 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
5774 return FastLowered;
5775
Matt Arsenault4052a572016-12-22 03:05:41 +00005776 SDLoc SL(Op);
5777 SDValue Src0 = Op.getOperand(0);
5778 SDValue Src1 = Op.getOperand(1);
5779
5780 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
5781 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
5782
5783 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
5784 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
5785
5786 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
5787 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
5788
5789 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
5790}
5791
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005792// Faster 2.5 ULP division that does not support denormals.
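// For reference, K0 = 0x6f800000 is +2^96 and K1 = 0x2f800000 is +2^-32. A
// scalar C model of the sequence below (an illustrative sketch only, not the
// lowering itself):
//   float fdiv_fast_model(float lhs, float rhs) {
//     float scale = (fabsf(rhs) > 0x1.0p+96f) ? 0x1.0p-32f : 1.0f;
//     float r = 1.0f / (rhs * scale);   // v_rcp_f32; no denormal support
//     return scale * (lhs * r);         // rescale to recover lhs / rhs
//   }
// Pre-scaling very large denominators by 2^-32 keeps the rcp result out of
// the denormal range it cannot represent.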
5793SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
5794 SDLoc SL(Op);
5795 SDValue LHS = Op.getOperand(1);
5796 SDValue RHS = Op.getOperand(2);
5797
5798 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
5799
5800 const APFloat K0Val(BitsToFloat(0x6f800000));
5801 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
5802
5803 const APFloat K1Val(BitsToFloat(0x2f800000));
5804 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
5805
5806 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
5807
5808 EVT SetCCVT =
5809 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
5810
5811 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
5812
5813 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
5814
5815 // TODO: Should this propagate fast-math-flags?
5816 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
5817
5818 // rcp does not support denormals.
5819 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
5820
5821 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
5822
5823 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
5824}
5825
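// Full-rate f32 division (illustrative outline of the sequence built below):
// both operands are range-reduced with DIV_SCALE, an initial RCP estimate of
// the scaled denominator is refined with a chain of FMAs, DIV_FMAS applies the
// final correction under the scale predicate, and DIV_FIXUP undoes the scaling
// and handles infinities, NaNs and other special cases.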
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005826SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005827 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00005828 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005829
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005830 SDLoc SL(Op);
5831 SDValue LHS = Op.getOperand(0);
5832 SDValue RHS = Op.getOperand(1);
5833
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005834 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005835
Wei Dinged0f97f2016-06-09 19:17:15 +00005836 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005837
Tom Stellard8485fa02016-12-07 02:42:15 +00005838 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5839 RHS, RHS, LHS);
5840 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5841 LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005842
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00005843 // Denominator is scaled to not be denormal, so using rcp is ok.
Tom Stellard8485fa02016-12-07 02:42:15 +00005844 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
5845 DenominatorScaled);
5846 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
5847 DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005848
Tom Stellard8485fa02016-12-07 02:42:15 +00005849 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
5850 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
5851 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
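  // Descriptive note: the encoding above targets the FP32 denormal-control
  // field of the MODE register, i.e. hwreg(HW_REG_MODE, 4, 2) (offset 4,
  // width WIDTH_M1 + 1 = 2), so the SETREG nodes below toggle only FP32
  // denormal handling around the rcp sequence.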
Matt Arsenault37fefd62016-06-10 02:18:02 +00005852
Tom Stellard8485fa02016-12-07 02:42:15 +00005853 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005854
Tom Stellard8485fa02016-12-07 02:42:15 +00005855 if (!Subtarget->hasFP32Denormals()) {
5856 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
5857 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
5858 SL, MVT::i32);
5859 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
5860 DAG.getEntryNode(),
5861 EnableDenormValue, BitField);
5862 SDValue Ops[3] = {
5863 NegDivScale0,
5864 EnableDenorm.getValue(0),
5865 EnableDenorm.getValue(1)
5866 };
Matt Arsenault37fefd62016-06-10 02:18:02 +00005867
Tom Stellard8485fa02016-12-07 02:42:15 +00005868 NegDivScale0 = DAG.getMergeValues(Ops, SL);
5869 }
5870
5871 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
5872 ApproxRcp, One, NegDivScale0);
5873
5874 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
5875 ApproxRcp, Fma0);
5876
5877 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
5878 Fma1, Fma1);
5879
5880 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
5881 NumeratorScaled, Mul);
5882
5883 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
5884
5885 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
5886 NumeratorScaled, Fma3);
5887
5888 if (!Subtarget->hasFP32Denormals()) {
5889 const SDValue DisableDenormValue =
5890 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
5891 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
5892 Fma4.getValue(1),
5893 DisableDenormValue,
5894 BitField,
5895 Fma4.getValue(2));
5896
5897 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
5898 DisableDenorm, DAG.getRoot());
5899 DAG.setRoot(OutputChain);
5900 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005901
Wei Dinged0f97f2016-06-09 19:17:15 +00005902 SDValue Scale = NumeratorScaled.getValue(1);
Tom Stellard8485fa02016-12-07 02:42:15 +00005903 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
5904 Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005905
Wei Dinged0f97f2016-06-09 19:17:15 +00005906 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005907}
5908
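// f64 division (illustrative note): the sequence below mirrors the f32 path
// above (DIV_SCALE on both operands, an RCP estimate refined by FMAs, then
// DIV_FMAS and DIV_FIXUP), with an extra workaround for the SI div_scale
// condition-output bug handled explicitly further down.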
5909SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005910 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005911 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005912
5913 SDLoc SL(Op);
5914 SDValue X = Op.getOperand(0);
5915 SDValue Y = Op.getOperand(1);
5916
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005917 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005918
5919 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
5920
5921 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
5922
5923 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
5924
5925 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
5926
5927 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
5928
5929 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
5930
5931 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
5932
5933 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
5934
5935 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
5936 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
5937
5938 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
5939 NegDivScale0, Mul, DivScale1);
5940
5941 SDValue Scale;
5942
Tom Stellard5bfbae52018-07-11 20:59:01 +00005943 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005944  // Work around a hardware bug on SI where the condition output from div_scale
5945 // is not usable.
5946
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005947 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005948
5949    // Figure out which scale to use for div_fmas.
5950 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
5951 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
5952 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
5953 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
5954
5955 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
5956 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
5957
5958 SDValue Scale0Hi
5959 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
5960 SDValue Scale1Hi
5961 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
5962
5963 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
5964 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
5965 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
5966 } else {
5967 Scale = DivScale1.getValue(1);
5968 }
5969
5970 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
5971 Fma4, Fma3, Mul, Scale);
5972
5973 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005974}
5975
5976SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
5977 EVT VT = Op.getValueType();
5978
5979 if (VT == MVT::f32)
5980 return LowerFDIV32(Op, DAG);
5981
5982 if (VT == MVT::f64)
5983 return LowerFDIV64(Op, DAG);
5984
Matt Arsenault4052a572016-12-22 03:05:41 +00005985 if (VT == MVT::f16)
5986 return LowerFDIV16(Op, DAG);
5987
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005988 llvm_unreachable("Unexpected type for fdiv");
5989}
5990
Tom Stellard81d871d2013-11-13 23:36:50 +00005991SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5992 SDLoc DL(Op);
5993 StoreSDNode *Store = cast<StoreSDNode>(Op);
5994 EVT VT = Store->getMemoryVT();
5995
Matt Arsenault95245662016-02-11 05:32:46 +00005996 if (VT == MVT::i1) {
5997 return DAG.getTruncStore(Store->getChain(), DL,
5998 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
5999 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00006000 }
6001
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006002 assert(VT.isVector() &&
6003 Store->getValue().getValueType().getScalarType() == MVT::i32);
6004
6005 unsigned AS = Store->getAddressSpace();
6006 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
6007 AS, Store->getAlignment())) {
6008 return expandUnalignedStore(Store, DAG);
6009 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006010
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006011 MachineFunction &MF = DAG.getMachineFunction();
6012 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6013  // If there is a possibility that flat instructions access scratch memory
6014 // then we need to use the same legalization rules we use for private.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006015 if (AS == AMDGPUASI.FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006016 AS = MFI->hasFlatScratchInit() ?
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006017 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006018
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006019 unsigned NumElements = VT.getVectorNumElements();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006020 if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
6021 AS == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006022 if (NumElements > 4)
6023 return SplitVectorStore(Op, DAG);
6024 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006025 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006026 switch (Subtarget->getMaxPrivateElementSize()) {
6027 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006028 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006029 case 8:
6030 if (NumElements > 2)
6031 return SplitVectorStore(Op, DAG);
6032 return SDValue();
6033 case 16:
6034 if (NumElements > 4)
6035 return SplitVectorStore(Op, DAG);
6036 return SDValue();
6037 default:
6038 llvm_unreachable("unsupported private_element_size");
6039 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006040 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006041 // Use ds_write_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006042 if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006043 VT.getStoreSize() == 16)
6044 return SDValue();
6045
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006046 if (NumElements > 2)
6047 return SplitVectorStore(Op, DAG);
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006048 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006049 } else {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006050 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00006051 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006052}
6053
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006054SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006055 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006056 EVT VT = Op.getValueType();
6057 SDValue Arg = Op.getOperand(0);
Sanjay Patela2607012015-09-16 16:31:21 +00006058 // TODO: Should this propagate fast-math-flags?
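  // Descriptive note: the SIN_HW/COS_HW ops take their input in revolutions
  // rather than radians (1.0 == 2*pi), so the argument is scaled by 0.5/pi
  // and range-reduced with FRACT before being handed to the hardware op.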
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006059 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
6060 DAG.getNode(ISD::FMUL, DL, VT, Arg,
6061 DAG.getConstantFP(0.5/M_PI, DL,
6062 VT)));
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006063
6064 switch (Op.getOpcode()) {
6065 case ISD::FCOS:
6066 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
6067 case ISD::FSIN:
6068 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
6069 default:
6070 llvm_unreachable("Wrong trig opcode");
6071 }
6072}
6073
Tom Stellard354a43c2016-04-01 18:27:37 +00006074SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
6075 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
6076 assert(AtomicNode->isCompareAndSwap());
6077 unsigned AS = AtomicNode->getAddressSpace();
6078
6079 // No custom lowering required for local address space
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006080 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
Tom Stellard354a43c2016-04-01 18:27:37 +00006081 return Op;
6082
6083 // Non-local address space requires custom lowering for atomic compare
6084 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
6085 SDLoc DL(Op);
6086 SDValue ChainIn = Op.getOperand(0);
6087 SDValue Addr = Op.getOperand(1);
6088 SDValue Old = Op.getOperand(2);
6089 SDValue New = Op.getOperand(3);
6090 EVT VT = Op.getValueType();
6091 MVT SimpleVT = VT.getSimpleVT();
6092 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
6093
Ahmed Bougacha128f8732016-04-26 21:15:30 +00006094 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00006095 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00006096
6097 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
6098 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00006099}
6100
Tom Stellard75aadc22012-12-11 21:25:42 +00006101//===----------------------------------------------------------------------===//
6102// Custom DAG optimizations
6103//===----------------------------------------------------------------------===//
6104
Matt Arsenault364a6742014-06-11 17:50:44 +00006105SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00006106 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00006107 EVT VT = N->getValueType(0);
6108 EVT ScalarVT = VT.getScalarType();
6109 if (ScalarVT != MVT::f32)
6110 return SDValue();
6111
6112 SelectionDAG &DAG = DCI.DAG;
6113 SDLoc DL(N);
6114
6115 SDValue Src = N->getOperand(0);
6116 EVT SrcVT = Src.getValueType();
6117
6118 // TODO: We could try to match extracting the higher bytes, which would be
6119 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
6120 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
6121 // about in practice.
Craig Topper80d3bb32018-03-06 19:44:52 +00006122 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
Matt Arsenault364a6742014-06-11 17:50:44 +00006123 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
6124 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
6125 DCI.AddToWorklist(Cvt.getNode());
6126 return Cvt;
6127 }
6128 }
6129
Matt Arsenault364a6742014-06-11 17:50:44 +00006130 return SDValue();
6131}
6132
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006133// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
6134
6135// This is a variant of
6136// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
6137//
6138// The normal DAG combiner will do this, but only if the add has one use since
6139// that would increase the number of instructions.
6140//
6141// This prevents us from seeing a constant offset that can be folded into a
6142// memory instruction's addressing mode. If we know the resulting add offset of
6143// a pointer can be folded into an addressing offset, we can replace the pointer
6144// operand with the add of the new constant offset. This eliminates one of the uses,
6145// and may allow the remaining use to also be simplified.
6146//
6147SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
6148 unsigned AddrSpace,
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006149 EVT MemVT,
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006150 DAGCombinerInfo &DCI) const {
6151 SDValue N0 = N->getOperand(0);
6152 SDValue N1 = N->getOperand(1);
6153
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006154 // We only do this to handle cases where it's profitable when there are
6155 // multiple uses of the add, so defer to the standard combine.
Matt Arsenaultc8903122017-11-14 23:46:42 +00006156 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
6157 N0->hasOneUse())
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006158 return SDValue();
6159
6160 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
6161 if (!CN1)
6162 return SDValue();
6163
6164 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
6165 if (!CAdd)
6166 return SDValue();
6167
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006168 // If the resulting offset is too large, we can't fold it into the addressing
6169 // mode offset.
6170 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006171 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
6172
6173 AddrMode AM;
6174 AM.HasBaseReg = true;
6175 AM.BaseOffs = Offset.getSExtValue();
6176 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006177 return SDValue();
6178
6179 SelectionDAG &DAG = DCI.DAG;
6180 SDLoc SL(N);
6181 EVT VT = N->getValueType(0);
6182
6183 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006184 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006185
Matt Arsenaulte5e0c742017-11-13 05:33:35 +00006186 SDNodeFlags Flags;
6187 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
6188 (N0.getOpcode() == ISD::OR ||
6189 N0->getFlags().hasNoUnsignedWrap()));
6190
6191 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006192}
6193
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00006194SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
6195 DAGCombinerInfo &DCI) const {
6196 SDValue Ptr = N->getBasePtr();
6197 SelectionDAG &DAG = DCI.DAG;
6198 SDLoc SL(N);
6199
6200 // TODO: We could also do this for multiplies.
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006201 if (Ptr.getOpcode() == ISD::SHL) {
6202 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
6203 N->getMemoryVT(), DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00006204 if (NewPtr) {
6205 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
6206
6207 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
6208 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
6209 }
6210 }
6211
6212 return SDValue();
6213}
6214
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006215static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
6216 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
6217 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
6218 (Opc == ISD::XOR && Val == 0);
6219}
6220
6221// Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor. This
6222// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
6223// integer combine opportunities since most 64-bit operations are decomposed
6224// this way. TODO: We won't want this for SALU especially if it is an inline
6225// immediate.
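// Worked example (illustrative only): (and i64:x, 0x00000000ffffffff) splits
// into (and lo32(x), 0xffffffff) and (and hi32(x), 0x0); both halves are
// reducible, so they fold to lo32(x) and 0 and no 64-bit immediate has to be
// materialized.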
6226SDValue SITargetLowering::splitBinaryBitConstantOp(
6227 DAGCombinerInfo &DCI,
6228 const SDLoc &SL,
6229 unsigned Opc, SDValue LHS,
6230 const ConstantSDNode *CRHS) const {
6231 uint64_t Val = CRHS->getZExtValue();
6232 uint32_t ValLo = Lo_32(Val);
6233 uint32_t ValHi = Hi_32(Val);
6234 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6235
6236 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
6237 bitOpWithConstantIsReducible(Opc, ValHi)) ||
6238 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
6239 // If we need to materialize a 64-bit immediate, it will be split up later
6240 // anyway. Avoid creating the harder to understand 64-bit immediate
6241 // materialization.
6242 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
6243 }
6244
6245 return SDValue();
6246}
6247
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006248// Returns true if the argument is a boolean value which is not serialized into
6249// memory or as an argument and does not require v_cndmask_b32 to be deserialized.
6250static bool isBoolSGPR(SDValue V) {
6251 if (V.getValueType() != MVT::i1)
6252 return false;
6253 switch (V.getOpcode()) {
6254 default: break;
6255 case ISD::SETCC:
6256 case ISD::AND:
6257 case ISD::OR:
6258 case ISD::XOR:
6259 case AMDGPUISD::FP_CLASS:
6260 return true;
6261 }
6262 return false;
6263}
6264
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006265// If a constant has all zeroes or all ones within each byte return it.
6266// Otherwise return 0.
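// For example (illustrative only), 0x00ff00ff is returned unchanged since
// every byte is all zeroes or all ones, while 0x00120000 contains a partial
// byte and yields 0.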
6267static uint32_t getConstantPermuteMask(uint32_t C) {
6268 // 0xff for any zero byte in the mask
6269 uint32_t ZeroByteMask = 0;
6270 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
6271 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
6272 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
6273 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
6274 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
6275 if ((NonZeroByteMask & C) != NonZeroByteMask)
6276 return 0; // Partial bytes selected.
6277 return C;
6278}
6279
6280// Check if a node selects whole bytes from its operand 0 starting at a byte
6281// boundary while masking the rest. Returns the select mask as used by v_perm_b32,
6282// or ~0 if it does not.
6283// Note byte select encoding:
6284// value 0-3 selects corresponding source byte;
6285// value 0xc selects zero;
6286// value 0xff selects 0xff.
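// Worked examples (illustrative only):
//   (and x, 0x0000ffff) -> 0x0c0c0100 (keep bytes 1:0 of x, zero bytes 3:2)
//   (shl x, 16)         -> 0x01000c0c (bytes 1:0 of x become bytes 3:2)
//   (srl x, 8)          -> 0x0c030201 (bytes 3:1 of x become bytes 2:0)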
6287static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
6288 assert(V.getValueSizeInBits() == 32);
6289
6290 if (V.getNumOperands() != 2)
6291 return ~0;
6292
6293 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
6294 if (!N1)
6295 return ~0;
6296
6297 uint32_t C = N1->getZExtValue();
6298
6299 switch (V.getOpcode()) {
6300 default:
6301 break;
6302 case ISD::AND:
6303 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
6304 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
6305 }
6306 break;
6307
6308 case ISD::OR:
6309 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
6310 return (0x03020100 & ~ConstMask) | ConstMask;
6311 }
6312 break;
6313
6314 case ISD::SHL:
6315 if (C % 8)
6316 return ~0;
6317
6318 return uint32_t((0x030201000c0c0c0cull << C) >> 32);
6319
6320 case ISD::SRL:
6321 if (C % 8)
6322 return ~0;
6323
6324 return uint32_t(0x0c0c0c0c03020100ull >> C);
6325 }
6326
6327 return ~0;
6328}
6329
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006330SDValue SITargetLowering::performAndCombine(SDNode *N,
6331 DAGCombinerInfo &DCI) const {
6332 if (DCI.isBeforeLegalize())
6333 return SDValue();
6334
6335 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006336 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006337 SDValue LHS = N->getOperand(0);
6338 SDValue RHS = N->getOperand(1);
6339
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006340
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00006341 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
6342 if (VT == MVT::i64 && CRHS) {
6343 if (SDValue Split
6344 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
6345 return Split;
6346 }
6347
6348 if (CRHS && VT == MVT::i32) {
6349 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
6350 // nb = number of trailing zeroes in mask
6351 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
6352     // given that we are selecting 8- or 16-bit fields starting at a byte boundary.
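    // Worked example (illustrative only, on an SDWA-capable target): with
    // mask = 0xff00 and c = 8 we get nb = 8 and a field starting at bit 16,
    // so (and (srl x, 8), 0xff00) becomes
    // (shl (AssertZext i8 (bfe_u32 x, 16, 8)), 8).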
6353 uint64_t Mask = CRHS->getZExtValue();
6354 unsigned Bits = countPopulation(Mask);
6355 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
6356 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
6357 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
6358 unsigned Shift = CShift->getZExtValue();
6359 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
6360 unsigned Offset = NB + Shift;
6361 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
6362 SDLoc SL(N);
6363 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
6364 LHS->getOperand(0),
6365 DAG.getConstant(Offset, SL, MVT::i32),
6366 DAG.getConstant(Bits, SL, MVT::i32));
6367 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
6368 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
6369 DAG.getValueType(NarrowVT));
6370 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
6371 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
6372 return Shl;
6373 }
6374 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006375 }
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006376
6377 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
6378 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
6379 isa<ConstantSDNode>(LHS.getOperand(2))) {
6380 uint32_t Sel = getConstantPermuteMask(Mask);
6381 if (!Sel)
6382 return SDValue();
6383
6384 // Select 0xc for all zero bytes
6385 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
6386 SDLoc DL(N);
6387 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
6388 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
6389 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006390 }
6391
6392 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
6393 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
6394 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006395 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6396 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
6397
6398 SDValue X = LHS.getOperand(0);
6399 SDValue Y = RHS.getOperand(0);
6400 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
6401 return SDValue();
6402
6403 if (LCC == ISD::SETO) {
6404 if (X != LHS.getOperand(1))
6405 return SDValue();
6406
6407 if (RCC == ISD::SETUNE) {
6408 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
6409 if (!C1 || !C1->isInfinity() || C1->isNegative())
6410 return SDValue();
6411
6412 const uint32_t Mask = SIInstrFlags::N_NORMAL |
6413 SIInstrFlags::N_SUBNORMAL |
6414 SIInstrFlags::N_ZERO |
6415 SIInstrFlags::P_ZERO |
6416 SIInstrFlags::P_SUBNORMAL |
6417 SIInstrFlags::P_NORMAL;
6418
6419 static_assert(((~(SIInstrFlags::S_NAN |
6420 SIInstrFlags::Q_NAN |
6421 SIInstrFlags::N_INFINITY |
6422 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
6423 "mask not equal");
6424
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006425 SDLoc DL(N);
6426 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6427 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006428 }
6429 }
6430 }
6431
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006432 if (VT == MVT::i32 &&
6433 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
6434 // and x, (sext cc from i1) => select cc, x, 0
6435 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
6436 std::swap(LHS, RHS);
6437 if (isBoolSGPR(RHS.getOperand(0)))
6438 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
6439 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
6440 }
6441
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006442 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
6443 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6444 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
6445 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
6446 uint32_t LHSMask = getPermuteMask(DAG, LHS);
6447 uint32_t RHSMask = getPermuteMask(DAG, RHS);
6448 if (LHSMask != ~0u && RHSMask != ~0u) {
6449 // Canonicalize the expression in an attempt to have fewer unique masks
6450 // and therefore fewer registers used to hold the masks.
6451 if (LHSMask > RHSMask) {
6452 std::swap(LHSMask, RHSMask);
6453 std::swap(LHS, RHS);
6454 }
6455
6456       // Select 0xc for each lane used from the source operand. Zero has the 0xc
6457       // mask set, 0xff has 0xff in the mask, and actual lanes are in the 0-3 range.
6458 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6459 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6460
6461       // Check if we need to combine values from two sources within a byte.
6462 if (!(LHSUsedLanes & RHSUsedLanes) &&
6463           // If we select the high and low words, keep it for SDWA.
6464 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
6465 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
6466         // Each byte in each mask is either a selector value 0-3, or has higher
6467         // bits set: 0xff selects 0xff and 0x0c selects zero. If 0x0c appears in
6468         // either mask that byte must stay 0x0c. Otherwise the mask that is not
6469         // 0xff wins. ANDing both masks gives the correct result, except that a
6470         // byte that must be 0x0c has to be forced back to exactly 0x0c.
6471 uint32_t Mask = LHSMask & RHSMask;
6472 for (unsigned I = 0; I < 32; I += 8) {
6473 uint32_t ByteSel = 0xff << I;
6474 if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
6475 Mask &= (0x0c << I) & 0xffffffff;
6476 }
6477
6478 // Add 4 to each active LHS lane. It will not affect any existing 0xff
6479 // or 0x0c.
6480 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
6481 SDLoc DL(N);
6482
6483 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
6484 LHS.getOperand(0), RHS.getOperand(0),
6485 DAG.getConstant(Sel, DL, MVT::i32));
6486 }
6487 }
6488 }
6489
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006490 return SDValue();
6491}
6492
Matt Arsenaultf2290332015-01-06 23:00:39 +00006493SDValue SITargetLowering::performOrCombine(SDNode *N,
6494 DAGCombinerInfo &DCI) const {
6495 SelectionDAG &DAG = DCI.DAG;
6496 SDValue LHS = N->getOperand(0);
6497 SDValue RHS = N->getOperand(1);
6498
Matt Arsenault3b082382016-04-12 18:24:38 +00006499 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006500 if (VT == MVT::i1) {
6501 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
6502 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
6503 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
6504 SDValue Src = LHS.getOperand(0);
6505 if (Src != RHS.getOperand(0))
6506 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00006507
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006508 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
6509 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
6510 if (!CLHS || !CRHS)
6511 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00006512
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006513 // Only 10 bits are used.
6514 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00006515
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006516 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
6517 SDLoc DL(N);
6518 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6519 Src, DAG.getConstant(NewMask, DL, MVT::i32));
6520 }
Matt Arsenault3b082382016-04-12 18:24:38 +00006521
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006522 return SDValue();
6523 }
6524
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006525 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
6526 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
6527 LHS.getOpcode() == AMDGPUISD::PERM &&
6528 isa<ConstantSDNode>(LHS.getOperand(2))) {
6529 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
6530 if (!Sel)
6531 return SDValue();
6532
6533 Sel |= LHS.getConstantOperandVal(2);
6534 SDLoc DL(N);
6535 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
6536 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
6537 }
6538
6539 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
6540 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6541 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
6542 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
6543 uint32_t LHSMask = getPermuteMask(DAG, LHS);
6544 uint32_t RHSMask = getPermuteMask(DAG, RHS);
6545 if (LHSMask != ~0u && RHSMask != ~0u) {
6546 // Canonicalize the expression in an attempt to have fewer unique masks
6547 // and therefore fewer registers used to hold the masks.
6548 if (LHSMask > RHSMask) {
6549 std::swap(LHSMask, RHSMask);
6550 std::swap(LHS, RHS);
6551 }
6552
6553       // Select 0xc for each lane used from the source operand. Zero has the 0xc
6554       // mask set, 0xff has 0xff in the mask, and actual lanes are in the 0-3 range.
6555 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6556 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6557
6558       // Check if we need to combine values from two sources within a byte.
6559 if (!(LHSUsedLanes & RHSUsedLanes) &&
6560           // If we select the high and low words, keep it for SDWA.
6561 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
6562 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
6563 // Kill zero bytes selected by other mask. Zero value is 0xc.
6564 LHSMask &= ~RHSUsedLanes;
6565 RHSMask &= ~LHSUsedLanes;
6566 // Add 4 to each active LHS lane
6567 LHSMask |= LHSUsedLanes & 0x04040404;
6568 // Combine masks
6569 uint32_t Sel = LHSMask | RHSMask;
6570 SDLoc DL(N);
6571
6572 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
6573 LHS.getOperand(0), RHS.getOperand(0),
6574 DAG.getConstant(Sel, DL, MVT::i32));
6575 }
6576 }
6577 }
6578
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006579 if (VT != MVT::i64)
6580 return SDValue();
6581
6582 // TODO: This could be a generic combine with a predicate for extracting the
6583 // high half of an integer being free.
6584
6585 // (or i64:x, (zero_extend i32:y)) ->
6586 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
6587 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
6588 RHS.getOpcode() != ISD::ZERO_EXTEND)
6589 std::swap(LHS, RHS);
6590
6591 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
6592 SDValue ExtSrc = RHS.getOperand(0);
6593 EVT SrcVT = ExtSrc.getValueType();
6594 if (SrcVT == MVT::i32) {
6595 SDLoc SL(N);
6596 SDValue LowLHS, HiBits;
6597 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
6598 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
6599
6600 DCI.AddToWorklist(LowOr.getNode());
6601 DCI.AddToWorklist(HiBits.getNode());
6602
6603 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
6604 LowOr, HiBits);
6605 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00006606 }
6607 }
6608
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006609 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
6610 if (CRHS) {
6611 if (SDValue Split
6612 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
6613 return Split;
6614 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00006615
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006616 return SDValue();
6617}
Matt Arsenaultf2290332015-01-06 23:00:39 +00006618
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006619SDValue SITargetLowering::performXorCombine(SDNode *N,
6620 DAGCombinerInfo &DCI) const {
6621 EVT VT = N->getValueType(0);
6622 if (VT != MVT::i64)
6623 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00006624
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006625 SDValue LHS = N->getOperand(0);
6626 SDValue RHS = N->getOperand(1);
6627
6628 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
6629 if (CRHS) {
6630 if (SDValue Split
6631 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
6632 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00006633 }
6634
6635 return SDValue();
6636}
6637
Matt Arsenault5cf42712017-04-06 20:58:30 +00006638// Instructions that will be lowered with a final instruction that zeros the
6639// high result bits.
6640// XXX - probably only need to list legal operations.
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006641static bool fp16SrcZerosHighBits(unsigned Opc) {
6642 switch (Opc) {
Matt Arsenault5cf42712017-04-06 20:58:30 +00006643 case ISD::FADD:
6644 case ISD::FSUB:
6645 case ISD::FMUL:
6646 case ISD::FDIV:
6647 case ISD::FREM:
6648 case ISD::FMA:
6649 case ISD::FMAD:
6650 case ISD::FCANONICALIZE:
6651 case ISD::FP_ROUND:
6652 case ISD::UINT_TO_FP:
6653 case ISD::SINT_TO_FP:
6654 case ISD::FABS:
6655 // Fabs is lowered to a bit operation, but it's an and which will clear the
6656 // high bits anyway.
6657 case ISD::FSQRT:
6658 case ISD::FSIN:
6659 case ISD::FCOS:
6660 case ISD::FPOWI:
6661 case ISD::FPOW:
6662 case ISD::FLOG:
6663 case ISD::FLOG2:
6664 case ISD::FLOG10:
6665 case ISD::FEXP:
6666 case ISD::FEXP2:
6667 case ISD::FCEIL:
6668 case ISD::FTRUNC:
6669 case ISD::FRINT:
6670 case ISD::FNEARBYINT:
6671 case ISD::FROUND:
6672 case ISD::FFLOOR:
6673 case ISD::FMINNUM:
6674 case ISD::FMAXNUM:
6675 case AMDGPUISD::FRACT:
6676 case AMDGPUISD::CLAMP:
6677 case AMDGPUISD::COS_HW:
6678 case AMDGPUISD::SIN_HW:
6679 case AMDGPUISD::FMIN3:
6680 case AMDGPUISD::FMAX3:
6681 case AMDGPUISD::FMED3:
6682 case AMDGPUISD::FMAD_FTZ:
6683 case AMDGPUISD::RCP:
6684 case AMDGPUISD::RSQ:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00006685 case AMDGPUISD::RCP_IFLAG:
Matt Arsenault5cf42712017-04-06 20:58:30 +00006686 case AMDGPUISD::LDEXP:
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006687 return true;
Matt Arsenault5cf42712017-04-06 20:58:30 +00006688 default:
6689 // fcopysign, select and others may be lowered to 32-bit bit operations
6690 // which don't zero the high bits.
6691 return false;
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006692 }
6693}
6694
6695SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
6696 DAGCombinerInfo &DCI) const {
6697 if (!Subtarget->has16BitInsts() ||
6698 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6699 return SDValue();
6700
6701 EVT VT = N->getValueType(0);
6702 if (VT != MVT::i32)
6703 return SDValue();
6704
6705 SDValue Src = N->getOperand(0);
6706 if (Src.getValueType() != MVT::i16)
6707 return SDValue();
6708
6709 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
6710 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
6711 if (Src.getOpcode() == ISD::BITCAST) {
6712 SDValue BCSrc = Src.getOperand(0);
6713 if (BCSrc.getValueType() == MVT::f16 &&
6714 fp16SrcZerosHighBits(BCSrc.getOpcode()))
6715 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
6716 }
6717
6718 return SDValue();
6719}
6720
Matt Arsenaultf2290332015-01-06 23:00:39 +00006721SDValue SITargetLowering::performClassCombine(SDNode *N,
6722 DAGCombinerInfo &DCI) const {
6723 SelectionDAG &DAG = DCI.DAG;
6724 SDValue Mask = N->getOperand(1);
6725
6726 // fp_class x, 0 -> false
6727 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
6728 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006729 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00006730 }
6731
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00006732 if (N->getOperand(0).isUndef())
6733 return DAG.getUNDEF(MVT::i1);
6734
Matt Arsenaultf2290332015-01-06 23:00:39 +00006735 return SDValue();
6736}
6737
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00006738SDValue SITargetLowering::performRcpCombine(SDNode *N,
6739 DAGCombinerInfo &DCI) const {
6740 EVT VT = N->getValueType(0);
6741 SDValue N0 = N->getOperand(0);
6742
6743 if (N0.isUndef())
6744 return N0;
6745
6746 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
6747 N0.getOpcode() == ISD::SINT_TO_FP)) {
6748 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
6749 N->getFlags());
6750 }
6751
6752 return AMDGPUTargetLowering::performRcpCombine(N, DCI);
6753}
6754
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006755bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
6756 unsigned MaxDepth) const {
6757 unsigned Opcode = Op.getOpcode();
6758 if (Opcode == ISD::FCANONICALIZE)
6759 return true;
6760
6761 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
6762 auto F = CFP->getValueAPF();
6763 if (F.isNaN() && F.isSignaling())
6764 return false;
6765 return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
6766 }
6767
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006768  // If the source is a result of another standard FP operation it is already in
6769 // canonical form.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006770 if (MaxDepth == 0)
6771 return false;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006772
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006773 switch (Opcode) {
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006774 // These will flush denorms if required.
6775 case ISD::FADD:
6776 case ISD::FSUB:
6777 case ISD::FMUL:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006778 case ISD::FCEIL:
6779 case ISD::FFLOOR:
6780 case ISD::FMA:
6781 case ISD::FMAD:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006782 case ISD::FSQRT:
6783 case ISD::FDIV:
6784 case ISD::FREM:
Matt Arsenaultce6d61f2018-08-06 21:51:52 +00006785 case ISD::FP_ROUND:
6786 case ISD::FP_EXTEND:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006787 case AMDGPUISD::FMUL_LEGACY:
6788 case AMDGPUISD::FMAD_FTZ:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00006789 case AMDGPUISD::RCP:
6790 case AMDGPUISD::RSQ:
6791 case AMDGPUISD::RSQ_CLAMP:
6792 case AMDGPUISD::RCP_LEGACY:
6793 case AMDGPUISD::RSQ_LEGACY:
6794 case AMDGPUISD::RCP_IFLAG:
6795 case AMDGPUISD::TRIG_PREOP:
6796 case AMDGPUISD::DIV_SCALE:
6797 case AMDGPUISD::DIV_FMAS:
6798 case AMDGPUISD::DIV_FIXUP:
6799 case AMDGPUISD::FRACT:
6800 case AMDGPUISD::LDEXP:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00006801 case AMDGPUISD::CVT_PKRTZ_F16_F32:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006802 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006803
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006804 // It can/will be lowered or combined as a bit operation.
6805 // Need to check their input recursively to handle.
6806 case ISD::FNEG:
6807 case ISD::FABS:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006808 case ISD::FCOPYSIGN:
6809 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006810
6811 case ISD::FSIN:
6812 case ISD::FCOS:
6813 case ISD::FSINCOS:
6814 return Op.getValueType().getScalarType() != MVT::f16;
6815
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006816 case ISD::FMINNUM:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00006817 case ISD::FMAXNUM:
6818 case AMDGPUISD::CLAMP:
6819 case AMDGPUISD::FMED3:
6820 case AMDGPUISD::FMAX3:
6821 case AMDGPUISD::FMIN3: {
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006822    // FIXME: Shouldn't treat the generic operations differently based on these.
6823 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
6824 if (IsIEEEMode) {
6825 // snans will be quieted, so we only need to worry about denormals.
6826 if (Subtarget->supportsMinMaxDenormModes() ||
6827 denormalsEnabledForType(Op.getValueType()))
6828 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006829
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006830 // Flushing may be required.
6831 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
6832    // targets we need to check the inputs recursively.
6833 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
6834 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
6835 }
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006836
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006837 if (Subtarget->supportsMinMaxDenormModes() ||
6838 denormalsEnabledForType(Op.getValueType())) {
6839 // Only quieting may be necessary.
6840 return DAG.isKnownNeverSNaN(Op.getOperand(0)) &&
6841 DAG.isKnownNeverSNaN(Op.getOperand(1));
6842 }
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006843
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006844    // Flushing and quieting may be necessary.
6845 // With ieee_mode off, the nan is returned as-is, so if it is an sNaN it
6846 // needs to be quieted.
6847 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
6848 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006849 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006850 case ISD::SELECT: {
6851 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
6852 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006853 }
Matt Arsenaulte94ee832018-08-06 22:45:51 +00006854 case ISD::BUILD_VECTOR: {
6855 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6856 SDValue SrcOp = Op.getOperand(i);
6857 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
6858 return false;
6859 }
6860
6861 return true;
6862 }
6863 case ISD::EXTRACT_VECTOR_ELT:
6864 case ISD::EXTRACT_SUBVECTOR: {
6865 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
6866 }
6867 case ISD::INSERT_VECTOR_ELT: {
6868 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
6869 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
6870 }
6871 case ISD::UNDEF:
6872 // Could be anything.
6873 return false;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00006874
6875 case ISD::INTRINSIC_WO_CHAIN: {
6876 unsigned IntrinsicID
6877 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
6878 // TODO: Handle more intrinsics
6879 switch (IntrinsicID) {
6880 case Intrinsic::amdgcn_cvt_pkrtz:
6881 return true;
6882 default:
6883 break;
6884 }
Matt Arsenault5bb9d792018-08-10 17:57:12 +00006885
6886 LLVM_FALLTHROUGH;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00006887 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00006888 default:
6889 return denormalsEnabledForType(Op.getValueType()) &&
6890 DAG.isKnownNeverSNaN(Op);
6891 }
6892
6893 llvm_unreachable("invalid operation");
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006894}
6895
Matt Arsenault9cd90712016-04-14 01:42:16 +00006896// Constant fold canonicalize.
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00006897
6898SDValue SITargetLowering::getCanonicalConstantFP(
6899 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
6900 // Flush denormals to 0 if not enabled.
6901 if (C.isDenormal() && !denormalsEnabledForType(VT))
6902 return DAG.getConstantFP(0.0, SL, VT);
6903
6904 if (C.isNaN()) {
6905 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
6906 if (C.isSignaling()) {
6907 // Quiet a signaling NaN.
6908 // FIXME: Is this supposed to preserve payload bits?
6909 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
6910 }
6911
6912 // Make sure it is the canonical NaN bitpattern.
6913 //
6914 // TODO: Can we use -1 as the canonical NaN value since it's an inline
6915 // immediate?
6916 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
6917 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
6918 }
6919
6920 // Already canonical.
6921 return DAG.getConstantFP(C, SL, VT);
6922}
6923
Matt Arsenaulta29e7622018-08-06 22:30:44 +00006924static bool vectorEltWillFoldAway(SDValue Op) {
6925 return Op.isUndef() || isa<ConstantFPSDNode>(Op);
6926}
6927
Matt Arsenault9cd90712016-04-14 01:42:16 +00006928SDValue SITargetLowering::performFCanonicalizeCombine(
6929 SDNode *N,
6930 DAGCombinerInfo &DCI) const {
Matt Arsenault9cd90712016-04-14 01:42:16 +00006931 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault4aec86d2018-07-31 13:34:31 +00006932 SDValue N0 = N->getOperand(0);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00006933 EVT VT = N->getValueType(0);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006934
Matt Arsenault4aec86d2018-07-31 13:34:31 +00006935 // fcanonicalize undef -> qnan
6936 if (N0.isUndef()) {
Matt Arsenault4aec86d2018-07-31 13:34:31 +00006937 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
6938 return DAG.getConstantFP(QNaN, SDLoc(N), VT);
6939 }
6940
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00006941 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
Matt Arsenault9cd90712016-04-14 01:42:16 +00006942 EVT VT = N->getValueType(0);
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00006943 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
Matt Arsenault9cd90712016-04-14 01:42:16 +00006944 }
6945
Matt Arsenaulta29e7622018-08-06 22:30:44 +00006946 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
6947 // (fcanonicalize k)
6948 //
6949 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
6950
6951 // TODO: This could be better with wider vectors that will be split to v2f16,
6952 // and to consider uses since there aren't that many packed operations.
6953 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16) {
6954 SDLoc SL(N);
6955 SDValue NewElts[2];
6956 SDValue Lo = N0.getOperand(0);
6957 SDValue Hi = N0.getOperand(1);
6958 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
6959 for (unsigned I = 0; I != 2; ++I) {
6960 SDValue Op = N0.getOperand(I);
6961 EVT EltVT = Op.getValueType();
6962 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
6963 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
6964 CFP->getValueAPF());
6965 } else if (Op.isUndef()) {
6966 // This would ordinarily be folded to a qNaN. Since this may be half
6967 // of a packed operation, it may be cheaper to use a 0.
6968 NewElts[I] = DAG.getConstantFP(0.0f, SL, EltVT);
6969 } else {
6970 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
6971 }
6972 }
6973
6974 return DAG.getBuildVector(VT, SL, NewElts);
6975 }
6976 }
6977
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00006978 return isCanonicalized(DAG, N0) ? N0 : SDValue();
Matt Arsenault9cd90712016-04-14 01:42:16 +00006979}
6980
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006981static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
6982 switch (Opc) {
6983 case ISD::FMAXNUM:
6984 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006985 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006986 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006987 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006988 return AMDGPUISD::UMAX3;
6989 case ISD::FMINNUM:
6990 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006991 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006992 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006993 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006994 return AMDGPUISD::UMIN3;
6995 default:
6996 llvm_unreachable("Not a min/max opcode");
6997 }
6998}
6999
Matt Arsenault10268f92017-02-27 22:40:39 +00007000SDValue SITargetLowering::performIntMed3ImmCombine(
7001 SelectionDAG &DAG, const SDLoc &SL,
7002 SDValue Op0, SDValue Op1, bool Signed) const {
Matt Arsenaultf639c322016-01-28 20:53:42 +00007003 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
7004 if (!K1)
7005 return SDValue();
7006
7007 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
7008 if (!K0)
7009 return SDValue();
7010
Matt Arsenaultf639c322016-01-28 20:53:42 +00007011 if (Signed) {
7012 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
7013 return SDValue();
7014 } else {
7015 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
7016 return SDValue();
7017 }
7018
7019 EVT VT = K0->getValueType(0);
Matt Arsenault10268f92017-02-27 22:40:39 +00007020 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
7021 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
7022 return DAG.getNode(Med3Opc, SL, VT,
7023 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
7024 }
Tom Stellard115a6152016-11-10 16:02:37 +00007025
Matt Arsenault10268f92017-02-27 22:40:39 +00007026 // If there isn't a 16-bit med3 operation, convert to 32-bit.
Tom Stellard115a6152016-11-10 16:02:37 +00007027 MVT NVT = MVT::i32;
7028 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
7029
Matt Arsenault10268f92017-02-27 22:40:39 +00007030 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
7031 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
7032 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
Tom Stellard115a6152016-11-10 16:02:37 +00007033
Matt Arsenault10268f92017-02-27 22:40:39 +00007034 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
7035 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
Matt Arsenaultf639c322016-01-28 20:53:42 +00007036}
7037
Matt Arsenault6b114d22017-08-30 01:20:17 +00007038static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
7039 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
7040 return C;
7041
7042 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
7043 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
7044 return C;
7045 }
7046
7047 return nullptr;
7048}
7049
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007050SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
7051 const SDLoc &SL,
7052 SDValue Op0,
7053 SDValue Op1) const {
Matt Arsenault6b114d22017-08-30 01:20:17 +00007054 ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
Matt Arsenaultf639c322016-01-28 20:53:42 +00007055 if (!K1)
7056 return SDValue();
7057
Matt Arsenault6b114d22017-08-30 01:20:17 +00007058 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
Matt Arsenaultf639c322016-01-28 20:53:42 +00007059 if (!K0)
7060 return SDValue();
7061
7062 // Ordered >= (although NaN inputs should have folded away by now).
7063 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
7064 if (Cmp == APFloat::cmpGreaterThan)
7065 return SDValue();
7066
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007067 // TODO: Check IEEE bit enabled?
Matt Arsenault6b114d22017-08-30 01:20:17 +00007068 EVT VT = Op0.getValueType();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007069 if (Subtarget->enableDX10Clamp()) {
7070 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
7071 // hardware fmed3 behavior converting to a min.
7072 // FIXME: Should this be allowing -0.0?
7073 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
7074 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
7075 }
7076
Matt Arsenault6b114d22017-08-30 01:20:17 +00007077 // med3 for f16 is only available on gfx9+, and not available for v2f16.
7078 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
7079 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
7080 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
7081 // then give the other result, which is different from med3 with a NaN
7082 // input.
7083 SDValue Var = Op0.getOperand(0);
Matt Arsenaultc3dc8e62018-08-03 18:27:52 +00007084 if (!DAG.isKnownNeverSNaN(Var))
Matt Arsenault6b114d22017-08-30 01:20:17 +00007085 return SDValue();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007086
Matt Arsenault6b114d22017-08-30 01:20:17 +00007087 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
7088 Var, SDValue(K0, 0), SDValue(K1, 0));
7089 }
Matt Arsenaultf639c322016-01-28 20:53:42 +00007090
Matt Arsenault6b114d22017-08-30 01:20:17 +00007091 return SDValue();
Matt Arsenaultf639c322016-01-28 20:53:42 +00007092}
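
// Illustrative note (assumed example, not original source): with dx10_clamp
// enabled,
//   (fminnum (fmaxnum f32:%x, 0.0), 1.0)
// becomes (AMDGPUISD::CLAMP %x). With other constants, e.g. K0 = 2.0 and
// K1 = 4.0, it becomes (AMDGPUISD::FMED3 %x, 2.0, 4.0), but only when %x is
// known not to be a signaling NaN.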
7093
7094SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
7095 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007096 SelectionDAG &DAG = DCI.DAG;
7097
Matt Arsenault79a45db2017-02-22 23:53:37 +00007098 EVT VT = N->getValueType(0);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007099 unsigned Opc = N->getOpcode();
7100 SDValue Op0 = N->getOperand(0);
7101 SDValue Op1 = N->getOperand(1);
7102
7103  // Only do this if the inner op has one use since this will just increase
7104 // register pressure for no benefit.
7105
Matt Arsenault79a45db2017-02-22 23:53:37 +00007106
7107 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
Farhana Aleene80aeac2018-04-03 23:00:30 +00007108 !VT.isVector() && VT != MVT::f64 &&
Matt Arsenaultee324ff2017-05-17 19:25:06 +00007109 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
Matt Arsenault5b39b342016-01-28 20:53:48 +00007110 // max(max(a, b), c) -> max3(a, b, c)
7111 // min(min(a, b), c) -> min3(a, b, c)
7112 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
7113 SDLoc DL(N);
7114 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
7115 DL,
7116 N->getValueType(0),
7117 Op0.getOperand(0),
7118 Op0.getOperand(1),
7119 Op1);
7120 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007121
Matt Arsenault5b39b342016-01-28 20:53:48 +00007122 // Try commuted.
7123 // max(a, max(b, c)) -> max3(a, b, c)
7124 // min(a, min(b, c)) -> min3(a, b, c)
7125 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
7126 SDLoc DL(N);
7127 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
7128 DL,
7129 N->getValueType(0),
7130 Op0,
7131 Op1.getOperand(0),
7132 Op1.getOperand(1));
7133 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007134 }
7135
Matt Arsenaultf639c322016-01-28 20:53:42 +00007136 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
7137 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
7138 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
7139 return Med3;
7140 }
7141
7142 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
7143 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
7144 return Med3;
7145 }
7146
7147 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00007148 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
7149 (Opc == AMDGPUISD::FMIN_LEGACY &&
7150 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenault79a45db2017-02-22 23:53:37 +00007151 (VT == MVT::f32 || VT == MVT::f64 ||
Matt Arsenault6b114d22017-08-30 01:20:17 +00007152 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
7153 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007154 Op0.hasOneUse()) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00007155 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
7156 return Res;
7157 }
7158
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007159 return SDValue();
7160}
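
// Illustrative note (hypothetical example): the min3/max3 part of the combine
// above rewrites, roughly,
//   (smax (smax a, b), c) -> (AMDGPUISD::SMAX3 a, b, c)
//   (umin a, (umin b, c)) -> (AMDGPUISD::UMIN3 a, b, c)
// provided the inner node has a single use, so no extra register pressure is
// introduced.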
7161
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007162static bool isClampZeroToOne(SDValue A, SDValue B) {
7163 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
7164 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
7165 // FIXME: Should this be allowing -0.0?
7166 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
7167 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
7168 }
7169 }
7170
7171 return false;
7172}
7173
7174// FIXME: Should only worry about snans for version with chain.
7175SDValue SITargetLowering::performFMed3Combine(SDNode *N,
7176 DAGCombinerInfo &DCI) const {
7177 EVT VT = N->getValueType(0);
7178 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
7179 // NaNs. With a NaN input, the order of the operands may change the result.
7180
7181 SelectionDAG &DAG = DCI.DAG;
7182 SDLoc SL(N);
7183
7184 SDValue Src0 = N->getOperand(0);
7185 SDValue Src1 = N->getOperand(1);
7186 SDValue Src2 = N->getOperand(2);
7187
7188 if (isClampZeroToOne(Src0, Src1)) {
7189 // const_a, const_b, x -> clamp is safe in all cases including signaling
7190 // nans.
7191 // FIXME: Should this be allowing -0.0?
7192 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
7193 }
7194
7195 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
7196 // handling no dx10-clamp?
7197 if (Subtarget->enableDX10Clamp()) {
7198 // If NaNs is clamped to 0, we are free to reorder the inputs.
7199    // If NaNs are clamped to 0, we are free to reorder the inputs.
7200 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
7201 std::swap(Src0, Src1);
7202
7203 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
7204 std::swap(Src1, Src2);
7205
7206 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
7207 std::swap(Src0, Src1);
7208
7209 if (isClampZeroToOne(Src1, Src2))
7210 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
7211 }
7212
7213 return SDValue();
7214}
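
// Illustrative note (assumed example): with dx10_clamp enabled,
//   (fmed3 1.0, x, 0.0)
// has its constant operands moved next to each other by the swaps above; once
// both constants sit in adjacent operand slots they are recognized as a
// 0.0/1.0 pair and the node becomes (AMDGPUISD::CLAMP x).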
7215
Matt Arsenault1f17c662017-02-22 00:27:34 +00007216SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
7217 DAGCombinerInfo &DCI) const {
7218 SDValue Src0 = N->getOperand(0);
7219 SDValue Src1 = N->getOperand(1);
7220 if (Src0.isUndef() && Src1.isUndef())
7221 return DCI.DAG.getUNDEF(N->getValueType(0));
7222 return SDValue();
7223}
7224
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007225SDValue SITargetLowering::performExtractVectorEltCombine(
7226 SDNode *N, DAGCombinerInfo &DCI) const {
7227 SDValue Vec = N->getOperand(0);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007228 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault63bc0e32018-06-15 15:31:36 +00007229
7230 EVT VecVT = Vec.getValueType();
7231 EVT EltVT = VecVT.getVectorElementType();
7232
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00007233 if ((Vec.getOpcode() == ISD::FNEG ||
7234 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007235 SDLoc SL(N);
7236 EVT EltVT = N->getValueType(0);
7237 SDValue Idx = N->getOperand(1);
7238 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7239 Vec.getOperand(0), Idx);
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00007240 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007241 }
7242
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007243 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
7244 // =>
7245 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
7246 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
7247 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
Farhana Aleene24f3ff2018-05-09 21:18:34 +00007248 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007249 SDLoc SL(N);
7250 EVT EltVT = N->getValueType(0);
7251 SDValue Idx = N->getOperand(1);
7252 unsigned Opc = Vec.getOpcode();
7253
7254 switch(Opc) {
7255 default:
7256 return SDValue();
7257 // TODO: Support other binary operations.
7258 case ISD::FADD:
7259 case ISD::ADD:
Farhana Aleene24f3ff2018-05-09 21:18:34 +00007260 case ISD::UMIN:
7261 case ISD::UMAX:
7262 case ISD::SMIN:
7263 case ISD::SMAX:
7264 case ISD::FMAXNUM:
7265 case ISD::FMINNUM:
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007266 return DAG.getNode(Opc, SL, EltVT,
7267 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7268 Vec.getOperand(0), Idx),
7269 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7270 Vec.getOperand(1), Idx));
7271 }
7272 }
Matt Arsenault63bc0e32018-06-15 15:31:36 +00007273
7274 if (!DCI.isBeforeLegalize())
7275 return SDValue();
7276
7277 unsigned VecSize = VecVT.getSizeInBits();
7278 unsigned EltSize = EltVT.getSizeInBits();
7279
7280 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
7281 // elements. This exposes more load reduction opportunities by replacing
7282 // multiple small extract_vector_elements with a single 32-bit extract.
7283 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
7284 if (EltSize <= 16 &&
7285 EltVT.isByteSized() &&
7286 VecSize > 32 &&
7287 VecSize % 32 == 0 &&
7288 Idx) {
7289 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
7290
7291 unsigned BitIndex = Idx->getZExtValue() * EltSize;
7292 unsigned EltIdx = BitIndex / 32;
7293 unsigned LeftoverBitIdx = BitIndex % 32;
7294 SDLoc SL(N);
7295
7296 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
7297 DCI.AddToWorklist(Cast.getNode());
7298
7299 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
7300 DAG.getConstant(EltIdx, SL, MVT::i32));
7301 DCI.AddToWorklist(Elt.getNode());
7302 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
7303 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
7304 DCI.AddToWorklist(Srl.getNode());
7305
7306 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
7307 DCI.AddToWorklist(Trunc.getNode());
7308 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
7309 }
7310
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007311 return SDValue();
7312}
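
// Illustrative note (hypothetical example) for the sub-dword path above:
// extracting element 5 of a v8i8 gives BitIndex = 40, so EltIdx = 1 and
// LeftoverBitIdx = 8. The vector is bitcast to the equivalent 32-bit type
// (presumably v2i32 here), element 1 is extracted, shifted right by 8,
// truncated to i8, and bitcast back to the original element type.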
7313
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007314static bool convertBuildVectorCastElt(SelectionDAG &DAG,
7315 SDValue &Lo, SDValue &Hi) {
7316 if (Hi.getOpcode() == ISD::BITCAST &&
7317 Hi.getOperand(0).getValueType() == MVT::f16 &&
7318 (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
7319 Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
7320 Hi = Hi.getOperand(0);
7321 return true;
7322 }
7323
7324 return false;
7325}
7326
7327SDValue SITargetLowering::performBuildVectorCombine(
7328 SDNode *N, DAGCombinerInfo &DCI) const {
7329 SDLoc SL(N);
7330
7331 if (!isTypeLegal(MVT::v2i16))
7332 return SDValue();
7333 SelectionDAG &DAG = DCI.DAG;
7334 EVT VT = N->getValueType(0);
7335
7336 if (VT == MVT::v2i16) {
7337 SDValue Lo = N->getOperand(0);
7338 SDValue Hi = N->getOperand(1);
7339
7340 // v2i16 build_vector (const|undef), (bitcast f16:$x)
7341    // -> bitcast (v2f16 build_vector const|undef, $x)
7342 if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
7343 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi });
7344 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
7345 }
7346
7347 if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
7348 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo });
7349 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
7350 }
7351 }
7352
7353 return SDValue();
7354}
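
// Illustrative note (assumed example) for the build_vector combine above:
//   (v2i16 build_vector (i16 0), (i16 (bitcast f16:$x)))
// is rewritten as
//   (v2i16 (bitcast (v2f16 build_vector (f16 (bitcast (i16 0))), f16:$x)))
// presumably so the per-element i16 bitcast is folded into a single bitcast
// of the whole vector.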
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007355
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007356unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
7357 const SDNode *N0,
7358 const SDNode *N1) const {
7359 EVT VT = N0->getValueType(0);
7360
Matt Arsenault770ec862016-12-22 03:55:35 +00007361 // Only do this if we are not trying to support denormals. v_mad_f32 does not
7362 // support denormals ever.
7363 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
7364 (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
7365 return ISD::FMAD;
7366
7367 const TargetOptions &Options = DAG.getTarget().Options;
Amara Emersond28f0cd42017-05-01 15:17:51 +00007368 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
Michael Berg7acc81b2018-05-04 18:48:20 +00007369 (N0->getFlags().hasAllowContract() &&
7370 N1->getFlags().hasAllowContract())) &&
Matt Arsenault770ec862016-12-22 03:55:35 +00007371 isFMAFasterThanFMulAndFAdd(VT)) {
7372 return ISD::FMA;
7373 }
7374
7375 return 0;
7376}
7377
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007378static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
7379 EVT VT,
7380 SDValue N0, SDValue N1, SDValue N2,
7381 bool Signed) {
7382 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
7383 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
7384 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
7385 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
7386}
7387
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007388SDValue SITargetLowering::performAddCombine(SDNode *N,
7389 DAGCombinerInfo &DCI) const {
7390 SelectionDAG &DAG = DCI.DAG;
7391 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007392 SDLoc SL(N);
7393 SDValue LHS = N->getOperand(0);
7394 SDValue RHS = N->getOperand(1);
7395
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007396 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
7397 && Subtarget->hasMad64_32() &&
7398 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
7399 VT.getScalarSizeInBits() <= 64) {
7400 if (LHS.getOpcode() != ISD::MUL)
7401 std::swap(LHS, RHS);
7402
7403 SDValue MulLHS = LHS.getOperand(0);
7404 SDValue MulRHS = LHS.getOperand(1);
7405 SDValue AddRHS = RHS;
7406
7407 // TODO: Maybe restrict if SGPR inputs.
7408 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
7409 numBitsUnsigned(MulRHS, DAG) <= 32) {
7410 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
7411 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
7412 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
7413 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
7414 }
7415
7416 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
7417 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
7418 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
7419 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
7420 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
7421 }
7422
7423 return SDValue();
7424 }
7425
Farhana Aleen07e61232018-05-02 18:16:39 +00007426 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007427 return SDValue();
7428
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007429 // add x, zext (setcc) => addcarry x, 0, setcc
7430 // add x, sext (setcc) => subcarry x, 0, setcc
7431 unsigned Opc = LHS.getOpcode();
7432 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007433 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007434 std::swap(RHS, LHS);
7435
7436 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007437 switch (Opc) {
7438 default: break;
7439 case ISD::ZERO_EXTEND:
7440 case ISD::SIGN_EXTEND:
7441 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007442 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007443 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00007444 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007445 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
7446 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
7447 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
7448 return DAG.getNode(Opc, SL, VTList, Args);
7449 }
7450 case ISD::ADDCARRY: {
7451 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
7452 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7453 if (!C || C->getZExtValue() != 0) break;
7454 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
7455 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
7456 }
7457 }
7458 return SDValue();
7459}
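
// Illustrative notes (assumed examples) for the add combine above:
//   i64 (add (mul a, b), c), with both multiplicands known to fit in 32
//   unsigned bits, becomes MAD_U64_U32 on the 32-bit-truncated operands,
//   then a truncate back to the original type.
//   i32 (add x, (zext i1:cc)) becomes (addcarry x, 0, cc), so the SGPR i1
//   condition feeds the carry input directly; the sext form uses subcarry.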
7460
7461SDValue SITargetLowering::performSubCombine(SDNode *N,
7462 DAGCombinerInfo &DCI) const {
7463 SelectionDAG &DAG = DCI.DAG;
7464 EVT VT = N->getValueType(0);
7465
7466 if (VT != MVT::i32)
7467 return SDValue();
7468
7469 SDLoc SL(N);
7470 SDValue LHS = N->getOperand(0);
7471 SDValue RHS = N->getOperand(1);
7472
7473 unsigned Opc = LHS.getOpcode();
7474 if (Opc != ISD::SUBCARRY)
7475 std::swap(RHS, LHS);
7476
7477 if (LHS.getOpcode() == ISD::SUBCARRY) {
7478 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
7479 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7480 if (!C || C->getZExtValue() != 0)
7481 return SDValue();
7482 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
7483 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
7484 }
7485 return SDValue();
7486}
7487
7488SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
7489 DAGCombinerInfo &DCI) const {
7490
7491 if (N->getValueType(0) != MVT::i32)
7492 return SDValue();
7493
7494 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7495 if (!C || C->getZExtValue() != 0)
7496 return SDValue();
7497
7498 SelectionDAG &DAG = DCI.DAG;
7499 SDValue LHS = N->getOperand(0);
7500
7501 // addcarry (add x, y), 0, cc => addcarry x, y, cc
7502 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
7503 unsigned LHSOpc = LHS.getOpcode();
7504 unsigned Opc = N->getOpcode();
7505 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
7506 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
7507 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
7508 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007509 }
7510 return SDValue();
7511}
7512
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007513SDValue SITargetLowering::performFAddCombine(SDNode *N,
7514 DAGCombinerInfo &DCI) const {
7515 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7516 return SDValue();
7517
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007518 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00007519 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00007520
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007521 SDLoc SL(N);
7522 SDValue LHS = N->getOperand(0);
7523 SDValue RHS = N->getOperand(1);
7524
7525 // These should really be instruction patterns, but writing patterns with
7526  // source modifiers is a pain.
7527
7528 // fadd (fadd (a, a), b) -> mad 2.0, a, b
7529 if (LHS.getOpcode() == ISD::FADD) {
7530 SDValue A = LHS.getOperand(0);
7531 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007532 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007533 if (FusedOp != 0) {
7534 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007535 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00007536 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007537 }
7538 }
7539
7540 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
7541 if (RHS.getOpcode() == ISD::FADD) {
7542 SDValue A = RHS.getOperand(0);
7543 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007544 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007545 if (FusedOp != 0) {
7546 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007547 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00007548 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007549 }
7550 }
7551
7552 return SDValue();
7553}
7554
7555SDValue SITargetLowering::performFSubCombine(SDNode *N,
7556 DAGCombinerInfo &DCI) const {
7557 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7558 return SDValue();
7559
7560 SelectionDAG &DAG = DCI.DAG;
7561 SDLoc SL(N);
7562 EVT VT = N->getValueType(0);
7563 assert(!VT.isVector());
7564
7565 // Try to get the fneg to fold into the source modifier. This undoes generic
7566 // DAG combines and folds them into the mad.
7567 //
7568 // Only do this if we are not trying to support denormals. v_mad_f32 does
7569 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00007570 SDValue LHS = N->getOperand(0);
7571 SDValue RHS = N->getOperand(1);
7572 if (LHS.getOpcode() == ISD::FADD) {
7573 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
7574 SDValue A = LHS.getOperand(0);
7575 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007576 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007577      if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007578 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
7579 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7580
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007581 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007582 }
7583 }
Matt Arsenault770ec862016-12-22 03:55:35 +00007584 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007585
Matt Arsenault770ec862016-12-22 03:55:35 +00007586 if (RHS.getOpcode() == ISD::FADD) {
7587 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007588
Matt Arsenault770ec862016-12-22 03:55:35 +00007589 SDValue A = RHS.getOperand(0);
7590 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007591 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007592      if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007593 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007594 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007595 }
7596 }
7597 }
7598
7599 return SDValue();
7600}
7601
Farhana Aleenc370d7b2018-07-16 18:19:59 +00007602SDValue SITargetLowering::performFMACombine(SDNode *N,
7603 DAGCombinerInfo &DCI) const {
7604 SelectionDAG &DAG = DCI.DAG;
7605 EVT VT = N->getValueType(0);
7606 SDLoc SL(N);
7607
7608 if (!Subtarget->hasDLInsts() || VT != MVT::f32)
7609 return SDValue();
7610
7611  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
7612  // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
7613 SDValue Op1 = N->getOperand(0);
7614 SDValue Op2 = N->getOperand(1);
7615 SDValue FMA = N->getOperand(2);
7616
7617 if (FMA.getOpcode() != ISD::FMA ||
7618 Op1.getOpcode() != ISD::FP_EXTEND ||
7619 Op2.getOpcode() != ISD::FP_EXTEND)
7620 return SDValue();
7621
7622 // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero,
7623 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
7624  // is sufficient to allow generating fdot2.
7625 const TargetOptions &Options = DAG.getTarget().Options;
7626 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
7627 (N->getFlags().hasAllowContract() &&
7628 FMA->getFlags().hasAllowContract())) {
7629 Op1 = Op1.getOperand(0);
7630 Op2 = Op2.getOperand(0);
7631 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7632 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7633 return SDValue();
7634
7635 SDValue Vec1 = Op1.getOperand(0);
7636 SDValue Idx1 = Op1.getOperand(1);
7637 SDValue Vec2 = Op2.getOperand(0);
7638
7639 SDValue FMAOp1 = FMA.getOperand(0);
7640 SDValue FMAOp2 = FMA.getOperand(1);
7641 SDValue FMAAcc = FMA.getOperand(2);
7642
7643 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
7644 FMAOp2.getOpcode() != ISD::FP_EXTEND)
7645 return SDValue();
7646
7647 FMAOp1 = FMAOp1.getOperand(0);
7648 FMAOp2 = FMAOp2.getOperand(0);
7649 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7650 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7651 return SDValue();
7652
7653 SDValue Vec3 = FMAOp1.getOperand(0);
7654 SDValue Vec4 = FMAOp2.getOperand(0);
7655 SDValue Idx2 = FMAOp1.getOperand(1);
7656
7657 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
7658 // Idx1 and Idx2 cannot be the same.
7659 Idx1 == Idx2)
7660 return SDValue();
7661
7662 if (Vec1 == Vec2 || Vec3 == Vec4)
7663 return SDValue();
7664
7665 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
7666 return SDValue();
7667
7668 if ((Vec1 == Vec3 && Vec2 == Vec4) ||
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00007669 (Vec1 == Vec4 && Vec2 == Vec3)) {
7670 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
7671 DAG.getTargetConstant(0, SL, MVT::i1));
7672 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00007673 }
7674 return SDValue();
7675}
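
// Illustrative note (hypothetical example) for the fdot2 combine above:
//   fma (fpext (extractelt v2f16:$a, 0)), (fpext (extractelt v2f16:$b, 0)),
//       (fma (fpext (extractelt v2f16:$a, 1)),
//            (fpext (extractelt v2f16:$b, 1)), f32:$acc)
// collapses to (AMDGPUISD::FDOT2 $a, $b, $acc) (plus a zero clamp flag) when
// the fast-math/contract requirements described above are met.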
7676
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007677SDValue SITargetLowering::performSetCCCombine(SDNode *N,
7678 DAGCombinerInfo &DCI) const {
7679 SelectionDAG &DAG = DCI.DAG;
7680 SDLoc SL(N);
7681
7682 SDValue LHS = N->getOperand(0);
7683 SDValue RHS = N->getOperand(1);
7684 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00007685 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
7686
7687 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
7688 if (!CRHS) {
7689 CRHS = dyn_cast<ConstantSDNode>(LHS);
7690 if (CRHS) {
7691 std::swap(LHS, RHS);
7692 CC = getSetCCSwappedOperands(CC);
7693 }
7694 }
7695
Stanislav Mekhanoshin3b117942018-06-16 03:46:59 +00007696 if (CRHS) {
7697 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
7698 isBoolSGPR(LHS.getOperand(0))) {
7699 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
7700 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
7701 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
7702 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
7703 if ((CRHS->isAllOnesValue() &&
7704 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
7705 (CRHS->isNullValue() &&
7706 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
7707 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
7708 DAG.getConstant(-1, SL, MVT::i1));
7709 if ((CRHS->isAllOnesValue() &&
7710 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
7711 (CRHS->isNullValue() &&
7712 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
7713 return LHS.getOperand(0);
7714 }
7715
7716 uint64_t CRHSVal = CRHS->getZExtValue();
7717 if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
7718 LHS.getOpcode() == ISD::SELECT &&
7719 isa<ConstantSDNode>(LHS.getOperand(1)) &&
7720 isa<ConstantSDNode>(LHS.getOperand(2)) &&
7721 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
7722 isBoolSGPR(LHS.getOperand(0))) {
7723 // Given CT != FT:
7724 // setcc (select cc, CT, CF), CF, eq => xor cc, -1
7725 // setcc (select cc, CT, CF), CF, ne => cc
7726 // setcc (select cc, CT, CF), CT, ne => xor cc, -1
7727 // setcc (select cc, CT, CF), CT, eq => cc
7728 uint64_t CT = LHS.getConstantOperandVal(1);
7729 uint64_t CF = LHS.getConstantOperandVal(2);
7730
7731 if ((CF == CRHSVal && CC == ISD::SETEQ) ||
7732 (CT == CRHSVal && CC == ISD::SETNE))
7733 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
7734 DAG.getConstant(-1, SL, MVT::i1));
7735 if ((CF == CRHSVal && CC == ISD::SETNE) ||
7736 (CT == CRHSVal && CC == ISD::SETEQ))
7737 return LHS.getOperand(0);
7738 }
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00007739 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007740
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00007741 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
7742 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007743 return SDValue();
7744
7745 // Match isinf pattern
7746 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007747 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
7748 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
7749 if (!CRHS)
7750 return SDValue();
7751
7752 const APFloat &APF = CRHS->getValueAPF();
7753 if (APF.isInfinity() && !APF.isNegative()) {
7754 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007755 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
7756 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007757 }
7758 }
7759
7760 return SDValue();
7761}
7762
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007763SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
7764 DAGCombinerInfo &DCI) const {
7765 SelectionDAG &DAG = DCI.DAG;
7766 SDLoc SL(N);
7767 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
7768
7769 SDValue Src = N->getOperand(0);
7770 SDValue Srl = N->getOperand(0);
7771 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
7772 Srl = Srl.getOperand(0);
7773
7774 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
7775 if (Srl.getOpcode() == ISD::SRL) {
7776 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
7777 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
7778 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
7779
7780 if (const ConstantSDNode *C =
7781 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
7782 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
7783 EVT(MVT::i32));
7784
7785 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
7786 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
7787 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
7788 MVT::f32, Srl);
7789 }
7790 }
7791 }
7792
7793 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
7794
Craig Topperd0af7e82017-04-28 05:31:46 +00007795 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007796 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
7797 !DCI.isBeforeLegalizeOps());
7798 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Akira Hatanaka22e839f2017-04-21 18:53:12 +00007799 if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
Craig Topperd0af7e82017-04-28 05:31:46 +00007800 TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007801 DCI.CommitTargetLoweringOpt(TLO);
7802 }
7803
7804 return SDValue();
7805}
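
// Illustrative note (assumed example): (cvt_f32_ubyte0 (srl x, 16)) selects
// bits [23:16] of x, which is exactly (cvt_f32_ubyte2 x), so the shift is
// folded into the byte index as long as the combined offset stays below 32
// and remains byte aligned.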
7806
Tom Stellard1b95fed2018-05-24 05:28:34 +00007807SDValue SITargetLowering::performClampCombine(SDNode *N,
7808 DAGCombinerInfo &DCI) const {
7809 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
7810 if (!CSrc)
7811 return SDValue();
7812
7813 const APFloat &F = CSrc->getValueAPF();
7814 APFloat Zero = APFloat::getZero(F.getSemantics());
7815 APFloat::cmpResult Cmp0 = F.compare(Zero);
7816 if (Cmp0 == APFloat::cmpLessThan ||
7817 (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
7818 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
7819 }
7820
7821 APFloat One(F.getSemantics(), "1.0");
7822 APFloat::cmpResult Cmp1 = F.compare(One);
7823 if (Cmp1 == APFloat::cmpGreaterThan)
7824 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
7825
7826 return SDValue(CSrc, 0);
7827}
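
// Illustrative constant-folding note (hypothetical inputs) for the clamp
// combine above: clamp(-2.5) -> 0.0, clamp(3.0) -> 1.0, clamp(0.25) -> 0.25,
// and with dx10_clamp enabled clamp(NaN) also folds to 0.0.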
7828
7829
Tom Stellard75aadc22012-12-11 21:25:42 +00007830SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
7831 DAGCombinerInfo &DCI) const {
Tom Stellard75aadc22012-12-11 21:25:42 +00007832 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00007833 default:
7834 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007835 case ISD::ADD:
7836 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007837 case ISD::SUB:
7838 return performSubCombine(N, DCI);
7839 case ISD::ADDCARRY:
7840 case ISD::SUBCARRY:
7841 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007842 case ISD::FADD:
7843 return performFAddCombine(N, DCI);
7844 case ISD::FSUB:
7845 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007846 case ISD::SETCC:
7847 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00007848 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007849 case ISD::FMINNUM:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00007850 case ISD::SMAX:
7851 case ISD::SMIN:
7852 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00007853 case ISD::UMIN:
7854 case AMDGPUISD::FMIN_LEGACY:
7855 case AMDGPUISD::FMAX_LEGACY: {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007856 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
7857 getTargetMachine().getOptLevel() > CodeGenOpt::None)
Matt Arsenaultf639c322016-01-28 20:53:42 +00007858 return performMinMaxCombine(N, DCI);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007859 break;
7860 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00007861 case ISD::FMA:
7862 return performFMACombine(N, DCI);
Matt Arsenault90083d32018-06-07 09:54:49 +00007863 case ISD::LOAD: {
7864    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
7865      return Widened;
7866 LLVM_FALLTHROUGH;
7867 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007868 case ISD::STORE:
7869 case ISD::ATOMIC_LOAD:
7870 case ISD::ATOMIC_STORE:
7871 case ISD::ATOMIC_CMP_SWAP:
7872 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
7873 case ISD::ATOMIC_SWAP:
7874 case ISD::ATOMIC_LOAD_ADD:
7875 case ISD::ATOMIC_LOAD_SUB:
7876 case ISD::ATOMIC_LOAD_AND:
7877 case ISD::ATOMIC_LOAD_OR:
7878 case ISD::ATOMIC_LOAD_XOR:
7879 case ISD::ATOMIC_LOAD_NAND:
7880 case ISD::ATOMIC_LOAD_MIN:
7881 case ISD::ATOMIC_LOAD_MAX:
7882 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00007883 case ISD::ATOMIC_LOAD_UMAX:
7884 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00007885 case AMDGPUISD::ATOMIC_DEC:
7886 case AMDGPUISD::ATOMIC_LOAD_FADD:
7887 case AMDGPUISD::ATOMIC_LOAD_FMIN:
7888 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007889 if (DCI.isBeforeLegalize())
7890 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007891 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007892 case ISD::AND:
7893 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007894 case ISD::OR:
7895 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007896 case ISD::XOR:
7897 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007898 case ISD::ZERO_EXTEND:
7899 return performZeroExtendCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007900 case AMDGPUISD::FP_CLASS:
7901 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00007902 case ISD::FCANONICALIZE:
7903 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007904 case AMDGPUISD::RCP:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007905 return performRcpCombine(N, DCI);
7906 case AMDGPUISD::FRACT:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007907 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00007908 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007909 case AMDGPUISD::RSQ_LEGACY:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007910 case AMDGPUISD::RCP_IFLAG:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007911 case AMDGPUISD::RSQ_CLAMP:
7912 case AMDGPUISD::LDEXP: {
7913 SDValue Src = N->getOperand(0);
7914 if (Src.isUndef())
7915 return Src;
7916 break;
7917 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007918 case ISD::SINT_TO_FP:
7919 case ISD::UINT_TO_FP:
7920 return performUCharToFloatCombine(N, DCI);
7921 case AMDGPUISD::CVT_F32_UBYTE0:
7922 case AMDGPUISD::CVT_F32_UBYTE1:
7923 case AMDGPUISD::CVT_F32_UBYTE2:
7924 case AMDGPUISD::CVT_F32_UBYTE3:
7925 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007926 case AMDGPUISD::FMED3:
7927 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00007928 case AMDGPUISD::CVT_PKRTZ_F16_F32:
7929 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00007930 case AMDGPUISD::CLAMP:
7931 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00007932 case ISD::SCALAR_TO_VECTOR: {
7933 SelectionDAG &DAG = DCI.DAG;
7934 EVT VT = N->getValueType(0);
7935
7936 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
7937 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
7938 SDLoc SL(N);
7939 SDValue Src = N->getOperand(0);
7940 EVT EltVT = Src.getValueType();
7941 if (EltVT == MVT::f16)
7942 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
7943
7944 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
7945 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
7946 }
7947
7948 break;
7949 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007950 case ISD::EXTRACT_VECTOR_ELT:
7951 return performExtractVectorEltCombine(N, DCI);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007952 case ISD::BUILD_VECTOR:
7953 return performBuildVectorCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007954 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00007955 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00007956}
Christian Konigd910b7d2013-02-26 17:52:16 +00007957
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007958/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00007959static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00007960 switch (Idx) {
7961 default: return 0;
7962 case AMDGPU::sub0: return 0;
7963 case AMDGPU::sub1: return 1;
7964 case AMDGPU::sub2: return 2;
7965 case AMDGPU::sub3: return 3;
7966 }
7967}
7968
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007969/// Adjust the writemask of MIMG instructions
Matt Arsenault68f05052017-12-04 22:18:27 +00007970SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
7971 SelectionDAG &DAG) const {
Nicolai Haehnlef2674312018-06-21 13:36:01 +00007972 unsigned Opcode = Node->getMachineOpcode();
7973
7974 // Subtract 1 because the vdata output is not a MachineSDNode operand.
7975 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
7976 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
7977 return Node; // not implemented for D16
7978
Matt Arsenault68f05052017-12-04 22:18:27 +00007979 SDNode *Users[4] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00007980 unsigned Lane = 0;
Nicolai Haehnlef2674312018-06-21 13:36:01 +00007981 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007982 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00007983 unsigned NewDmask = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00007984 bool HasChain = Node->getNumValues() > 1;
7985
7986 if (OldDmask == 0) {
7987 // These are folded out, but on the chance it happens don't assert.
7988 return Node;
7989 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00007990
7991 // Try to figure out the used register components
7992 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
7993 I != E; ++I) {
7994
Matt Arsenault93e65ea2017-02-22 21:16:41 +00007995 // Don't look at users of the chain.
7996 if (I.getUse().getResNo() != 0)
7997 continue;
7998
Christian Konig8e06e2a2013-04-10 08:39:08 +00007999 // Abort if we can't understand the usage
8000 if (!I->isMachineOpcode() ||
8001 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +00008002 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008003
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +00008004 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +00008005 // Note that subregs are packed, i.e. Lane==0 is the first bit set
8006 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
8007 // set, etc.
Christian Konig8b1ed282013-04-10 08:39:16 +00008008 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00008009
Tom Stellard54774e52013-10-23 02:53:47 +00008010 // Set which texture component corresponds to the lane.
8011 unsigned Comp;
8012 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
Tom Stellard03a5c082013-10-23 03:50:25 +00008013 Comp = countTrailingZeros(Dmask);
Tom Stellard54774e52013-10-23 02:53:47 +00008014 Dmask &= ~(1 << Comp);
8015 }
8016
Christian Konig8e06e2a2013-04-10 08:39:08 +00008017 // Abort if we have more than one user per component
8018 if (Users[Lane])
Matt Arsenault68f05052017-12-04 22:18:27 +00008019 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008020
8021 Users[Lane] = *I;
Tom Stellard54774e52013-10-23 02:53:47 +00008022 NewDmask |= 1 << Comp;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008023 }
8024
Tom Stellard54774e52013-10-23 02:53:47 +00008025 // Abort if there's no change
8026 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +00008027 return Node;
8028
8029 unsigned BitsSet = countPopulation(NewDmask);
8030
Nicolai Haehnle0ab200b2018-06-21 13:36:44 +00008031 int NewOpcode = AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), BitsSet);
Matt Arsenault68f05052017-12-04 22:18:27 +00008032 assert(NewOpcode != -1 &&
8033 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
8034 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +00008035
8036 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +00008037 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00008038 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008039 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00008040 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +00008041
Matt Arsenault68f05052017-12-04 22:18:27 +00008042 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
8043
Matt Arsenault856777d2017-12-08 20:00:57 +00008044 MVT ResultVT = BitsSet == 1 ?
8045 SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
8046 SDVTList NewVTList = HasChain ?
8047 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
8048
Matt Arsenault68f05052017-12-04 22:18:27 +00008049
8050 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
8051 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +00008052
Matt Arsenault856777d2017-12-08 20:00:57 +00008053 if (HasChain) {
8054 // Update chain.
8055 NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
8056 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
8057 }
Matt Arsenault68f05052017-12-04 22:18:27 +00008058
8059 if (BitsSet == 1) {
8060 assert(Node->hasNUsesOfValue(1, 0));
8061 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
8062 SDLoc(Node), Users[Lane]->getValueType(0),
8063 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +00008064 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +00008065 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +00008066 }
8067
Christian Konig8e06e2a2013-04-10 08:39:08 +00008068 // Update the users of the node with the new indices
8069 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00008070 SDNode *User = Users[i];
8071 if (!User)
8072 continue;
8073
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008074 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
Matt Arsenault68f05052017-12-04 22:18:27 +00008075 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
Christian Konig8e06e2a2013-04-10 08:39:08 +00008076
8077 switch (Idx) {
8078 default: break;
8079 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
8080 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
8081 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
8082 }
8083 }
Matt Arsenault68f05052017-12-04 22:18:27 +00008084
8085 DAG.RemoveDeadNode(Node);
8086 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008087}
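
// Illustrative note (hypothetical example) for adjustWritemask above: if an
// image sample with dmask = 0xf only has EXTRACT_SUBREG users of sub0 and
// sub2, NewDmask becomes 0x5, the node is rewritten to the equivalent
// two-channel MIMG opcode, and the surviving users are renumbered to sub0
// and sub1 of the smaller result.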
8088
Tom Stellardc98ee202015-07-16 19:40:07 +00008089static bool isFrameIndexOp(SDValue Op) {
8090 if (Op.getOpcode() == ISD::AssertZext)
8091 Op = Op.getOperand(0);
8092
8093 return isa<FrameIndexSDNode>(Op);
8094}
8095
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008096/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +00008097/// with frame index operands.
8098/// LLVM assumes that inputs to these instructions are registers.
Matt Arsenault0d0d6c22017-04-12 21:58:23 +00008099SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
8100 SelectionDAG &DAG) const {
8101 if (Node->getOpcode() == ISD::CopyToReg) {
8102 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
8103 SDValue SrcVal = Node->getOperand(2);
8104
8105 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
8106 // to try understanding copies to physical registers.
8107 if (SrcVal.getValueType() == MVT::i1 &&
8108 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
8109 SDLoc SL(Node);
8110 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
8111 SDValue VReg = DAG.getRegister(
8112 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
8113
8114 SDNode *Glued = Node->getGluedNode();
8115 SDValue ToVReg
8116 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
8117 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
8118 SDValue ToResultReg
8119 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
8120 VReg, ToVReg.getValue(1));
8121 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
8122 DAG.RemoveDeadNode(Node);
8123 return ToResultReg.getNode();
8124 }
8125 }
Tom Stellard8dd392e2014-10-09 18:09:15 +00008126
8127 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +00008128 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00008129 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00008130 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00008131 continue;
8132 }
8133
Tom Stellard3457a842014-10-09 19:06:00 +00008134 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00008135 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00008136 Node->getOperand(i).getValueType(),
8137 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00008138 }
8139
Mark Searles4e3d6162017-10-16 23:38:53 +00008140 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00008141}
8142
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008143/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +00008144/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +00008145SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
8146 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008147 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00008148 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00008149
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00008150 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Nicolai Haehnlef2674312018-06-21 13:36:01 +00008151 !TII->isGather4(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +00008152 return adjustWritemask(Node, DAG);
8153 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00008154
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00008155 if (Opcode == AMDGPU::INSERT_SUBREG ||
8156 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00008157 legalizeTargetIndependentNode(Node, DAG);
8158 return Node;
8159 }
Matt Arsenault206f8262017-08-01 20:49:41 +00008160
8161 switch (Opcode) {
8162 case AMDGPU::V_DIV_SCALE_F32:
8163 case AMDGPU::V_DIV_SCALE_F64: {
8164 // Satisfy the operand register constraint when one of the inputs is
8165 // undefined. Ordinarily each undef value will have its own implicit_def of
8166 // a vreg, so force these to use a single register.
8167 SDValue Src0 = Node->getOperand(0);
8168 SDValue Src1 = Node->getOperand(1);
8169 SDValue Src2 = Node->getOperand(2);
8170
8171 if ((Src0.isMachineOpcode() &&
8172 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
8173 (Src0 == Src1 || Src0 == Src2))
8174 break;
8175
8176 MVT VT = Src0.getValueType().getSimpleVT();
8177 const TargetRegisterClass *RC = getRegClassFor(VT);
8178
8179 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
8180 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
8181
8182 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
8183 UndefReg, Src0, SDValue());
8184
8185 // src0 must be the same register as src1 or src2, even if the value is
8186 // undefined, so make sure we don't violate this constraint.
8187 if (Src0.isMachineOpcode() &&
8188 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
8189 if (Src1.isMachineOpcode() &&
8190 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
8191 Src0 = Src1;
8192 else if (Src2.isMachineOpcode() &&
8193 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
8194 Src0 = Src2;
8195 else {
8196 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
8197 Src0 = UndefReg;
8198 Src1 = UndefReg;
8199 }
8200 } else
8201 break;
8202
8203 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
8204 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
8205 Ops.push_back(Node->getOperand(I));
8206
8207 Ops.push_back(ImpDef.getValue(1));
8208 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
8209 }
8210 default:
8211 break;
8212 }
8213
Tom Stellard654d6692015-01-08 15:08:17 +00008214 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00008215}
Christian Konig8b1ed282013-04-10 08:39:16 +00008216
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008217/// Assign the register class depending on the number of
Christian Konig8b1ed282013-04-10 08:39:16 +00008218/// bits set in the writemask
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008219void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00008220 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008221 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008222
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008223 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00008224
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008225 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00008226 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008227 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00008228 return;
8229 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00008230
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008231 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008232 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008233 if (NoRetAtomicOp != -1) {
8234 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008235 MI.setDesc(TII->get(NoRetAtomicOp));
8236 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00008237 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008238 }
8239
Tom Stellard354a43c2016-04-01 18:27:37 +00008240 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
8241 // instruction, because the return type of these instructions is a vec2 of
8242 // the memory type, so it can be tied to the input operand.
8243 // This means these instructions always have a use, so we need to add a
8244 // special case to check if the atomic has only one extract_subreg use,
8245 // which itself has no uses.
8246 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00008247 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00008248 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
8249 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008250 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00008251
8252 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008253 MI.setDesc(TII->get(NoRetAtomicOp));
8254 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00008255
8256 // If we only remove the def operand from the atomic instruction, the
8257 // extract_subreg will be left with a use of a vreg without a def.
8258 // So we need to insert an implicit_def to avoid machine verifier
8259 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008260 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00008261 TII->get(AMDGPU::IMPLICIT_DEF), Def);
8262 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008263 return;
8264 }
Christian Konig8b1ed282013-04-10 08:39:16 +00008265}
Tom Stellard0518ff82013-06-03 17:39:58 +00008266
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008267static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
8268 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008269 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00008270 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
8271}
8272
8273MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008274 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00008275 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008276 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00008277
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008278 // Build the half of the subregister with the constants before building the
8279 // full 128-bit register. If we are building multiple resource descriptors,
8280 // this will allow CSEing of the 2-component register.
8281 const SDValue Ops0[] = {
8282 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
8283 buildSMovImm32(DAG, DL, 0),
8284 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
8285 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
8286 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
8287 };
Matt Arsenault485defe2014-11-05 19:01:17 +00008288
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008289 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
8290 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00008291
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008292 // Combine the constants and the pointer.
8293 const SDValue Ops1[] = {
8294 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
8295 Ptr,
8296 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
8297 SubRegHi,
8298 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
8299 };
Matt Arsenault485defe2014-11-05 19:01:17 +00008300
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008301 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00008302}
8303
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008304/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00008305/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
8306/// of the resource descriptor) to create an offset, which is added to
8307/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008308MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
8309 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008310 uint64_t RsrcDword2And3) const {
8311 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
8312 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
8313 if (RsrcDword1) {
8314 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008315 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
8316 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008317 }
8318
8319 SDValue DataLo = buildSMovImm32(DAG, DL,
8320 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
8321 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
8322
8323 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008324 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008325 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008326 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008327 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008328 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008329 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008330 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008331 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008332 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008333 };
8334
8335 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
8336}
8337
Tom Stellardd7e6f132015-04-08 01:09:26 +00008338//===----------------------------------------------------------------------===//
8339// SI Inline Assembly Support
8340//===----------------------------------------------------------------------===//
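//
// Illustrative uses of the constraints handled below (assuming the usual
// clang inline-asm syntax; these snippets are examples, not part of this
// file):
//   "s" / "r" - an SGPR-class register sized to the operand type, e.g.
//               asm("s_mov_b32 %0, %1" : "=s"(Out) : "s"(In));
//   "v"       - a VGPR-class register, e.g.
//               asm("v_mov_b32 %0, %1" : "=v"(Out) : "v"(In));
// getConstraintType reports 's' and 'v' as C_RegisterClass; longer
// constraints either match the explicit-register path in
// getRegForInlineAsmConstraint or fall back to the generic TargetLowering
// handling.
//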
8341
8342std::pair<unsigned, const TargetRegisterClass *>
8343SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00008344 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00008345 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008346 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008347 if (Constraint.size() == 1) {
8348 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008349 default:
8350 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008351 case 's':
8352 case 'r':
8353 switch (VT.getSizeInBits()) {
8354 default:
8355 return std::make_pair(0U, nullptr);
8356 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00008357 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008358 RC = &AMDGPU::SReg_32_XM0RegClass;
8359 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008360 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008361 RC = &AMDGPU::SGPR_64RegClass;
8362 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008363 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008364 RC = &AMDGPU::SReg_128RegClass;
8365 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008366 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008367 RC = &AMDGPU::SReg_256RegClass;
8368 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +00008369 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008370 RC = &AMDGPU::SReg_512RegClass;
8371 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008372 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008373 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008374 case 'v':
8375 switch (VT.getSizeInBits()) {
8376 default:
8377 return std::make_pair(0U, nullptr);
8378 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00008379 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008380 RC = &AMDGPU::VGPR_32RegClass;
8381 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008382 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008383 RC = &AMDGPU::VReg_64RegClass;
8384 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008385 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008386 RC = &AMDGPU::VReg_96RegClass;
8387 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008388 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008389 RC = &AMDGPU::VReg_128RegClass;
8390 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008391 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008392 RC = &AMDGPU::VReg_256RegClass;
8393 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008394 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008395 RC = &AMDGPU::VReg_512RegClass;
8396 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008397 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008398 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +00008399 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008400      // We actually support i128, i16 and f16 as inline asm operand types,
8401      // even though they are not reported as legal types.
8402 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
8403 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
8404 return std::make_pair(0U, RC);
Tom Stellardd7e6f132015-04-08 01:09:26 +00008405 }
8406
8407 if (Constraint.size() > 1) {
Tom Stellardd7e6f132015-04-08 01:09:26 +00008408 if (Constraint[1] == 'v') {
8409 RC = &AMDGPU::VGPR_32RegClass;
8410 } else if (Constraint[1] == 's') {
8411 RC = &AMDGPU::SGPR_32RegClass;
8412 }
8413
8414 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +00008415 uint32_t Idx;
8416 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
8417 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +00008418 return std::make_pair(RC->getRegister(Idx), RC);
8419 }
8420 }
8421 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8422}
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008423
8424SITargetLowering::ConstraintType
8425SITargetLowering::getConstraintType(StringRef Constraint) const {
8426 if (Constraint.size() == 1) {
8427 switch (Constraint[0]) {
8428 default: break;
8429 case 's':
8430 case 'v':
8431 return C_RegisterClass;
8432 }
8433 }
8434 return TargetLowering::getConstraintType(Constraint);
8435}
Matt Arsenault1cc47f82017-07-18 16:44:56 +00008436
8437// Figure out which registers should be reserved for stack access. Only after
8438// the function is legalized do we know all of the non-spill stack objects or if
8439// calls are present.
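// For example (illustrative): an entry function (kernel) with no calls and no
// variable-sized objects only has its scratch rsrc, frame-offset and
// scratch-wave-offset placeholders rewritten; a non-entry function, or one
// with calls or dynamic allocas, also picks a reserved stack pointer register
// and rewrites SP_REG.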
8440void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
8441 MachineRegisterInfo &MRI = MF.getRegInfo();
8442 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8443 const MachineFrameInfo &MFI = MF.getFrameInfo();
Tom Stellardc5a154d2018-06-28 23:47:12 +00008444 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault1cc47f82017-07-18 16:44:56 +00008445
8446 if (Info->isEntryFunction()) {
8447 // Callable functions have fixed registers used for stack access.
8448 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
8449 }
8450
8452  // We have to assume the SP is needed in case there are calls in the
8453  // function, which are only detected after the function is lowered. We're
8454  // about to reserve registers, so if we aren't really going to need the SP,
8455  // don't bother reserving it.
8455 bool NeedSP = !Info->isEntryFunction() ||
8456 MFI.hasVarSizedObjects() ||
8457 MFI.hasCalls();
8458
8459 if (NeedSP) {
8460 unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
8461 Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);
8462
8463 assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
8464 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
8465 Info->getStackPtrOffsetReg()));
8466 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
8467 }
8468
8469 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
8470 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
8471 MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
8472 Info->getScratchWaveOffsetReg());
8473
Stanislav Mekhanoshind4b500c2018-05-31 05:36:04 +00008474 Info->limitOccupancy(MF);
8475
Matt Arsenault1cc47f82017-07-18 16:44:56 +00008476 TargetLoweringBase::finalizeLowering(MF);
8477}
Matt Arsenault45b98182017-11-15 00:45:43 +00008478
8479void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
8480 KnownBits &Known,
8481 const APInt &DemandedElts,
8482 const SelectionDAG &DAG,
8483 unsigned Depth) const {
8484 TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
8485 DAG, Depth);
8486
8487 if (getSubtarget()->enableHugePrivateBuffer())
8488 return;
8489
8490 // Technically it may be possible to have a dispatch with a single workitem
8491 // that uses the full private memory size, but that's not really useful. We
8492 // can't use vaddr in MUBUF instructions if we don't know the address
8493 // calculation won't overflow, so assume the sign bit is never set.
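  // For example (illustrative): clearing the high bits lets a later combine
  // treat a frame index as a small non-negative value, so adding a small
  // constant offset to it provably neither overflows nor sets the sign bit,
  // and the sum can stay in a MUBUF vaddr.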
8494 Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
8495}
Tom Stellard264c1712018-06-13 15:06:37 +00008496
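// Decide whether an SDNode produces a divergent (per-lane) value. Illustrative
// summary of the cases below: copies from VGPR physical or live-in registers
// and from non-entry-function formal arguments are divergent; loads from the
// private address space, CALLSEQ_END and the INTERP nodes are divergent;
// intrinsic results are looked up with AMDGPU::isIntrinsicSourceOfDivergence
// (e.g. llvm.amdgcn.workitem.id.x); other virtual registers use the
// DivergenceAnalysis result when one is available (conservatively divergent
// otherwise), and anything else is treated as uniform here.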
8497bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
8498 FunctionLoweringInfo * FLI, DivergenceAnalysis * DA) const
8499{
8500 switch (N->getOpcode()) {
8501 case ISD::Register:
8502  case ISD::CopyFromReg: {
8504 const RegisterSDNode *R = nullptr;
8505 if (N->getOpcode() == ISD::Register) {
8506 R = dyn_cast<RegisterSDNode>(N);
8507    } else {
8509 R = dyn_cast<RegisterSDNode>(N->getOperand(1));
8510 }
8511    if (R) {
8513      const MachineFunction *MF = FLI->MF;
Tom Stellard5bfbae52018-07-11 20:59:01 +00008514 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
Tom Stellard264c1712018-06-13 15:06:37 +00008515 const MachineRegisterInfo &MRI = MF->getRegInfo();
8516 const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
8517 unsigned Reg = R->getReg();
8518 if (TRI.isPhysicalRegister(Reg))
8519 return TRI.isVGPR(MRI, Reg);
8520
8521 if (MRI.isLiveIn(Reg)) {
8522        // Live-in VGPRs such as workitem.id.x/y/z, and any other VGPR
8523        // formal argument, are considered divergent.
8524 if (TRI.isVGPR(MRI, Reg))
8525 return true;
8526 // Formal arguments of non-entry functions
8527 // are conservatively considered divergent
8528 else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
8529 return true;
8530 }
8531 return !DA || DA->isDivergent(FLI->getValueFromVirtualReg(Reg));
8532 }
8533 }
8534 break;
8535 case ISD::LOAD: {
8536    const LoadSDNode *L = cast<LoadSDNode>(N);
8537 if (L->getMemOperand()->getAddrSpace() ==
8538 Subtarget->getAMDGPUAS().PRIVATE_ADDRESS)
8539 return true;
8540 } break;
8541 case ISD::CALLSEQ_END:
8542 return true;
8544 case ISD::INTRINSIC_WO_CHAIN:
8548 return AMDGPU::isIntrinsicSourceOfDivergence(
8549 cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
8550 case ISD::INTRINSIC_W_CHAIN:
8551 return AMDGPU::isIntrinsicSourceOfDivergence(
8552 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
8553 // In some cases intrinsics that are a source of divergence have been
8554 // lowered to AMDGPUISD so we also need to check those too.
8555 case AMDGPUISD::INTERP_MOV:
8556 case AMDGPUISD::INTERP_P1:
8557 case AMDGPUISD::INTERP_P2:
8558 return true;
8559 }
8560 return false;
8561}
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008562
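/// Return true if the subtarget enables denormal handling for this scalar
/// floating-point type (f16, f32 or f64); all other types report false.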
8563bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
8564 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
8565 case MVT::f32:
8566 return Subtarget->hasFP32Denormals();
8567 case MVT::f64:
8568 return Subtarget->hasFP64Denormals();
8569 case MVT::f16:
8570 return Subtarget->hasFP16Denormals();
8571 default:
8572 return false;
8573 }
8574}