//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);

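// Return the first SGPR that the calling convention state has not yet
// allocated; fails with llvm_unreachable if every SGPR is already taken.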
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);

    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this would be OK to use when denormals
// are enabled, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsicByIntr(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<SISubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<SISubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }

  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffer < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or 2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS)
    return isLegalGlobalAddressingMode(AM);

  if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
      AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
      return isLegalGlobalAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword value must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

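// Build a pointer into the kernel argument segment: copy the preloaded
// KERNARG_SEGMENT_PTR register and offset it by the given byte Offset.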
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001057SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1058 const SDLoc &SL,
1059 SDValue Chain,
1060 uint64_t Offset) const {
Mehdi Aminia749f2a2015-07-09 02:09:52 +00001061 const DataLayout &DL = DAG.getDataLayout();
Tom Stellardec2e43c2014-09-22 15:35:29 +00001062 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001063 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1064
1065 const ArgDescriptor *InputPtrReg;
1066 const TargetRegisterClass *RC;
1067
1068 std::tie(InputPtrReg, RC)
1069 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Tom Stellard94593ee2013-06-03 17:40:18 +00001070
Matt Arsenault86033ca2014-07-28 17:31:39 +00001071 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001072 MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
Matt Arsenaulta0269b62015-06-01 21:58:24 +00001073 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001074 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1075
Matt Arsenault2fb9ccf2018-05-29 17:42:38 +00001076 return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
Jan Veselyfea814d2016-06-21 20:46:20 +00001077}
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001078
Matt Arsenault9166ce82017-07-28 15:52:08 +00001079SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1080 const SDLoc &SL) const {
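  // The implicit arguments are laid out directly after the explicit kernel
  // arguments, so their base is the kernarg pointer plus the implicit offset.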
1081 auto MFI = DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
1082 uint64_t Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
1083 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1084}
1085
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001086SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1087 const SDLoc &SL, SDValue Val,
1088 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001089 const ISD::InputArg *Arg) const {
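  // If the ABI promoted a narrow argument, record the known sign/zero
  // extension with AssertSext/AssertZext before converting down to VT.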
Matt Arsenault6dca5422017-01-09 18:52:39 +00001090 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1091 VT.bitsLT(MemVT)) {
1092 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1093 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1094 }
1095
Tom Stellardbc6c5232016-10-17 16:21:45 +00001096 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001097 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001098 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001099 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001100 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001101 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001102
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001103 return Val;
1104}
1105
1106SDValue SITargetLowering::lowerKernargMemParameter(
1107 SelectionDAG &DAG, EVT VT, EVT MemVT,
1108 const SDLoc &SL, SDValue Chain,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001109 uint64_t Offset, unsigned Align, bool Signed,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001110 const ISD::InputArg *Arg) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001111 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
1112 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
1113 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1114
Matt Arsenault90083d32018-06-07 09:54:49 +00001115
1116 // Try to avoid using an extload by loading earlier than the argument address,
1117 // and extracting the relevant bits. The load should hopefully be merged with
1118 // the previous argument.
1119 if (Align < 4) {
1121 assert(MemVT.getStoreSize() < 4);
1122 int64_t AlignDownOffset = alignDown(Offset, 4);
1123 int64_t OffsetDiff = Offset - AlignDownOffset;
1124
1125 EVT IntVT = MemVT.changeTypeToInteger();
1126
1127 // TODO: If we passed in the base kernel offset we could have a better
1128 // alignment than 4, but we don't really need it.
1129 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1130 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1131 MachineMemOperand::MODereferenceable |
1132 MachineMemOperand::MOInvariant);
1133
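    // The argument lives OffsetDiff bytes into the loaded dword; shift those
    // bytes down to bit 0 and truncate to the in-memory type.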
1134 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1135 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1136
1137 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1138 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1139 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1140
1141
1142 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1143 }
1144
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001145 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1146 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001147 MachineMemOperand::MODereferenceable |
1148 MachineMemOperand::MOInvariant);
1149
1150 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001151 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001152}
1153
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001154SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1155 const SDLoc &SL, SDValue Chain,
1156 const ISD::InputArg &Arg) const {
1157 MachineFunction &MF = DAG.getMachineFunction();
1158 MachineFrameInfo &MFI = MF.getFrameInfo();
1159
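  // A byval argument already lives in the caller's stack frame; just hand
  // back a frame index for the fixed stack object.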
1160 if (Arg.Flags.isByVal()) {
1161 unsigned Size = Arg.Flags.getByValSize();
1162 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1163 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1164 }
1165
1166 unsigned ArgOffset = VA.getLocMemOffset();
1167 unsigned ArgSize = VA.getValVT().getStoreSize();
1168
1169 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1170
1171 // Create load nodes to retrieve arguments from the stack.
1172 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1173 SDValue ArgValue;
1174
1175  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1176 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1177 MVT MemVT = VA.getValVT();
1178
1179 switch (VA.getLocInfo()) {
1180 default:
1181 break;
1182 case CCValAssign::BCvt:
1183 MemVT = VA.getLocVT();
1184 break;
1185 case CCValAssign::SExt:
1186 ExtType = ISD::SEXTLOAD;
1187 break;
1188 case CCValAssign::ZExt:
1189 ExtType = ISD::ZEXTLOAD;
1190 break;
1191 case CCValAssign::AExt:
1192 ExtType = ISD::EXTLOAD;
1193 break;
1194 }
1195
1196 ArgValue = DAG.getExtLoad(
1197 ExtType, SL, VA.getLocVT(), Chain, FIN,
1198 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1199 MemVT);
1200 return ArgValue;
1201}
1202
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001203SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1204 const SIMachineFunctionInfo &MFI,
1205 EVT VT,
1206 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1207 const ArgDescriptor *Reg;
1208 const TargetRegisterClass *RC;
1209
1210 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1211 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1212}
1213
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001214static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1215 CallingConv::ID CallConv,
1216 ArrayRef<ISD::InputArg> Ins,
1217 BitVector &Skipped,
1218 FunctionType *FType,
1219 SIMachineFunctionInfo *Info) {
1220 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
1221 const ISD::InputArg &Arg = Ins[I];
1222
1223 // First check if it's a PS input addr.
1224 if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
1225 !Arg.Flags.isByVal() && PSInputNum <= 15) {
1226
1227 if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
1228 // We can safely skip PS inputs.
1229 Skipped.set(I);
1230 ++PSInputNum;
1231 continue;
1232 }
1233
1234 Info->markPSInputAllocated(PSInputNum);
1235 if (Arg.Used)
1236 Info->markPSInputEnabled(PSInputNum);
1237
1238 ++PSInputNum;
1239 }
1240
1241    // Second, split vertices into their elements.
1242 if (Arg.VT.isVector()) {
1243 ISD::InputArg NewArg = Arg;
1244 NewArg.Flags.setSplit();
1245 NewArg.VT = Arg.VT.getVectorElementType();
1246
1247 // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
1248 // three or five element vertex only needs three or five registers,
1249 // NOT four or eight.
1250 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
1251 unsigned NumElements = ParamType->getVectorNumElements();
1252
1253 for (unsigned J = 0; J != NumElements; ++J) {
1254 Splits.push_back(NewArg);
1255 NewArg.PartOffset += NewArg.VT.getStoreSize();
1256 }
1257 } else {
1258 Splits.push_back(Arg);
1259 }
1260 }
1261}
1262
1263// Allocate special inputs passed in VGPRs.
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001264static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1265 MachineFunction &MF,
1266 const SIRegisterInfo &TRI,
1267 SIMachineFunctionInfo &Info) {
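  // For entry functions the workitem IDs arrive in fixed VGPRs: X in VGPR0,
  // Y in VGPR1 and Z in VGPR2.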
1268 if (Info.hasWorkItemIDX()) {
1269 unsigned Reg = AMDGPU::VGPR0;
1270 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001271
1272 CCInfo.AllocateReg(Reg);
1273 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1274 }
1275
1276 if (Info.hasWorkItemIDY()) {
1277 unsigned Reg = AMDGPU::VGPR1;
1278 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1279
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001280 CCInfo.AllocateReg(Reg);
1281 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1282 }
1283
1284 if (Info.hasWorkItemIDZ()) {
1285 unsigned Reg = AMDGPU::VGPR2;
1286 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1287
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001288 CCInfo.AllocateReg(Reg);
1289 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1290 }
1291}
1292
1293// Try to allocate a VGPR at the end of the argument list, or if no argument
1294// VGPRs are left, allocate a stack slot instead.
1295static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1296 ArrayRef<MCPhysReg> ArgVGPRs
1297 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1298 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1299 if (RegIdx == ArgVGPRs.size()) {
1300 // Spill to stack required.
1301 int64_t Offset = CCInfo.AllocateStack(4, 4);
1302
1303 return ArgDescriptor::createStack(Offset);
1304 }
1305
1306 unsigned Reg = ArgVGPRs[RegIdx];
1307 Reg = CCInfo.AllocateReg(Reg);
1308 assert(Reg != AMDGPU::NoRegister);
1309
1310 MachineFunction &MF = CCInfo.getMachineFunction();
1311 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1312 return ArgDescriptor::createRegister(Reg);
1313}
1314
1315static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1316 const TargetRegisterClass *RC,
1317 unsigned NumArgRegs) {
1318 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1319 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1320 if (RegIdx == ArgSGPRs.size())
1321 report_fatal_error("ran out of SGPRs for arguments");
1322
1323 unsigned Reg = ArgSGPRs[RegIdx];
1324 Reg = CCInfo.AllocateReg(Reg);
1325 assert(Reg != AMDGPU::NoRegister);
1326
1327 MachineFunction &MF = CCInfo.getMachineFunction();
1328 MF.addLiveIn(Reg, RC);
1329 return ArgDescriptor::createRegister(Reg);
1330}
1331
1332static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1333 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1334}
1335
1336static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1337 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1338}
1339
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001340static void allocateSpecialInputVGPRs(CCState &CCInfo,
1341 MachineFunction &MF,
1342 const SIRegisterInfo &TRI,
1343 SIMachineFunctionInfo &Info) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001344 if (Info.hasWorkItemIDX())
1345 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001346
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001347 if (Info.hasWorkItemIDY())
1348 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001349
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001350 if (Info.hasWorkItemIDZ())
1351 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1352}
1353
1354static void allocateSpecialInputSGPRs(CCState &CCInfo,
1355 MachineFunction &MF,
1356 const SIRegisterInfo &TRI,
1357 SIMachineFunctionInfo &Info) {
1358 auto &ArgInfo = Info.getArgInfo();
1359
1360 // TODO: Unify handling with private memory pointers.
1361
1362 if (Info.hasDispatchPtr())
1363 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1364
1365 if (Info.hasQueuePtr())
1366 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1367
1368 if (Info.hasKernargSegmentPtr())
1369 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1370
1371 if (Info.hasDispatchID())
1372 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1373
1374 // flat_scratch_init is not applicable for non-kernel functions.
1375
1376 if (Info.hasWorkGroupIDX())
1377 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1378
1379 if (Info.hasWorkGroupIDY())
1380 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1381
1382 if (Info.hasWorkGroupIDZ())
1383 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
Matt Arsenault817c2532017-08-03 23:12:44 +00001384
1385 if (Info.hasImplicitArgPtr())
1386 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001387}
1388
1389// Allocate special inputs passed in user SGPRs.
1390static void allocateHSAUserSGPRs(CCState &CCInfo,
1391 MachineFunction &MF,
1392 const SIRegisterInfo &TRI,
1393 SIMachineFunctionInfo &Info) {
Matt Arsenault10fc0622017-06-26 03:01:31 +00001394 if (Info.hasImplicitBufferPtr()) {
1395 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1396 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1397 CCInfo.AllocateReg(ImplicitBufferPtrReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001398 }
1399
1400 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1401 if (Info.hasPrivateSegmentBuffer()) {
1402 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1403 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1404 CCInfo.AllocateReg(PrivateSegmentBufferReg);
1405 }
1406
1407 if (Info.hasDispatchPtr()) {
1408 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1409 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1410 CCInfo.AllocateReg(DispatchPtrReg);
1411 }
1412
1413 if (Info.hasQueuePtr()) {
1414 unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1415 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1416 CCInfo.AllocateReg(QueuePtrReg);
1417 }
1418
1419 if (Info.hasKernargSegmentPtr()) {
1420 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1421 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1422 CCInfo.AllocateReg(InputPtrReg);
1423 }
1424
1425 if (Info.hasDispatchID()) {
1426 unsigned DispatchIDReg = Info.addDispatchID(TRI);
1427 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1428 CCInfo.AllocateReg(DispatchIDReg);
1429 }
1430
1431 if (Info.hasFlatScratchInit()) {
1432 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1433 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1434 CCInfo.AllocateReg(FlatScratchInitReg);
1435 }
1436
1437 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1438 // these from the dispatch pointer.
1439}
1440
1441// Allocate special input registers that are initialized per-wave.
1442static void allocateSystemSGPRs(CCState &CCInfo,
1443 MachineFunction &MF,
1444 SIMachineFunctionInfo &Info,
Marek Olsak584d2c02017-05-04 22:25:20 +00001445 CallingConv::ID CallConv,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001446 bool IsShader) {
1447 if (Info.hasWorkGroupIDX()) {
1448 unsigned Reg = Info.addWorkGroupIDX();
1449 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1450 CCInfo.AllocateReg(Reg);
1451 }
1452
1453 if (Info.hasWorkGroupIDY()) {
1454 unsigned Reg = Info.addWorkGroupIDY();
1455 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1456 CCInfo.AllocateReg(Reg);
1457 }
1458
1459 if (Info.hasWorkGroupIDZ()) {
1460 unsigned Reg = Info.addWorkGroupIDZ();
1461 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1462 CCInfo.AllocateReg(Reg);
1463 }
1464
1465 if (Info.hasWorkGroupInfo()) {
1466 unsigned Reg = Info.addWorkGroupInfo();
1467 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1468 CCInfo.AllocateReg(Reg);
1469 }
1470
1471 if (Info.hasPrivateSegmentWaveByteOffset()) {
1472 // Scratch wave offset passed in system SGPR.
1473 unsigned PrivateSegmentWaveByteOffsetReg;
1474
1475 if (IsShader) {
Marek Olsak584d2c02017-05-04 22:25:20 +00001476 PrivateSegmentWaveByteOffsetReg =
1477 Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1478
1479 // This is true if the scratch wave byte offset doesn't have a fixed
1480 // location.
1481 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1482 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1483 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1484 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001485 } else
1486 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1487
1488 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1489 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1490 }
1491}
1492
1493static void reservePrivateMemoryRegs(const TargetMachine &TM,
1494 MachineFunction &MF,
1495 const SIRegisterInfo &TRI,
Matt Arsenault1cc47f82017-07-18 16:44:56 +00001496 SIMachineFunctionInfo &Info) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001497 // Now that we've figured out where the scratch register inputs are, see if
1498  // we should reserve the arguments and use them directly.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001499 MachineFrameInfo &MFI = MF.getFrameInfo();
1500 bool HasStackObjects = MFI.hasStackObjects();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001501
1502 // Record that we know we have non-spill stack objects so we don't need to
1503 // check all stack objects later.
1504 if (HasStackObjects)
1505 Info.setHasNonSpillStackObjects(true);
1506
1507 // Everything live out of a block is spilled with fast regalloc, so it's
1508 // almost certain that spilling will be required.
1509 if (TM.getOptLevel() == CodeGenOpt::None)
1510 HasStackObjects = true;
1511
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001512 // For now assume stack access is needed in any callee functions, so we need
1513 // the scratch registers to pass in.
1514 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1515
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001516 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001517 if (ST.isAmdCodeObjectV2(MF.getFunction())) {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001518 if (RequiresStackAccess) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001519 // If we have stack objects, we unquestionably need the private buffer
1520 // resource. For the Code Object V2 ABI, this will be the first 4 user
1521 // SGPR inputs. We can reserve those and use them directly.
1522
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001523 unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
1524 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001525 Info.setScratchRSrcReg(PrivateSegmentBufferReg);
1526
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001527 if (MFI.hasCalls()) {
1528 // If we have calls, we need to keep the frame register in a register
1529 // that won't be clobbered by a call, so ensure it is copied somewhere.
1530
1531 // This is not a problem for the scratch wave offset, because the same
1532 // registers are reserved in all functions.
1533
1534 // FIXME: Nothing is really ensuring this is a call preserved register,
1535 // it's just selected from the end so it happens to be.
1536 unsigned ReservedOffsetReg
1537 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1538 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1539 } else {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001540 unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
1541 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001542 Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
1543 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001544 } else {
1545 unsigned ReservedBufferReg
1546 = TRI.reservedPrivateSegmentBufferReg(MF);
1547 unsigned ReservedOffsetReg
1548 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1549
1550 // We tentatively reserve the last registers (skipping the last two
1551 // which may contain VCC). After register allocation, we'll replace
1552 // these with the ones immediately after those which were really
1553      // allocated. In the prologue, copies will be inserted from the argument
1554 // to these reserved registers.
1555 Info.setScratchRSrcReg(ReservedBufferReg);
1556 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1557 }
1558 } else {
1559 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
1560
1561 // Without HSA, relocations are used for the scratch pointer and the
1562 // buffer resource setup is always inserted in the prologue. Scratch wave
1563 // offset is still in an input SGPR.
1564 Info.setScratchRSrcReg(ReservedBufferReg);
1565
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001566 if (HasStackObjects && !MFI.hasCalls()) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001567 unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
1568 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001569 Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
1570 } else {
1571 unsigned ReservedOffsetReg
1572 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1573 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1574 }
1575 }
1576}
1577
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001578bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1579 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1580 return !Info->isEntryFunction();
1581}
1582
1583void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1584
1585}
1586
1587void SITargetLowering::insertCopiesSplitCSR(
1588 MachineBasicBlock *Entry,
1589 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1590 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1591
1592 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1593 if (!IStart)
1594 return;
1595
1596 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1597 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1598 MachineBasicBlock::iterator MBBI = Entry->begin();
1599 for (const MCPhysReg *I = IStart; *I; ++I) {
1600 const TargetRegisterClass *RC = nullptr;
1601 if (AMDGPU::SReg_64RegClass.contains(*I))
1602 RC = &AMDGPU::SGPR_64RegClass;
1603 else if (AMDGPU::SReg_32RegClass.contains(*I))
1604 RC = &AMDGPU::SGPR_32RegClass;
1605 else
1606 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1607
1608 unsigned NewVR = MRI->createVirtualRegister(RC);
1609 // Create copy from CSR to a virtual register.
1610 Entry->addLiveIn(*I);
1611 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1612 .addReg(*I);
1613
1614 // Insert the copy-back instructions right before the terminator.
1615 for (auto *Exit : Exits)
1616 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1617 TII->get(TargetOpcode::COPY), *I)
1618 .addReg(NewVR);
1619 }
1620}
1621
Christian Konig2c8f6d52013-03-07 09:03:52 +00001622SDValue SITargetLowering::LowerFormalArguments(
Eric Christopher7792e322015-01-30 23:24:40 +00001623 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001624 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1625 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001626 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001627
1628 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001629 const Function &Fn = MF.getFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00001630 FunctionType *FType = MF.getFunction().getFunctionType();
Christian Konig99ee0f42013-03-07 09:04:14 +00001631 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001632 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001633
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +00001634 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00001635 DiagnosticInfoUnsupported NoGraphicsHSA(
Matthias Braunf1caa282017-12-15 22:22:58 +00001636 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
Matt Arsenaultd48da142015-11-02 23:23:02 +00001637 DAG.getContext()->diagnose(NoGraphicsHSA);
Diana Picus81bc3172016-05-26 15:24:55 +00001638 return DAG.getEntryNode();
Matt Arsenaultd48da142015-11-02 23:23:02 +00001639 }
1640
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00001641 // Create stack objects that are used for emitting debugger prologue if
1642 // "amdgpu-debugger-emit-prologue" attribute was specified.
1643 if (ST.debuggerEmitPrologue())
1644 createDebuggerPrologueStackObjects(MF);
1645
Christian Konig2c8f6d52013-03-07 09:03:52 +00001646 SmallVector<ISD::InputArg, 16> Splits;
Christian Konig2c8f6d52013-03-07 09:03:52 +00001647 SmallVector<CCValAssign, 16> ArgLocs;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001648 BitVector Skipped(Ins.size());
Eric Christopherb5217502014-08-06 18:45:26 +00001649 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1650 *DAG.getContext());
Christian Konig2c8f6d52013-03-07 09:03:52 +00001651
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001652 bool IsShader = AMDGPU::isShader(CallConv);
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +00001653 bool IsKernel = AMDGPU::isKernel(CallConv);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001654 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
Christian Konig99ee0f42013-03-07 09:04:14 +00001655
Matt Arsenaultd1867c02017-08-02 00:59:51 +00001656 if (!IsEntryFunc) {
1657 // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1658 // this when allocating argument fixed offsets.
1659 CCInfo.AllocateStack(4, 4);
1660 }
1661
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001662 if (IsShader) {
1663 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1664
1665 // At least one interpolation mode must be enabled or else the GPU will
1666 // hang.
1667 //
1668 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1669 // set PSInputAddr, the user wants to enable some bits after the compilation
1670 // based on run-time states. Since we can't know what the final PSInputEna
1671 // will look like, so we shouldn't do anything here and the user should take
1672    // will look like, we shouldn't do anything here and the user should take
1673 //
1674 // Otherwise, the following restrictions apply:
1675 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1676 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1677 // enabled too.
Tim Renoufc8ffffe2017-10-12 16:16:41 +00001678 if (CallConv == CallingConv::AMDGPU_PS) {
1679 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1680 ((Info->getPSInputAddr() & 0xF) == 0 &&
1681 Info->isPSInputAllocated(11))) {
1682 CCInfo.AllocateReg(AMDGPU::VGPR0);
1683 CCInfo.AllocateReg(AMDGPU::VGPR1);
1684 Info->markPSInputAllocated(0);
1685 Info->markPSInputEnabled(0);
1686 }
1687 if (Subtarget->isAmdPalOS()) {
1688 // For isAmdPalOS, the user does not enable some bits after compilation
1689 // based on run-time states; the register values being generated here are
1690 // the final ones set in hardware. Therefore we need to apply the
1691 // workaround to PSInputAddr and PSInputEnable together. (The case where
1692 // a bit is set in PSInputAddr but not PSInputEnable is where the
1693 // frontend set up an input arg for a particular interpolation mode, but
1694 // nothing uses that input arg. Really we should have an earlier pass
1695 // that removes such an arg.)
1696 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1697 if ((PsInputBits & 0x7F) == 0 ||
1698 ((PsInputBits & 0xF) == 0 &&
1699 (PsInputBits >> 11 & 1)))
1700 Info->markPSInputEnabled(
1701 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1702 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001703 }
1704
Tom Stellard2f3f9852017-01-25 01:25:13 +00001705 assert(!Info->hasDispatchPtr() &&
Tom Stellardf110f8f2016-04-14 16:27:03 +00001706 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1707 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1708 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1709 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1710 !Info->hasWorkItemIDZ());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001711 } else if (IsKernel) {
1712 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001713 } else {
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001714 Splits.append(Ins.begin(), Ins.end());
Tom Stellardaf775432013-10-23 00:44:32 +00001715 }
1716
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001717 if (IsEntryFunc) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001718 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001719 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
Tom Stellard2f3f9852017-01-25 01:25:13 +00001720 }
1721
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001722 if (IsKernel) {
Tom Stellardbbeb45a2016-09-16 21:53:00 +00001723 analyzeFormalArgumentsCompute(CCInfo, Ins);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001724 } else {
1725 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
1726 CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
1727 }
Christian Konig2c8f6d52013-03-07 09:03:52 +00001728
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001729 SmallVector<SDValue, 16> Chains;
1730
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001731 // FIXME: This is the minimum kernel argument alignment. We should improve
1732 // this to the maximum alignment of the arguments.
1733 //
1734 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
1735 // kern arg offset.
1736 const unsigned KernelArgBaseAlign = 16;
1737 const unsigned ExplicitOffset = Subtarget->getExplicitKernelArgOffset(Fn);
1738
1739 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
Christian Konigb7be72d2013-05-17 09:46:48 +00001740 const ISD::InputArg &Arg = Ins[i];
Alexey Samsonova253bf92014-08-27 19:36:53 +00001741 if (Skipped[i]) {
Christian Konigb7be72d2013-05-17 09:46:48 +00001742 InVals.push_back(DAG.getUNDEF(Arg.VT));
Christian Konig99ee0f42013-03-07 09:04:14 +00001743 continue;
1744 }
1745
Christian Konig2c8f6d52013-03-07 09:03:52 +00001746 CCValAssign &VA = ArgLocs[ArgIdx++];
Craig Topper7f416c82014-11-16 21:17:18 +00001747 MVT VT = VA.getLocVT();
Tom Stellarded882c22013-06-03 17:40:11 +00001748
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001749 if (IsEntryFunc && VA.isMemLoc()) {
Tom Stellardaf775432013-10-23 00:44:32 +00001750 VT = Ins[i].VT;
Tom Stellardbbeb45a2016-09-16 21:53:00 +00001751 EVT MemVT = VA.getLocVT();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001752
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001753 const uint64_t Offset = ExplicitOffset + VA.getLocMemOffset();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001754 Info->setABIArgOffset(Offset + MemVT.getStoreSize());
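      // The known alignment of this argument is limited by both the kernarg
      // base alignment and the argument's byte offset within the segment.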
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001755 unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001756
Tom Stellard94593ee2013-06-03 17:40:18 +00001757      // The first 36 bytes of the input buffer contain information about
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001758      // thread group and global sizes for Clover.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001759 SDValue Arg = lowerKernargMemParameter(
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001760 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001761 Chains.push_back(Arg.getValue(1));
Tom Stellardca7ecf32014-08-22 18:49:31 +00001762
Craig Toppere3dcce92015-08-01 22:20:21 +00001763 auto *ParamTy =
Andrew Trick05938a52015-02-16 18:10:47 +00001764 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001765 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001766 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
Tom Stellardca7ecf32014-08-22 18:49:31 +00001767 // On SI local pointers are just offsets into LDS, so they are always
1768        // less than 16 bits. On CI and newer they could potentially be
1769 // real pointers, so we can't guarantee their size.
1770 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
1771 DAG.getValueType(MVT::i16));
1772 }
1773
Tom Stellarded882c22013-06-03 17:40:11 +00001774 InVals.push_back(Arg);
1775 continue;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001776 } else if (!IsEntryFunc && VA.isMemLoc()) {
1777 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
1778 InVals.push_back(Val);
1779 if (!Arg.Flags.isByVal())
1780 Chains.push_back(Val.getValue(1));
1781 continue;
Tom Stellarded882c22013-06-03 17:40:11 +00001782 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001783
Christian Konig2c8f6d52013-03-07 09:03:52 +00001784 assert(VA.isRegLoc() && "Parameter must be in a register!");
1785
1786 unsigned Reg = VA.getLocReg();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001787 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
Matt Arsenaultb3463552017-07-15 05:52:59 +00001788 EVT ValVT = VA.getValVT();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001789
1790 Reg = MF.addLiveIn(Reg, RC);
1791 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1792
Matt Arsenault45b98182017-11-15 00:45:43 +00001793 if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
1794 // The return object should be reasonably addressable.
1795
1796      // FIXME: This helps when the return is a real sret. If it is an
1797 // automatically inserted sret (i.e. CanLowerReturn returns false), an
1798 // extra copy is inserted in SelectionDAGBuilder which obscures this.
1799 unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
1800 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1801 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
1802 }
1803
Matt Arsenaultb3463552017-07-15 05:52:59 +00001804 // If this is an 8 or 16-bit value, it is really passed promoted
1805 // to 32 bits. Insert an assert[sz]ext to capture this, then
1806 // truncate to the right size.
1807 switch (VA.getLocInfo()) {
1808 case CCValAssign::Full:
1809 break;
1810 case CCValAssign::BCvt:
1811 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
1812 break;
1813 case CCValAssign::SExt:
1814 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
1815 DAG.getValueType(ValVT));
1816 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1817 break;
1818 case CCValAssign::ZExt:
1819 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
1820 DAG.getValueType(ValVT));
1821 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1822 break;
1823 case CCValAssign::AExt:
1824 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
1825 break;
1826 default:
1827 llvm_unreachable("Unknown loc info!");
1828 }
1829
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001830 if (IsShader && Arg.VT.isVector()) {
Christian Konig2c8f6d52013-03-07 09:03:52 +00001831 // Build a vector from the registers
Andrew Trick05938a52015-02-16 18:10:47 +00001832 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
Christian Konig2c8f6d52013-03-07 09:03:52 +00001833 unsigned NumElements = ParamType->getVectorNumElements();
1834
1835 SmallVector<SDValue, 4> Regs;
1836 Regs.push_back(Val);
1837 for (unsigned j = 1; j != NumElements; ++j) {
1838 Reg = ArgLocs[ArgIdx++].getLocReg();
1839 Reg = MF.addLiveIn(Reg, RC);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001840
1841 SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1842 Regs.push_back(Copy);
Christian Konig2c8f6d52013-03-07 09:03:52 +00001843 }
1844
1845 // Fill up the missing vector elements
1846 NumElements = Arg.VT.getVectorNumElements() - NumElements;
Benjamin Kramer6cd780f2015-02-17 15:29:18 +00001847 Regs.append(NumElements, DAG.getUNDEF(VT));
Matt Arsenault758659232013-05-18 00:21:46 +00001848
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001849 InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
Christian Konig2c8f6d52013-03-07 09:03:52 +00001850 continue;
1851 }
1852
1853 InVals.push_back(Val);
1854 }
Tom Stellarde99fb652015-01-20 19:33:04 +00001855
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001856 if (!IsEntryFunc) {
1857 // Special inputs come after user arguments.
1858 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
1859 }
1860
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001861 // Start adding system SGPRs.
1862 if (IsEntryFunc) {
1863 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001864 } else {
1865 CCInfo.AllocateReg(Info->getScratchRSrcReg());
1866 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
1867 CCInfo.AllocateReg(Info->getFrameOffsetReg());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001868 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001869 }
Matt Arsenaultcf13d182015-07-10 22:51:36 +00001870
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001871 auto &ArgUsageInfo =
1872 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001873 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001874
Matt Arsenault71bcbd42017-08-11 20:42:08 +00001875 unsigned StackArgSize = CCInfo.getNextStackOffset();
1876 Info->setBytesInStackArgArea(StackArgSize);
1877
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001878 return Chains.empty() ? Chain :
1879 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
Christian Konig2c8f6d52013-03-07 09:03:52 +00001880}
1881
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001882// TODO: If return values can't fit in registers, we should return as many as
1883// possible in registers before passing on stack.
1884bool SITargetLowering::CanLowerReturn(
1885 CallingConv::ID CallConv,
1886 MachineFunction &MF, bool IsVarArg,
1887 const SmallVectorImpl<ISD::OutputArg> &Outs,
1888 LLVMContext &Context) const {
1889 // Replacing returns with sret/stack usage doesn't make sense for shaders.
1890 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
1891 // for shaders. Vector types should be explicitly handled by CC.
1892 if (AMDGPU::isEntryFunctionCC(CallConv))
1893 return true;
1894
1895 SmallVector<CCValAssign, 16> RVLocs;
1896 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1897 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
1898}
1899
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001900SDValue
1901SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1902 bool isVarArg,
1903 const SmallVectorImpl<ISD::OutputArg> &Outs,
1904 const SmallVectorImpl<SDValue> &OutVals,
1905 const SDLoc &DL, SelectionDAG &DAG) const {
Marek Olsak8a0f3352016-01-13 17:23:04 +00001906 MachineFunction &MF = DAG.getMachineFunction();
1907 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1908
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001909 if (AMDGPU::isKernel(CallConv)) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00001910 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
1911 OutVals, DL, DAG);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001912 }
1913
1914 bool IsShader = AMDGPU::isShader(CallConv);
Marek Olsak8a0f3352016-01-13 17:23:04 +00001915
Marek Olsak8e9cc632016-01-13 17:23:09 +00001916 Info->setIfReturnsVoid(Outs.size() == 0);
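  // A shader that returns nothing doesn't need an epilog; the wave can simply
  // be ended (see the ENDPGM opcode selection at the bottom of this function).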
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001917 bool IsWaveEnd = Info->returnsVoid() && IsShader;
Marek Olsak8e9cc632016-01-13 17:23:09 +00001918
Marek Olsak8a0f3352016-01-13 17:23:04 +00001919 SmallVector<ISD::OutputArg, 48> Splits;
1920 SmallVector<SDValue, 48> SplitVals;
1921
1922 // Split vectors into their elements.
1923 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1924 const ISD::OutputArg &Out = Outs[i];
1925
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001926 if (IsShader && Out.VT.isVector()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00001927 MVT VT = Out.VT.getVectorElementType();
1928 ISD::OutputArg NewOut = Out;
1929 NewOut.Flags.setSplit();
1930 NewOut.VT = VT;
1931
1932 // We want the original number of vector elements here, e.g.
1933 // three or five, not four or eight.
1934 unsigned NumElements = Out.ArgVT.getVectorNumElements();
1935
1936 for (unsigned j = 0; j != NumElements; ++j) {
1937 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
1938 DAG.getConstant(j, DL, MVT::i32));
1939 SplitVals.push_back(Elem);
1940 Splits.push_back(NewOut);
1941 NewOut.PartOffset += NewOut.VT.getStoreSize();
1942 }
1943 } else {
1944 SplitVals.push_back(OutVals[i]);
1945 Splits.push_back(Out);
1946 }
1947 }
1948
1949 // CCValAssign - represent the assignment of the return value to a location.
1950 SmallVector<CCValAssign, 48> RVLocs;
1951
1952 // CCState - Info about the registers and stack slots.
1953 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1954 *DAG.getContext());
1955
1956 // Analyze outgoing return values.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001957 CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));
Marek Olsak8a0f3352016-01-13 17:23:04 +00001958
1959 SDValue Flag;
1960 SmallVector<SDValue, 48> RetOps;
1961 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
1962
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001963 // Add return address for callable functions.
1964 if (!Info->isEntryFunction()) {
1965 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1966 SDValue ReturnAddrReg = CreateLiveInRegister(
1967 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
1968
1969 // FIXME: Should be able to use a vreg here, but need a way to prevent it
1970    // from being allocated to a CSR.
1971
1972 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
1973 MVT::i64);
1974
1975 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
1976 Flag = Chain.getValue(1);
1977
1978 RetOps.push_back(PhysReturnAddrReg);
1979 }
1980
Marek Olsak8a0f3352016-01-13 17:23:04 +00001981 // Copy the result values into the output registers.
1982 for (unsigned i = 0, realRVLocIdx = 0;
1983 i != RVLocs.size();
1984 ++i, ++realRVLocIdx) {
1985 CCValAssign &VA = RVLocs[i];
1986 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001987 // TODO: Partially return in registers if return values don't fit.
Marek Olsak8a0f3352016-01-13 17:23:04 +00001988
1989 SDValue Arg = SplitVals[realRVLocIdx];
1990
1991 // Copied from other backends.
1992 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00001993 case CCValAssign::Full:
1994 break;
1995 case CCValAssign::BCvt:
1996 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1997 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001998 case CCValAssign::SExt:
1999 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2000 break;
2001 case CCValAssign::ZExt:
2002 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2003 break;
2004 case CCValAssign::AExt:
2005 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2006 break;
2007 default:
2008 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002009 }
2010
2011 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2012 Flag = Chain.getValue(1);
2013 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2014 }
2015
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002016 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002017 if (!Info->isEntryFunction()) {
2018 const SIRegisterInfo *TRI
2019 = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
2020 const MCPhysReg *I =
2021 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2022 if (I) {
2023 for (; *I; ++I) {
2024 if (AMDGPU::SReg_64RegClass.contains(*I))
2025 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2026 else if (AMDGPU::SReg_32RegClass.contains(*I))
2027 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2028 else
2029 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2030 }
2031 }
2032 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002033
Marek Olsak8a0f3352016-01-13 17:23:04 +00002034 // Update chain and glue.
2035 RetOps[0] = Chain;
2036 if (Flag.getNode())
2037 RetOps.push_back(Flag);
2038
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002039 unsigned Opc = AMDGPUISD::ENDPGM;
2040 if (!IsWaveEnd)
2041 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002042 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002043}
2044
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002045SDValue SITargetLowering::LowerCallResult(
2046 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2047 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2048 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2049 SDValue ThisVal) const {
2050 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2051
2052 // Assign locations to each value returned by this call.
2053 SmallVector<CCValAssign, 16> RVLocs;
2054 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2055 *DAG.getContext());
2056 CCInfo.AnalyzeCallResult(Ins, RetCC);
2057
2058 // Copy all of the result registers out of their specified physreg.
2059 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2060 CCValAssign VA = RVLocs[i];
2061 SDValue Val;
2062
2063 if (VA.isRegLoc()) {
2064 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2065 Chain = Val.getValue(1);
2066 InFlag = Val.getValue(2);
2067 } else if (VA.isMemLoc()) {
2068 report_fatal_error("TODO: return values in memory");
2069 } else
2070 llvm_unreachable("unknown argument location type");
2071
2072 switch (VA.getLocInfo()) {
2073 case CCValAssign::Full:
2074 break;
2075 case CCValAssign::BCvt:
2076 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2077 break;
2078 case CCValAssign::ZExt:
2079 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2080 DAG.getValueType(VA.getValVT()));
2081 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2082 break;
2083 case CCValAssign::SExt:
2084 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2085 DAG.getValueType(VA.getValVT()));
2086 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2087 break;
2088 case CCValAssign::AExt:
2089 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2090 break;
2091 default:
2092 llvm_unreachable("Unknown loc info!");
2093 }
2094
2095 InVals.push_back(Val);
2096 }
2097
2098 return Chain;
2099}
2100
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002101// Add code to pass special inputs required depending on used features separate
2102// from the explicit user arguments present in the IR.
2103void SITargetLowering::passSpecialInputs(
2104 CallLoweringInfo &CLI,
2105 const SIMachineFunctionInfo &Info,
2106 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2107 SmallVectorImpl<SDValue> &MemOpChains,
2108 SDValue Chain,
2109 SDValue StackPtr) const {
2110 // If we don't have a call site, this was a call inserted by
2111 // legalization. These can never use special inputs.
2112 if (!CLI.CS)
2113 return;
2114
2115 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002116 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002117
2118 SelectionDAG &DAG = CLI.DAG;
2119 const SDLoc &DL = CLI.DL;
2120
2121 const SISubtarget *ST = getSubtarget();
2122 const SIRegisterInfo *TRI = ST->getRegisterInfo();
2123
2124 auto &ArgUsageInfo =
2125 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2126 const AMDGPUFunctionArgInfo &CalleeArgInfo
2127 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2128
2129 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2130
2131 // TODO: Unify with private memory register handling. This is complicated by
2132 // the fact that at least in kernels, the input argument is not necessarily
2133 // in the same location as the input.
2134 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2135 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2136 AMDGPUFunctionArgInfo::QUEUE_PTR,
2137 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2138 AMDGPUFunctionArgInfo::DISPATCH_ID,
2139 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2140 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2141 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2142 AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2143 AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
Matt Arsenault817c2532017-08-03 23:12:44 +00002144 AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2145 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002146 };
2147
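  // For each special input the callee needs, forward the caller's copy (or
  // materialize it, e.g. the implicit arg pointer) into the register or stack
  // slot the callee expects.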
2148 for (auto InputID : InputRegs) {
2149 const ArgDescriptor *OutgoingArg;
2150 const TargetRegisterClass *ArgRC;
2151
2152 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2153 if (!OutgoingArg)
2154 continue;
2155
2156 const ArgDescriptor *IncomingArg;
2157 const TargetRegisterClass *IncomingArgRC;
2158 std::tie(IncomingArg, IncomingArgRC)
2159 = CallerArgInfo.getPreloadedValue(InputID);
2160 assert(IncomingArgRC == ArgRC);
2161
2162 // All special arguments are ints for now.
2163 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
Matt Arsenault817c2532017-08-03 23:12:44 +00002164 SDValue InputReg;
2165
2166 if (IncomingArg) {
2167 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2168 } else {
2169 // The implicit arg ptr is special because it doesn't have a corresponding
2170 // input for kernels, and is computed from the kernarg segment pointer.
2171 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2172 InputReg = getImplicitArgPtr(DAG, DL);
2173 }
2174
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002175 if (OutgoingArg->isRegister()) {
2176 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2177 } else {
2178 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr,
2179 InputReg,
2180 OutgoingArg->getStackOffset());
2181 MemOpChains.push_back(ArgStore);
2182 }
2183 }
2184}
2185
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002186static bool canGuaranteeTCO(CallingConv::ID CC) {
2187 return CC == CallingConv::Fast;
2188}
2189
2190/// Return true if we might ever do TCO for calls with this calling convention.
2191static bool mayTailCallThisCC(CallingConv::ID CC) {
2192 switch (CC) {
2193 case CallingConv::C:
2194 return true;
2195 default:
2196 return canGuaranteeTCO(CC);
2197 }
2198}
2199
2200bool SITargetLowering::isEligibleForTailCallOptimization(
2201 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2202 const SmallVectorImpl<ISD::OutputArg> &Outs,
2203 const SmallVectorImpl<SDValue> &OutVals,
2204 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2205 if (!mayTailCallThisCC(CalleeCC))
2206 return false;
2207
2208 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002209 const Function &CallerF = MF.getFunction();
2210 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002211 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2212 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2213
2214  // Kernels aren't callable, and don't have a live-in return address, so it
2215 // doesn't make sense to do a tail call with entry functions.
2216 if (!CallerPreserved)
2217 return false;
2218
2219 bool CCMatch = CallerCC == CalleeCC;
2220
2221 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2222 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2223 return true;
2224 return false;
2225 }
2226
2227 // TODO: Can we handle var args?
2228 if (IsVarArg)
2229 return false;
2230
Matthias Braunf1caa282017-12-15 22:22:58 +00002231 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002232 if (Arg.hasByValAttr())
2233 return false;
2234 }
2235
2236 LLVMContext &Ctx = *DAG.getContext();
2237
2238 // Check that the call results are passed in the same way.
2239 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2240 CCAssignFnForCall(CalleeCC, IsVarArg),
2241 CCAssignFnForCall(CallerCC, IsVarArg)))
2242 return false;
2243
2244 // The callee has to preserve all registers the caller needs to preserve.
2245 if (!CCMatch) {
2246 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2247 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2248 return false;
2249 }
2250
2251 // Nothing more to check if the callee is taking no arguments.
2252 if (Outs.empty())
2253 return true;
2254
2255 SmallVector<CCValAssign, 16> ArgLocs;
2256 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2257
2258 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2259
2260 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2261 // If the stack arguments for this call do not fit into our own save area then
2262 // the call cannot be made tail.
2263 // TODO: Is this really necessary?
2264 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2265 return false;
2266
2267 const MachineRegisterInfo &MRI = MF.getRegInfo();
2268 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2269}
2270
2271bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2272 if (!CI->isTailCall())
2273 return false;
2274
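  // Calls inside entry functions (kernels) can never be emitted as tail calls;
  // kernels have no return address to resume from.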
2275 const Function *ParentFn = CI->getParent()->getParent();
2276 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2277 return false;
2278
2279 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2280 return (Attr.getValueAsString() != "true");
2281}
2282
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002283// The wave scratch offset register is used as the global base pointer.
2284SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2285 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002286 SelectionDAG &DAG = CLI.DAG;
2287 const SDLoc &DL = CLI.DL;
2288 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2289 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2290 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2291 SDValue Chain = CLI.Chain;
2292 SDValue Callee = CLI.Callee;
2293 bool &IsTailCall = CLI.IsTailCall;
2294 CallingConv::ID CallConv = CLI.CallConv;
2295 bool IsVarArg = CLI.IsVarArg;
2296 bool IsSibCall = false;
2297 bool IsThisReturn = false;
2298 MachineFunction &MF = DAG.getMachineFunction();
2299
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002300 if (IsVarArg) {
2301 return lowerUnhandledCall(CLI, InVals,
2302 "unsupported call to variadic function ");
2303 }
2304
2305 if (!CLI.CS.getCalledFunction()) {
2306 return lowerUnhandledCall(CLI, InVals,
2307 "unsupported indirect call to function ");
2308 }
2309
2310 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2311 return lowerUnhandledCall(CLI, InVals,
2312 "unsupported required tail call to function ");
2313 }
2314
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002315 // The first 4 bytes are reserved for the callee's emergency stack slot.
2316 const unsigned CalleeUsableStackOffset = 4;
2317
2318 if (IsTailCall) {
2319 IsTailCall = isEligibleForTailCallOptimization(
2320 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2321 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2322 report_fatal_error("failed to perform tail call elimination on a call "
2323 "site marked musttail");
2324 }
2325
2326 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2327
2328 // A sibling call is one where we're under the usual C ABI and not planning
2329 // to change that but can still do a tail call:
2330 if (!TailCallOpt && IsTailCall)
2331 IsSibCall = true;
2332
2333 if (IsTailCall)
2334 ++NumTailCalls;
2335 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002336
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002337 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
Yaxun Liu1ac16612017-11-06 13:01:33 +00002338 // FIXME: Remove this hack for function pointer types after removing
2339 // support of old address space mapping. In the new address space
2340      // mapping, the pointer in the default address space is 64 bit and therefore
2341      // does not need this hack.
2342 if (Callee.getValueType() == MVT::i32) {
2343 const GlobalValue *GV = GA->getGlobal();
2344 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false,
2345 GA->getTargetFlags());
2346 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002347 }
Yaxun Liu1ac16612017-11-06 13:01:33 +00002348 assert(Callee.getValueType() == MVT::i64);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002349
2350 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2351
2352 // Analyze operands of the call, assigning locations to each operand.
2353 SmallVector<CCValAssign, 16> ArgLocs;
2354 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2355 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
2356 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2357
2358 // Get a count of how many bytes are to be pushed on the stack.
2359 unsigned NumBytes = CCInfo.getNextStackOffset();
2360
2361 if (IsSibCall) {
2362 // Since we're not changing the ABI to make this a tail call, the memory
2363 // operands are already available in the caller's incoming argument space.
2364 NumBytes = 0;
2365 }
2366
2367 // FPDiff is the byte offset of the call's argument area from the callee's.
2368 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2369 // by this amount for a tail call. In a sibling call it must be 0 because the
2370 // caller will deallocate the entire stack and the callee still expects its
2371 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002372 int32_t FPDiff = 0;
2373 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002374 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2375
Matt Arsenault6efd0822017-09-14 17:14:57 +00002376 SDValue CallerSavedFP;
2377
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002378 // Adjust the stack pointer for the new arguments...
2379 // These operations are automatically eliminated by the prolog/epilog pass
2380 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002381 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002382
2383 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2384
2385 // In the HSA case, this should be an identity copy.
2386 SDValue ScratchRSrcReg
2387 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2388 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2389
2390 // TODO: Don't hardcode these registers; get them from the callee function.
2391 SDValue ScratchWaveOffsetReg
2392 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2393 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault6efd0822017-09-14 17:14:57 +00002394
2395 if (!Info->isEntryFunction()) {
2396 // Avoid clobbering this function's FP value. In the current convention
2397 // the callee will overwrite this, so save/restore it around the call site.
2398 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2399 Info->getFrameOffsetReg(), MVT::i32);
2400 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002401 }
2402
2403 // Stack pointer relative accesses are done by changing the offset SGPR. This
2404 // is just the VGPR offset component.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002405 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002406
2407 SmallVector<SDValue, 8> MemOpChains;
2408 MVT PtrVT = MVT::i32;
2409
2410 // Walk the register/memloc assignments, inserting copies/loads.
2411 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2412 ++i, ++realArgIdx) {
2413 CCValAssign &VA = ArgLocs[i];
2414 SDValue Arg = OutVals[realArgIdx];
2415
2416 // Promote the value if needed.
2417 switch (VA.getLocInfo()) {
2418 case CCValAssign::Full:
2419 break;
2420 case CCValAssign::BCvt:
2421 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2422 break;
2423 case CCValAssign::ZExt:
2424 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2425 break;
2426 case CCValAssign::SExt:
2427 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2428 break;
2429 case CCValAssign::AExt:
2430 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2431 break;
2432 case CCValAssign::FPExt:
2433 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2434 break;
2435 default:
2436 llvm_unreachable("Unknown loc info!");
2437 }
2438
2439 if (VA.isRegLoc()) {
2440 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2441 } else {
2442 assert(VA.isMemLoc());
2443
2444 SDValue DstAddr;
2445 MachinePointerInfo DstInfo;
2446
2447 unsigned LocMemOffset = VA.getLocMemOffset();
2448 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002449
2450 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002451
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002452 if (IsTailCall) {
2453 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2454 unsigned OpSize = Flags.isByVal() ?
2455 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002456
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002457 Offset = Offset + FPDiff;
2458 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2459
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002460 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT),
2461 StackPtr);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002462 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2463
2464 // Make sure any stack arguments overlapping with where we're storing
2465 // are loaded before this eventual operation. Otherwise they'll be
2466 // clobbered.
2467
2468 // FIXME: Why is this really necessary? This seems to just result in a
2469 // lot of code to copy the stack arguments and write them back to the
2470 // same locations, which are supposed to be immutable?
2471 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2472 } else {
2473 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002474 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
2475 }
2476
2477 if (Outs[i].Flags.isByVal()) {
2478 SDValue SizeNode =
2479 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2480 SDValue Cpy = DAG.getMemcpy(
2481 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2482 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002483 /*isTailCall = */ false, DstInfo,
2484 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
2485 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002486
2487 MemOpChains.push_back(Cpy);
2488 } else {
2489 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
2490 MemOpChains.push_back(Store);
2491 }
2492 }
2493 }
2494
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002495 // Copy special input registers after user input arguments.
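  // (The "special inputs" are the implicit SGPR arguments a callee may need,
  // such as the dispatch/queue pointers and workgroup IDs; only the ones the
  // callee actually requires are forwarded.)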
2496 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2497
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002498 if (!MemOpChains.empty())
2499 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2500
2501 // Build a sequence of copy-to-reg nodes chained together with token chain
2502 // and flag operands which copy the outgoing args into the appropriate regs.
2503 SDValue InFlag;
2504 for (auto &RegToPass : RegsToPass) {
2505 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2506 RegToPass.second, InFlag);
2507 InFlag = Chain.getValue(1);
2508 }
2509
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002510
2511 SDValue PhysReturnAddrReg;
2512 if (IsTailCall) {
2513 // Since the return is being combined with the call, we need to pass on the
2514 // return address.
2515
2516 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2517 SDValue ReturnAddrReg = CreateLiveInRegister(
2518 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2519
2520 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2521 MVT::i64);
2522 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2523 InFlag = Chain.getValue(1);
2524 }
2525
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002526 // We don't usually want to end the call-sequence here because we would tidy
2527 // the frame up *after* the call. However, in the ABI-changing tail-call case
2528 // we've carefully laid out the parameters so that when sp is reset they'll be
2529 // in the correct location.
2530 if (IsTailCall && !IsSibCall) {
2531 Chain = DAG.getCALLSEQ_END(Chain,
2532 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2533 DAG.getTargetConstant(0, DL, MVT::i32),
2534 InFlag, DL);
2535 InFlag = Chain.getValue(1);
2536 }
2537
2538 std::vector<SDValue> Ops;
2539 Ops.push_back(Chain);
2540 Ops.push_back(Callee);
2541
2542 if (IsTailCall) {
2543 // Each tail call may have to adjust the stack by a different amount, so
2544 // this information must travel along with the operation for eventual
2545 // consumption by emitEpilogue.
2546 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002547
2548 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002549 }
2550
2551 // Add argument registers to the end of the list so that they are known live
2552 // into the call.
2553 for (auto &RegToPass : RegsToPass) {
2554 Ops.push_back(DAG.getRegister(RegToPass.first,
2555 RegToPass.second.getValueType()));
2556 }
2557
2558 // Add a register mask operand representing the call-preserved registers.
2559
2560 const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
2561 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2562 assert(Mask && "Missing call preserved mask for calling convention");
2563 Ops.push_back(DAG.getRegisterMask(Mask));
2564
2565 if (InFlag.getNode())
2566 Ops.push_back(InFlag);
2567
2568 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2569
2570 // If we're doing a tail call, use a TC_RETURN here rather than an
2571 // actual call instruction.
2572 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002573 MFI.setHasTailCall();
2574 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002575 }
2576
2577 // Returns a chain and a flag for retval copy to use.
2578 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2579 Chain = Call.getValue(0);
2580 InFlag = Call.getValue(1);
2581
Matt Arsenault6efd0822017-09-14 17:14:57 +00002582 if (CallerSavedFP) {
2583 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2584 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2585 InFlag = Chain.getValue(1);
2586 }
2587
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002588 uint64_t CalleePopBytes = NumBytes;
2589 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002590 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2591 InFlag, DL);
2592 if (!Ins.empty())
2593 InFlag = Chain.getValue(1);
2594
2595 // Handle result values, copying them out of physregs into vregs that we
2596 // return.
2597 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2598 InVals, IsThisReturn,
2599 IsThisReturn ? OutVals[0] : SDValue());
2600}
2601
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002602unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2603 SelectionDAG &DAG) const {
2604 unsigned Reg = StringSwitch<unsigned>(RegName)
2605 .Case("m0", AMDGPU::M0)
2606 .Case("exec", AMDGPU::EXEC)
2607 .Case("exec_lo", AMDGPU::EXEC_LO)
2608 .Case("exec_hi", AMDGPU::EXEC_HI)
2609 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2610 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2611 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2612 .Default(AMDGPU::NoRegister);
2613
2614 if (Reg == AMDGPU::NoRegister) {
2615 report_fatal_error(Twine("invalid register name \""
2616 + StringRef(RegName) + "\"."));
2617
2618 }
2619
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002620 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002621 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2622 report_fatal_error(Twine("invalid register \""
2623 + StringRef(RegName) + "\" for subtarget."));
2624 }
2625
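  // Check that the requested width matches the named register: the 32-bit
  // halves (and M0) are only valid for 32-bit values, the full 64-bit
  // registers only for 64-bit values.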
2626 switch (Reg) {
2627 case AMDGPU::M0:
2628 case AMDGPU::EXEC_LO:
2629 case AMDGPU::EXEC_HI:
2630 case AMDGPU::FLAT_SCR_LO:
2631 case AMDGPU::FLAT_SCR_HI:
2632 if (VT.getSizeInBits() == 32)
2633 return Reg;
2634 break;
2635 case AMDGPU::EXEC:
2636 case AMDGPU::FLAT_SCR:
2637 if (VT.getSizeInBits() == 64)
2638 return Reg;
2639 break;
2640 default:
2641 llvm_unreachable("missing register type checking");
2642 }
2643
2644 report_fatal_error(Twine("invalid type for register \""
2645 + StringRef(RegName) + "\"."));
2646}
2647
Matt Arsenault786724a2016-07-12 21:41:32 +00002648// If kill is not the last instruction, split the block so kill is always a
2649// proper terminator.
2650MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2651 MachineBasicBlock *BB) const {
2652 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2653
2654 MachineBasicBlock::iterator SplitPoint(&MI);
2655 ++SplitPoint;
2656
2657 if (SplitPoint == BB->end()) {
2658 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002659 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002660 return BB;
2661 }
2662
2663 MachineFunction *MF = BB->getParent();
2664 MachineBasicBlock *SplitBB
2665 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2666
Matt Arsenault786724a2016-07-12 21:41:32 +00002667 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2668 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2669
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002670 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002671 BB->addSuccessor(SplitBB);
2672
Marek Olsakce76ea02017-10-24 10:27:13 +00002673 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002674 return SplitBB;
2675}
2676
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002677// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2678// wavefront. If the value is uniform and just happens to be in a VGPR, this
2679// will only do one iteration. In the worst case, this will loop 64 times.
2680//
2681// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
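//
// A sketch of the loop built here (illustrative register names, not the exact
// MIR that is emitted):
//
//   loop:
//     s_idx = v_readfirstlane_b32 v_idx     ; pick one lane's index value
//     cond  = v_cmp_eq_u32 s_idx, v_idx     ; all lanes holding that index
//     saved = s_and_saveexec_b64 cond       ; restrict EXEC to those lanes
//     <use s_idx, either via M0 or via S_SET_GPR_IDX_ON>
//     exec  = s_xor_b64 exec, saved         ; drop the lanes just handled
//     s_cbranch_execnz loop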
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002682static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2683 const SIInstrInfo *TII,
2684 MachineRegisterInfo &MRI,
2685 MachineBasicBlock &OrigBB,
2686 MachineBasicBlock &LoopBB,
2687 const DebugLoc &DL,
2688 const MachineOperand &IdxReg,
2689 unsigned InitReg,
2690 unsigned ResultReg,
2691 unsigned PhiReg,
2692 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002693 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002694 bool UseGPRIdxMode,
2695 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002696 MachineBasicBlock::iterator I = LoopBB.begin();
2697
2698 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2699 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2700 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2701 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2702
2703 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2704 .addReg(InitReg)
2705 .addMBB(&OrigBB)
2706 .addReg(ResultReg)
2707 .addMBB(&LoopBB);
2708
2709 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2710 .addReg(InitSaveExecReg)
2711 .addMBB(&OrigBB)
2712 .addReg(NewExec)
2713 .addMBB(&LoopBB);
2714
2715 // Read the next variant <- also loop target.
2716 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2717 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2718
2719 // Compare the just read M0 value to all possible Idx values.
2720 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2721 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002722 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002723
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002724 // Update EXEC, saving the original EXEC value into NewExec.
2725 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2726 .addReg(CondReg, RegState::Kill);
2727
2728 MRI.setSimpleHint(NewExec, CondReg);
2729
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002730 if (UseGPRIdxMode) {
2731 unsigned IdxReg;
2732 if (Offset == 0) {
2733 IdxReg = CurrentIdxReg;
2734 } else {
2735 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2736 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2737 .addReg(CurrentIdxReg, RegState::Kill)
2738 .addImm(Offset);
2739 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002740 unsigned IdxMode = IsIndirectSrc ?
2741 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2742 MachineInstr *SetOn =
2743 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2744 .addReg(IdxReg, RegState::Kill)
2745 .addImm(IdxMode);
2746 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002747 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002748 // Move the current index value from its SGPR into M0.
2749 if (Offset == 0) {
2750 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2751 .addReg(CurrentIdxReg, RegState::Kill);
2752 } else {
2753 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2754 .addReg(CurrentIdxReg, RegState::Kill)
2755 .addImm(Offset);
2756 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002757 }
2758
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002759 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002760 MachineInstr *InsertPt =
2761 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002762 .addReg(AMDGPU::EXEC)
2763 .addReg(NewExec);
2764
2765 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2766 // s_cbranch_scc0?
2767
2768 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2769 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2770 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002771
2772 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002773}
2774
2775// This has slightly sub-optimal regalloc when the source vector is killed by
2776// the read. The register allocator does not understand that the kill is
2777// per-workitem, so the source is kept alive for the whole loop and we end up
2778// not re-using a subregister from it, using 1 more VGPR than necessary. That
2779// extra VGPR was avoided when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002780static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2781 MachineBasicBlock &MBB,
2782 MachineInstr &MI,
2783 unsigned InitResultReg,
2784 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002785 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002786 bool UseGPRIdxMode,
2787 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002788 MachineFunction *MF = MBB.getParent();
2789 MachineRegisterInfo &MRI = MF->getRegInfo();
2790 const DebugLoc &DL = MI.getDebugLoc();
2791 MachineBasicBlock::iterator I(&MI);
2792
2793 unsigned DstReg = MI.getOperand(0).getReg();
Matt Arsenault301162c2017-11-15 21:51:43 +00002794 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2795 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002796
2797 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2798
2799 // Save the EXEC mask
2800 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2801 .addReg(AMDGPU::EXEC);
2802
2803 // To insert the loop we need to split the block. Move everything after this
2804 // point to a new block, and insert a new empty block between the two.
2805 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2806 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2807 MachineFunction::iterator MBBI(MBB);
2808 ++MBBI;
2809
2810 MF->insert(MBBI, LoopBB);
2811 MF->insert(MBBI, RemainderBB);
2812
2813 LoopBB->addSuccessor(LoopBB);
2814 LoopBB->addSuccessor(RemainderBB);
2815
2816 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002817 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002818 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2819
2820 MBB.addSuccessor(LoopBB);
2821
2822 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2823
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002824 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2825 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002826 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002827
2828 MachineBasicBlock::iterator First = RemainderBB->begin();
2829 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2830 .addReg(SaveExec);
2831
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002832 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002833}
2834
2835// Returns subreg index, offset
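// For example, with a 128-bit register class (4 x 32-bit elements), an offset
// of 2 yields (sub2, 0); an out-of-range offset such as 7 is left as (sub0, 7)
// so it remains a purely dynamic (and out-of-bounds) index.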
2836static std::pair<unsigned, int>
2837computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2838 const TargetRegisterClass *SuperRC,
2839 unsigned VecReg,
2840 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002841 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002842
2843 // Skip out of bounds offsets, or else we would end up using an undefined
2844 // register.
2845 if (Offset >= NumElts || Offset < 0)
2846 return std::make_pair(AMDGPU::sub0, Offset);
2847
2848 return std::make_pair(AMDGPU::sub0 + Offset, 0);
2849}
2850
2851// Return true if the index is an SGPR and was set.
2852static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
2853 MachineRegisterInfo &MRI,
2854 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002855 int Offset,
2856 bool UseGPRIdxMode,
2857 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002858 MachineBasicBlock *MBB = MI.getParent();
2859 const DebugLoc &DL = MI.getDebugLoc();
2860 MachineBasicBlock::iterator I(&MI);
2861
2862 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2863 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
2864
2865 assert(Idx->getReg() != AMDGPU::NoRegister);
2866
2867 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
2868 return false;
2869
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002870 if (UseGPRIdxMode) {
2871 unsigned IdxMode = IsIndirectSrc ?
2872 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2873 if (Offset == 0) {
2874 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00002875 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2876 .add(*Idx)
2877 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002878
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002879 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002880 } else {
2881 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
2882 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00002883 .add(*Idx)
2884 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002885 MachineInstr *SetOn =
2886 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2887 .addReg(Tmp, RegState::Kill)
2888 .addImm(IdxMode);
2889
Matt Arsenaultdac31db2016-10-13 12:45:16 +00002890 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002891 }
2892
2893 return true;
2894 }
2895
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002896 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002897 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2898 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002899 } else {
2900 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00002901 .add(*Idx)
2902 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002903 }
2904
2905 return true;
2906}
2907
2908// Control flow needs to be inserted if indexing with a VGPR.
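// If the index is a uniform SGPR (or an immediate folded into the offset), a
// single move with M0 or GPR index mode set is enough; a divergent VGPR index
// goes through the waterfall loop built by loadM0FromVGPR instead.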
2909static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
2910 MachineBasicBlock &MBB,
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002911 const SISubtarget &ST) {
2912 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002913 const SIRegisterInfo &TRI = TII->getRegisterInfo();
2914 MachineFunction *MF = MBB.getParent();
2915 MachineRegisterInfo &MRI = MF->getRegInfo();
2916
2917 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002918 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002919 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
2920
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002921 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002922
2923 unsigned SubReg;
2924 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002925 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002926
Marek Olsake22fdb92017-03-21 17:00:32 +00002927 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002928
2929 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002930 MachineBasicBlock::iterator I(&MI);
2931 const DebugLoc &DL = MI.getDebugLoc();
2932
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002933 if (UseGPRIdxMode) {
2934 // TODO: Look at the uses to avoid the copy. This may require rescheduling
2935 // to avoid interfering with other uses, so probably requires a new
2936 // optimization pass.
2937 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002938 .addReg(SrcReg, RegState::Undef, SubReg)
2939 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002940 .addReg(AMDGPU::M0, RegState::Implicit);
2941 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
2942 } else {
2943 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002944 .addReg(SrcReg, RegState::Undef, SubReg)
2945 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002946 }
2947
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002948 MI.eraseFromParent();
2949
2950 return &MBB;
2951 }
2952
2953 const DebugLoc &DL = MI.getDebugLoc();
2954 MachineBasicBlock::iterator I(&MI);
2955
2956 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2957 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2958
2959 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
2960
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002961 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
2962 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002963 MachineBasicBlock *LoopBB = InsPt->getParent();
2964
2965 if (UseGPRIdxMode) {
2966 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002967 .addReg(SrcReg, RegState::Undef, SubReg)
2968 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002969 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002970 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002971 } else {
2972 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002973 .addReg(SrcReg, RegState::Undef, SubReg)
2974 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002975 }
2976
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00002977 MI.eraseFromParent();
2978
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002979 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002980}
2981
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002982static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
2983 const TargetRegisterClass *VecRC) {
2984 switch (TRI.getRegSizeInBits(*VecRC)) {
2985 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002986 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002987 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002988 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002989 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002990 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002991 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002992 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00002993 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00002994 return AMDGPU::V_MOVRELD_B32_V16;
2995 default:
2996 llvm_unreachable("unsupported size for MOVRELD pseudos");
2997 }
2998}
2999
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003000static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3001 MachineBasicBlock &MBB,
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003002 const SISubtarget &ST) {
3003 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003004 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3005 MachineFunction *MF = MBB.getParent();
3006 MachineRegisterInfo &MRI = MF->getRegInfo();
3007
3008 unsigned Dst = MI.getOperand(0).getReg();
3009 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3010 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3011 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3012 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3013 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3014
3015 // This can be an immediate, but will be folded later.
3016 assert(Val->getReg());
3017
3018 unsigned SubReg;
3019 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3020 SrcVec->getReg(),
3021 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00003022 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003023
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003024 if (Idx->getReg() == AMDGPU::NoRegister) {
3025 MachineBasicBlock::iterator I(&MI);
3026 const DebugLoc &DL = MI.getDebugLoc();
3027
3028 assert(Offset == 0);
3029
3030 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00003031 .add(*SrcVec)
3032 .add(*Val)
3033 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003034
3035 MI.eraseFromParent();
3036 return &MBB;
3037 }
3038
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003039 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003040 MachineBasicBlock::iterator I(&MI);
3041 const DebugLoc &DL = MI.getDebugLoc();
3042
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003043 if (UseGPRIdxMode) {
3044 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003045 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3046 .add(*Val)
3047 .addReg(Dst, RegState::ImplicitDefine)
3048 .addReg(SrcVec->getReg(), RegState::Implicit)
3049 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003050
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003051 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3052 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003053 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003054
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003055 BuildMI(MBB, I, DL, MovRelDesc)
3056 .addReg(Dst, RegState::Define)
3057 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00003058 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003059 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003060 }
3061
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003062 MI.eraseFromParent();
3063 return &MBB;
3064 }
3065
3066 if (Val->isReg())
3067 MRI.clearKillFlags(Val->getReg());
3068
3069 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003070
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003071 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3072
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003073 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003074 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003075 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003076
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003077 if (UseGPRIdxMode) {
3078 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003079 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3080 .add(*Val) // src0
3081 .addReg(Dst, RegState::ImplicitDefine)
3082 .addReg(PhiReg, RegState::Implicit)
3083 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003084 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003085 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003086 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003087
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003088 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3089 .addReg(Dst, RegState::Define)
3090 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003091 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003092 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003093 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003094
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003095 MI.eraseFromParent();
3096
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003097 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003098}
3099
Matt Arsenault786724a2016-07-12 21:41:32 +00003100MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3101 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00003102
3103 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3104 MachineFunction *MF = BB->getParent();
3105 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3106
3107 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00003108 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3109 report_fatal_error("missing mem operand from MIMG instruction");
3110 }
Tom Stellard244891d2016-12-20 15:52:17 +00003111 // Add a memoperand for mimg instructions so that they aren't assumed to
3112 // be ordered memory instructions.
3113
Tom Stellard244891d2016-12-20 15:52:17 +00003114 return BB;
3115 }
3116
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003117 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003118 case AMDGPU::S_ADD_U64_PSEUDO:
3119 case AMDGPU::S_SUB_U64_PSEUDO: {
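    // Split the 64-bit scalar add/sub into a low 32-bit op plus a high 32-bit
    // op that consumes the carry/borrow, then recombine the two halves with a
    // REG_SEQUENCE.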
3120 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3121 const DebugLoc &DL = MI.getDebugLoc();
3122
3123 MachineOperand &Dest = MI.getOperand(0);
3124 MachineOperand &Src0 = MI.getOperand(1);
3125 MachineOperand &Src1 = MI.getOperand(2);
3126
3127 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3128 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3129
3130 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3131 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3132 &AMDGPU::SReg_32_XM0RegClass);
3133 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3134 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3135 &AMDGPU::SReg_32_XM0RegClass);
3136
3137 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3138 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3139 &AMDGPU::SReg_32_XM0RegClass);
3140 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3141 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3142 &AMDGPU::SReg_32_XM0RegClass);
3143
3144 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3145
3146 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3147 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3148 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3149 .add(Src0Sub0)
3150 .add(Src1Sub0);
3151 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3152 .add(Src0Sub1)
3153 .add(Src1Sub1);
3154 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3155 .addReg(DestSub0)
3156 .addImm(AMDGPU::sub0)
3157 .addReg(DestSub1)
3158 .addImm(AMDGPU::sub1);
3159 MI.eraseFromParent();
3160 return BB;
3161 }
3162 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003163 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003164 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003165 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003166 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003167 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003168 }
Marek Olsak2d825902017-04-28 20:21:58 +00003169 case AMDGPU::SI_INIT_EXEC:
3170 // This should be before all vector instructions.
3171 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3172 AMDGPU::EXEC)
3173 .addImm(MI.getOperand(0).getImm());
3174 MI.eraseFromParent();
3175 return BB;
3176
3177 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3178 // Extract the thread count from an SGPR input and set EXEC accordingly.
3179 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3180 //
3181 // S_BFE_U32 count, input, {shift, 7}
3182 // S_BFM_B64 exec, count, 0
3183 // S_CMP_EQ_U32 count, 64
3184 // S_CMOV_B64 exec, -1
3185 MachineInstr *FirstMI = &*BB->begin();
3186 MachineRegisterInfo &MRI = MF->getRegInfo();
3187 unsigned InputReg = MI.getOperand(0).getReg();
3188 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3189 bool Found = false;
3190
3191 // Move the COPY of the input reg to the beginning, so that we can use it.
3192 for (auto I = BB->begin(); I != &MI; I++) {
3193 if (I->getOpcode() != TargetOpcode::COPY ||
3194 I->getOperand(0).getReg() != InputReg)
3195 continue;
3196
3197 if (I == FirstMI) {
3198 FirstMI = &*++BB->begin();
3199 } else {
3200 I->removeFromParent();
3201 BB->insert(FirstMI, &*I);
3202 }
3203 Found = true;
3204 break;
3205 }
3206 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003207 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003208
3209 // This should be before all vector instructions.
3210 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3211 .addReg(InputReg)
3212 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3213 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3214 AMDGPU::EXEC)
3215 .addReg(CountReg)
3216 .addImm(0);
3217 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3218 .addReg(CountReg, RegState::Kill)
3219 .addImm(64);
3220 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3221 AMDGPU::EXEC)
3222 .addImm(-1);
3223 MI.eraseFromParent();
3224 return BB;
3225 }
3226
Changpeng Fang01f60622016-03-15 17:28:44 +00003227 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003228 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003229 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003230 .add(MI.getOperand(0))
3231 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003232 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003233 return BB;
3234 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003235 case AMDGPU::SI_INDIRECT_SRC_V1:
3236 case AMDGPU::SI_INDIRECT_SRC_V2:
3237 case AMDGPU::SI_INDIRECT_SRC_V4:
3238 case AMDGPU::SI_INDIRECT_SRC_V8:
3239 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003240 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003241 case AMDGPU::SI_INDIRECT_DST_V1:
3242 case AMDGPU::SI_INDIRECT_DST_V2:
3243 case AMDGPU::SI_INDIRECT_DST_V4:
3244 case AMDGPU::SI_INDIRECT_DST_V8:
3245 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003246 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003247 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3248 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003249 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003250 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
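    // Expand the 64-bit select into two 32-bit V_CNDMASK_B32_e64 selects on
    // the sub0/sub1 halves, sharing a single copy of the condition, and
    // reassemble the result with a REG_SEQUENCE.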
3251 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003252
3253 unsigned Dst = MI.getOperand(0).getReg();
3254 unsigned Src0 = MI.getOperand(1).getReg();
3255 unsigned Src1 = MI.getOperand(2).getReg();
3256 const DebugLoc &DL = MI.getDebugLoc();
3257 unsigned SrcCond = MI.getOperand(3).getReg();
3258
3259 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3260 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003261 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenault22e41792016-08-27 01:00:37 +00003262
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003263 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3264 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003265 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3266 .addReg(Src0, 0, AMDGPU::sub0)
3267 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003268 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003269 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3270 .addReg(Src0, 0, AMDGPU::sub1)
3271 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003272 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003273
3274 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3275 .addReg(DstLo)
3276 .addImm(AMDGPU::sub0)
3277 .addReg(DstHi)
3278 .addImm(AMDGPU::sub1);
3279 MI.eraseFromParent();
3280 return BB;
3281 }
Matt Arsenault327188a2016-12-15 21:57:11 +00003282 case AMDGPU::SI_BR_UNDEF: {
3283 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3284 const DebugLoc &DL = MI.getDebugLoc();
3285 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003286 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003287 Br->getOperand(1).setIsUndef(true); // read undef SCC
3288 MI.eraseFromParent();
3289 return BB;
3290 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003291 case AMDGPU::ADJCALLSTACKUP:
3292 case AMDGPU::ADJCALLSTACKDOWN: {
3293 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3294 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003295
3296 // Add an implicit use of the frame offset reg to prevent the restore copy
3297 // inserted after the call from being reordered after stack operations in
3298 // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003299 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003300 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3301 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003302 return BB;
3303 }
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003304 case AMDGPU::SI_CALL_ISEL:
3305 case AMDGPU::SI_TCRETURN_ISEL: {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003306 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3307 const DebugLoc &DL = MI.getDebugLoc();
3308 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003309
3310 MachineRegisterInfo &MRI = MF->getRegInfo();
3311 unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3312 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3313 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3314
3315 const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3316
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003317 MachineInstrBuilder MIB;
3318 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3319 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3320 .add(MI.getOperand(0))
3321 .addGlobalAddress(G);
3322 } else {
3323 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3324 .add(MI.getOperand(0))
3325 .addGlobalAddress(G);
3326
3327 // There is an additional imm operand for tcreturn, but it should be in the
3328 // right place already.
3329 }
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003330
3331 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003332 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003333
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003334 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003335 MI.eraseFromParent();
3336 return BB;
3337 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003338 default:
3339 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003340 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003341}
3342
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003343bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3344 return isTypeLegal(VT.getScalarType());
3345}
3346
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003347bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3348 // This currently forces unfolding various combinations of fsub into fma with
3349 // free fneg'd operands. As long as we have fast FMA (controlled by
3350 // isFMAFasterThanFMulAndFAdd), we should perform these.
3351
3352 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3353 // most of these combines appear to be cycle neutral but save on instruction
3354 // count / code size.
3355 return true;
3356}
3357
Mehdi Amini44ede332015-07-09 02:09:04 +00003358EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3359 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003360 if (!VT.isVector()) {
3361 return MVT::i1;
3362 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003363 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003364}
3365
Matt Arsenault94163282016-12-22 16:36:25 +00003366MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3367 // TODO: Should i16 be used always if legal? For now it would force VALU
3368 // shifts.
3369 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003370}
3371
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003372// Answering this is somewhat tricky and depends on the specific device, since
3373// different devices have different rates for fma or for all f64 operations.
3374//
3375// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3376// regardless of which device (although the number of cycles differs between
3377// devices), so it is always profitable for f64.
3378//
3379// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3380// only on full rate devices. Normally, we should prefer selecting v_mad_f32
3381// which we can always do even without fused FP ops since it returns the same
3382// result as the separate operations and since it is always full
3383// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3384// however does not support denormals, so we do report fma as faster if we have
3385// a fast fma device and require denormals.
3386//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003387bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3388 VT = VT.getScalarType();
3389
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003390 switch (VT.getSimpleVT().SimpleTy) {
Matt Arsenault0084adc2018-04-30 19:08:16 +00003391 case MVT::f32: {
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003392 // This is as fast on some subtargets. However, we always have full rate f32
3393 // mad available, which returns the same result as the separate operations and
Matt Arsenault8d630032015-02-20 22:10:41 +00003394 // which we should prefer over fma. We can't use mad if we want to support
3395 // denormals, so only report fma as faster in that case.
Matt Arsenault0084adc2018-04-30 19:08:16 +00003396 if (Subtarget->hasFP32Denormals())
3397 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3398
3399 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3400 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3401 }
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003402 case MVT::f64:
3403 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003404 case MVT::f16:
3405 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003406 default:
3407 break;
3408 }
3409
3410 return false;
3411}
3412
Tom Stellard75aadc22012-12-11 21:25:42 +00003413//===----------------------------------------------------------------------===//
3414// Custom DAG Lowering Operations
3415//===----------------------------------------------------------------------===//
3416
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003417// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3418// wider vector type is legal.
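// For example, an fneg of v4f16 is split into two v2f16 fnegs whose results
// are concatenated back into a v4f16.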
3419SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3420 SelectionDAG &DAG) const {
3421 unsigned Opc = Op.getOpcode();
3422 EVT VT = Op.getValueType();
3423 assert(VT == MVT::v4f16);
3424
3425 SDValue Lo, Hi;
3426 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3427
3428 SDLoc SL(Op);
3429 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3430 Op->getFlags());
3431 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3432 Op->getFlags());
3433
3434 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3435}
3436
3437// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3438// wider vector type is legal.
3439SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3440 SelectionDAG &DAG) const {
3441 unsigned Opc = Op.getOpcode();
3442 EVT VT = Op.getValueType();
3443 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3444
3445 SDValue Lo0, Hi0;
3446 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3447 SDValue Lo1, Hi1;
3448 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3449
3450 SDLoc SL(Op);
3451
3452 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3453 Op->getFlags());
3454 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3455 Op->getFlags());
3456
3457 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3458}
3459
Tom Stellard75aadc22012-12-11 21:25:42 +00003460SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3461 switch (Op.getOpcode()) {
3462 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003463 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003464 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003465 SDValue Result = LowerLOAD(Op, DAG);
3466 assert((!Result.getNode() ||
3467 Result.getNode()->getNumValues() == 2) &&
3468 "Load should return a value and a chain");
3469 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003470 }
Tom Stellardaf775432013-10-23 00:44:32 +00003471
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003472 case ISD::FSIN:
3473 case ISD::FCOS:
3474 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003475 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003476 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003477 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003478 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003479 case ISD::GlobalAddress: {
3480 MachineFunction &MF = DAG.getMachineFunction();
3481 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3482 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003483 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003484 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003485 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003486 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003487 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003488 case ISD::INSERT_VECTOR_ELT:
3489 return lowerINSERT_VECTOR_ELT(Op, DAG);
3490 case ISD::EXTRACT_VECTOR_ELT:
3491 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Matt Arsenault67a98152018-05-16 11:47:30 +00003492 case ISD::BUILD_VECTOR:
3493 return lowerBUILD_VECTOR(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003494 case ISD::FP_ROUND:
3495 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003496 case ISD::TRAP:
Matt Arsenault3e025382017-04-24 17:49:13 +00003497 return lowerTRAP(Op, DAG);
Tony Tye43259df2018-05-16 16:19:34 +00003498 case ISD::DEBUGTRAP:
3499 return lowerDEBUGTRAP(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003500 case ISD::FABS:
3501 case ISD::FNEG:
3502 return splitUnaryVectorOp(Op, DAG);
3503 case ISD::SHL:
3504 case ISD::SRA:
3505 case ISD::SRL:
3506 case ISD::ADD:
3507 case ISD::SUB:
3508 case ISD::MUL:
3509 case ISD::SMIN:
3510 case ISD::SMAX:
3511 case ISD::UMIN:
3512 case ISD::UMAX:
3513 case ISD::FMINNUM:
3514 case ISD::FMAXNUM:
3515 case ISD::FADD:
3516 case ISD::FMUL:
3517 return splitBinaryVectorOp(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003518 }
3519 return SDValue();
3520}
3521
Changpeng Fang4737e892018-01-18 22:08:53 +00003522static unsigned getImageOpcode(unsigned IID) {
3523 switch (IID) {
3524 case Intrinsic::amdgcn_image_load:
3525 return AMDGPUISD::IMAGE_LOAD;
3526 case Intrinsic::amdgcn_image_load_mip:
3527 return AMDGPUISD::IMAGE_LOAD_MIP;
3528
3529 // Basic sample.
3530 case Intrinsic::amdgcn_image_sample:
3531 return AMDGPUISD::IMAGE_SAMPLE;
3532 case Intrinsic::amdgcn_image_sample_cl:
3533 return AMDGPUISD::IMAGE_SAMPLE_CL;
3534 case Intrinsic::amdgcn_image_sample_d:
3535 return AMDGPUISD::IMAGE_SAMPLE_D;
3536 case Intrinsic::amdgcn_image_sample_d_cl:
3537 return AMDGPUISD::IMAGE_SAMPLE_D_CL;
3538 case Intrinsic::amdgcn_image_sample_l:
3539 return AMDGPUISD::IMAGE_SAMPLE_L;
3540 case Intrinsic::amdgcn_image_sample_b:
3541 return AMDGPUISD::IMAGE_SAMPLE_B;
3542 case Intrinsic::amdgcn_image_sample_b_cl:
3543 return AMDGPUISD::IMAGE_SAMPLE_B_CL;
3544 case Intrinsic::amdgcn_image_sample_lz:
3545 return AMDGPUISD::IMAGE_SAMPLE_LZ;
3546 case Intrinsic::amdgcn_image_sample_cd:
3547 return AMDGPUISD::IMAGE_SAMPLE_CD;
3548 case Intrinsic::amdgcn_image_sample_cd_cl:
3549 return AMDGPUISD::IMAGE_SAMPLE_CD_CL;
3550
3551 // Sample with comparison.
3552 case Intrinsic::amdgcn_image_sample_c:
3553 return AMDGPUISD::IMAGE_SAMPLE_C;
3554 case Intrinsic::amdgcn_image_sample_c_cl:
3555 return AMDGPUISD::IMAGE_SAMPLE_C_CL;
3556 case Intrinsic::amdgcn_image_sample_c_d:
3557 return AMDGPUISD::IMAGE_SAMPLE_C_D;
3558 case Intrinsic::amdgcn_image_sample_c_d_cl:
3559 return AMDGPUISD::IMAGE_SAMPLE_C_D_CL;
3560 case Intrinsic::amdgcn_image_sample_c_l:
3561 return AMDGPUISD::IMAGE_SAMPLE_C_L;
3562 case Intrinsic::amdgcn_image_sample_c_b:
3563 return AMDGPUISD::IMAGE_SAMPLE_C_B;
3564 case Intrinsic::amdgcn_image_sample_c_b_cl:
3565 return AMDGPUISD::IMAGE_SAMPLE_C_B_CL;
3566 case Intrinsic::amdgcn_image_sample_c_lz:
3567 return AMDGPUISD::IMAGE_SAMPLE_C_LZ;
3568 case Intrinsic::amdgcn_image_sample_c_cd:
3569 return AMDGPUISD::IMAGE_SAMPLE_C_CD;
3570 case Intrinsic::amdgcn_image_sample_c_cd_cl:
3571 return AMDGPUISD::IMAGE_SAMPLE_C_CD_CL;
3572
3573 // Sample with offsets.
3574 case Intrinsic::amdgcn_image_sample_o:
3575 return AMDGPUISD::IMAGE_SAMPLE_O;
3576 case Intrinsic::amdgcn_image_sample_cl_o:
3577 return AMDGPUISD::IMAGE_SAMPLE_CL_O;
3578 case Intrinsic::amdgcn_image_sample_d_o:
3579 return AMDGPUISD::IMAGE_SAMPLE_D_O;
3580 case Intrinsic::amdgcn_image_sample_d_cl_o:
3581 return AMDGPUISD::IMAGE_SAMPLE_D_CL_O;
3582 case Intrinsic::amdgcn_image_sample_l_o:
3583 return AMDGPUISD::IMAGE_SAMPLE_L_O;
3584 case Intrinsic::amdgcn_image_sample_b_o:
3585 return AMDGPUISD::IMAGE_SAMPLE_B_O;
3586 case Intrinsic::amdgcn_image_sample_b_cl_o:
3587 return AMDGPUISD::IMAGE_SAMPLE_B_CL_O;
3588 case Intrinsic::amdgcn_image_sample_lz_o:
3589 return AMDGPUISD::IMAGE_SAMPLE_LZ_O;
3590 case Intrinsic::amdgcn_image_sample_cd_o:
3591 return AMDGPUISD::IMAGE_SAMPLE_CD_O;
3592 case Intrinsic::amdgcn_image_sample_cd_cl_o:
3593 return AMDGPUISD::IMAGE_SAMPLE_CD_CL_O;
3594
3595 // Sample with comparison and offsets.
3596 case Intrinsic::amdgcn_image_sample_c_o:
3597 return AMDGPUISD::IMAGE_SAMPLE_C_O;
3598 case Intrinsic::amdgcn_image_sample_c_cl_o:
3599 return AMDGPUISD::IMAGE_SAMPLE_C_CL_O;
3600 case Intrinsic::amdgcn_image_sample_c_d_o:
3601 return AMDGPUISD::IMAGE_SAMPLE_C_D_O;
3602 case Intrinsic::amdgcn_image_sample_c_d_cl_o:
3603 return AMDGPUISD::IMAGE_SAMPLE_C_D_CL_O;
3604 case Intrinsic::amdgcn_image_sample_c_l_o:
3605 return AMDGPUISD::IMAGE_SAMPLE_C_L_O;
3606 case Intrinsic::amdgcn_image_sample_c_b_o:
3607 return AMDGPUISD::IMAGE_SAMPLE_C_B_O;
3608 case Intrinsic::amdgcn_image_sample_c_b_cl_o:
3609 return AMDGPUISD::IMAGE_SAMPLE_C_B_CL_O;
3610 case Intrinsic::amdgcn_image_sample_c_lz_o:
3611 return AMDGPUISD::IMAGE_SAMPLE_C_LZ_O;
3612 case Intrinsic::amdgcn_image_sample_c_cd_o:
3613 return AMDGPUISD::IMAGE_SAMPLE_C_CD_O;
3614 case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
3615 return AMDGPUISD::IMAGE_SAMPLE_C_CD_CL_O;
3616
3617 // Basic gather4.
3618 case Intrinsic::amdgcn_image_gather4:
3619 return AMDGPUISD::IMAGE_GATHER4;
3620 case Intrinsic::amdgcn_image_gather4_cl:
3621 return AMDGPUISD::IMAGE_GATHER4_CL;
3622 case Intrinsic::amdgcn_image_gather4_l:
3623 return AMDGPUISD::IMAGE_GATHER4_L;
3624 case Intrinsic::amdgcn_image_gather4_b:
3625 return AMDGPUISD::IMAGE_GATHER4_B;
3626 case Intrinsic::amdgcn_image_gather4_b_cl:
3627 return AMDGPUISD::IMAGE_GATHER4_B_CL;
3628 case Intrinsic::amdgcn_image_gather4_lz:
3629 return AMDGPUISD::IMAGE_GATHER4_LZ;
3630
3631 // Gather4 with comparison.
3632 case Intrinsic::amdgcn_image_gather4_c:
3633 return AMDGPUISD::IMAGE_GATHER4_C;
3634 case Intrinsic::amdgcn_image_gather4_c_cl:
3635 return AMDGPUISD::IMAGE_GATHER4_C_CL;
3636 case Intrinsic::amdgcn_image_gather4_c_l:
3637 return AMDGPUISD::IMAGE_GATHER4_C_L;
3638 case Intrinsic::amdgcn_image_gather4_c_b:
3639 return AMDGPUISD::IMAGE_GATHER4_C_B;
3640 case Intrinsic::amdgcn_image_gather4_c_b_cl:
3641 return AMDGPUISD::IMAGE_GATHER4_C_B_CL;
3642 case Intrinsic::amdgcn_image_gather4_c_lz:
3643 return AMDGPUISD::IMAGE_GATHER4_C_LZ;
3644
3645 // Gather4 with offsets.
3646 case Intrinsic::amdgcn_image_gather4_o:
3647 return AMDGPUISD::IMAGE_GATHER4_O;
3648 case Intrinsic::amdgcn_image_gather4_cl_o:
3649 return AMDGPUISD::IMAGE_GATHER4_CL_O;
3650 case Intrinsic::amdgcn_image_gather4_l_o:
3651 return AMDGPUISD::IMAGE_GATHER4_L_O;
3652 case Intrinsic::amdgcn_image_gather4_b_o:
3653 return AMDGPUISD::IMAGE_GATHER4_B_O;
3654 case Intrinsic::amdgcn_image_gather4_b_cl_o:
3655 return AMDGPUISD::IMAGE_GATHER4_B_CL_O;
3656 case Intrinsic::amdgcn_image_gather4_lz_o:
3657 return AMDGPUISD::IMAGE_GATHER4_LZ_O;
3658
3659 // Gather4 with comparison and offsets.
3660 case Intrinsic::amdgcn_image_gather4_c_o:
3661 return AMDGPUISD::IMAGE_GATHER4_C_O;
3662 case Intrinsic::amdgcn_image_gather4_c_cl_o:
3663 return AMDGPUISD::IMAGE_GATHER4_C_CL_O;
3664 case Intrinsic::amdgcn_image_gather4_c_l_o:
3665 return AMDGPUISD::IMAGE_GATHER4_C_L_O;
3666 case Intrinsic::amdgcn_image_gather4_c_b_o:
3667 return AMDGPUISD::IMAGE_GATHER4_C_B_O;
3668 case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
3669 return AMDGPUISD::IMAGE_GATHER4_C_B_CL_O;
3670 case Intrinsic::amdgcn_image_gather4_c_lz_o:
3671 return AMDGPUISD::IMAGE_GATHER4_C_LZ_O;
3672
3673 default:
3674 break;
3675 }
3676 return 0;
3677}
3678
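/// Convert a d16 load result back to the type the IR expects. On subtargets
/// with unpacked D16 VMEM the hardware returns one i32 per element, so each
/// element is truncated to i16 and the vector is rebuilt as v2i16/v4i16
/// before bitcasting to v2f16/v4f16; packed layouts only need the bitcast.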
Matt Arsenault1349a042018-05-22 06:32:10 +00003679static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3680 const SDLoc &DL,
3681 SelectionDAG &DAG, bool Unpacked) {
3682 if (!LoadVT.isVector())
3683 return Result;
3684
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003685 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3686 // Truncate to v2i16/v4i16.
3687 EVT IntLoadVT = LoadVT.changeTypeToInteger();
Matt Arsenault1349a042018-05-22 06:32:10 +00003688
3689 // Workaround legalizer not scalarizing truncate after vector op
3690 // legalization by not creating an intermediate vector trunc.
3691 SmallVector<SDValue, 4> Elts;
3692 DAG.ExtractVectorElements(Result, Elts);
3693 for (SDValue &Elt : Elts)
3694 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3695
3696 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3697
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003698 // Bitcast to original type (v2f16/v4f16).
Matt Arsenault1349a042018-05-22 06:32:10 +00003699 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003700 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003701
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003702 // Cast back to the original packed type.
3703 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3704}
3705
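/// Rebuild a d16 memory intrinsic so that, on unpacked subtargets, it returns
/// one i32 per element, then convert the loaded value back to the original
/// packed type via adjustLoadValueTypeImpl.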
Matt Arsenault1349a042018-05-22 06:32:10 +00003706SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3707 MemSDNode *M,
3708 SelectionDAG &DAG,
3709 bool IsIntrinsic) const {
3710 SDLoc DL(M);
3711 SmallVector<SDValue, 10> Ops;
3712 Ops.reserve(M->getNumOperands());
3713
3714 Ops.push_back(M->getOperand(0));
3715 if (IsIntrinsic)
3716 Ops.push_back(DAG.getConstant(Opcode, DL, MVT::i32));
3717
3718 // Skip 1, as it is the intrinsic ID.
3719 for (unsigned I = 2, E = M->getNumOperands(); I != E; ++I)
3720 Ops.push_back(M->getOperand(I));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003721
3722 bool Unpacked = Subtarget->hasUnpackedD16VMem();
Matt Arsenault1349a042018-05-22 06:32:10 +00003723 EVT LoadVT = M->getValueType(0);
3724
Matt Arsenault1349a042018-05-22 06:32:10 +00003725 EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                   LoadVT.getVectorNumElements());
  }
3731
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003732 // Change from v4f16/v2f16 to EquivLoadVT.
3733 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3734
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003735 SDValue Load
3736 = DAG.getMemIntrinsicNode(
3737 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3738 VTList, Ops, M->getMemoryVT(),
3739 M->getMemOperand());
3740 if (!Unpacked) // Just adjusted the opcode.
3741 return Load;
Changpeng Fang4737e892018-01-18 22:08:53 +00003742
Matt Arsenault1349a042018-05-22 06:32:10 +00003743 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
Changpeng Fang4737e892018-01-18 22:08:53 +00003744
Matt Arsenault1349a042018-05-22 06:32:10 +00003745 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003746}
3747
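/// Provide legal replacement results for custom nodes whose result types need
/// legalization: v2i16/v2f16 vector element ops, packed conversion intrinsics,
/// small SELECTs, and v2f16 fneg/fabs.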
Matt Arsenault3aef8092017-01-23 23:09:58 +00003748void SITargetLowering::ReplaceNodeResults(SDNode *N,
3749 SmallVectorImpl<SDValue> &Results,
3750 SelectionDAG &DAG) const {
3751 switch (N->getOpcode()) {
3752 case ISD::INSERT_VECTOR_ELT: {
3753 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3754 Results.push_back(Res);
3755 return;
3756 }
3757 case ISD::EXTRACT_VECTOR_ELT: {
3758 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3759 Results.push_back(Res);
3760 return;
3761 }
Matt Arsenault1f17c662017-02-22 00:27:34 +00003762 case ISD::INTRINSIC_WO_CHAIN: {
3763 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Marek Olsak13e47412018-01-31 20:18:04 +00003764 switch (IID) {
3765 case Intrinsic::amdgcn_cvt_pkrtz: {
Matt Arsenault1f17c662017-02-22 00:27:34 +00003766 SDValue Src0 = N->getOperand(1);
3767 SDValue Src1 = N->getOperand(2);
3768 SDLoc SL(N);
3769 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3770 Src0, Src1);
Matt Arsenault1f17c662017-02-22 00:27:34 +00003771 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3772 return;
3773 }
Marek Olsak13e47412018-01-31 20:18:04 +00003774 case Intrinsic::amdgcn_cvt_pknorm_i16:
3775 case Intrinsic::amdgcn_cvt_pknorm_u16:
3776 case Intrinsic::amdgcn_cvt_pk_i16:
3777 case Intrinsic::amdgcn_cvt_pk_u16: {
3778 SDValue Src0 = N->getOperand(1);
3779 SDValue Src1 = N->getOperand(2);
3780 SDLoc SL(N);
3781 unsigned Opcode;
3782
3783 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3784 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3785 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3786 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3787 else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3788 Opcode = AMDGPUISD::CVT_PK_I16_I32;
3789 else
3790 Opcode = AMDGPUISD::CVT_PK_U16_U32;
3791
3792 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3793 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3794 return;
3795 }
3796 }
Simon Pilgrimd362d272017-07-08 19:50:03 +00003797 break;
Matt Arsenault1f17c662017-02-22 00:27:34 +00003798 }
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003799 case ISD::INTRINSIC_W_CHAIN: {
Matt Arsenault1349a042018-05-22 06:32:10 +00003800 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003801 Results.push_back(Res);
Matt Arsenault1349a042018-05-22 06:32:10 +00003802 Results.push_back(Res.getValue(1));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003803 return;
3804 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003805
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003806 break;
3807 }
Matt Arsenault4a486232017-04-19 20:53:07 +00003808 case ISD::SELECT: {
3809 SDLoc SL(N);
3810 EVT VT = N->getValueType(0);
3811 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3812 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3813 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3814
3815 EVT SelectVT = NewVT;
3816 if (NewVT.bitsLT(MVT::i32)) {
3817 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3818 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3819 SelectVT = MVT::i32;
3820 }
3821
3822 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3823 N->getOperand(0), LHS, RHS);
3824
3825 if (NewVT != SelectVT)
3826 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3827 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3828 return;
3829 }
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003830 case ISD::FNEG: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003831 if (N->getValueType(0) != MVT::v2f16)
3832 break;
3833
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003834 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003835 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3836
3837 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3838 BC,
3839 DAG.getConstant(0x80008000, SL, MVT::i32));
3840 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3841 return;
3842 }
3843 case ISD::FABS: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003844 if (N->getValueType(0) != MVT::v2f16)
3845 break;
3846
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003847 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003848 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3849
3850 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3851 BC,
3852 DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3853 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3854 return;
3855 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00003856 default:
3857 break;
3858 }
3859}
3860
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00003861/// Helper function for LowerBRCOND
Tom Stellardf8794352012-12-19 22:10:31 +00003862static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00003863
Tom Stellardf8794352012-12-19 22:10:31 +00003864 SDNode *Parent = Value.getNode();
3865 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3866 I != E; ++I) {
3867
3868 if (I.getUse().get() != Value)
3869 continue;
3870
3871 if (I->getOpcode() == Opcode)
3872 return *I;
3873 }
Craig Topper062a2ba2014-04-25 05:30:21 +00003874 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003875}
3876
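/// If \p Intr is one of the amdgcn control-flow intrinsics handled by
/// LowerBRCOND, return the corresponding AMDGPUISD opcode; otherwise return 0.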
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003877unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
Matt Arsenault6408c912016-09-16 22:11:18 +00003878 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3879 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003880 case Intrinsic::amdgcn_if:
3881 return AMDGPUISD::IF;
3882 case Intrinsic::amdgcn_else:
3883 return AMDGPUISD::ELSE;
3884 case Intrinsic::amdgcn_loop:
3885 return AMDGPUISD::LOOP;
3886 case Intrinsic::amdgcn_end_cf:
3887 llvm_unreachable("should not occur");
Matt Arsenault6408c912016-09-16 22:11:18 +00003888 default:
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003889 return 0;
Matt Arsenault6408c912016-09-16 22:11:18 +00003890 }
Tom Stellardbc4497b2016-02-12 23:45:29 +00003891 }
Matt Arsenault6408c912016-09-16 22:11:18 +00003892
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003893 // break, if_break, else_break are all only used as inputs to loop, not
3894 // directly as branch conditions.
3895 return 0;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003896}
3897
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003898void SITargetLowering::createDebuggerPrologueStackObjects(
3899 MachineFunction &MF) const {
3900 // Create stack objects that are used for emitting debugger prologue.
3901 //
3902 // Debugger prologue writes work group IDs and work item IDs to scratch memory
3903 // at fixed locations in the following format:
3904 // offset 0: work group ID x
3905 // offset 4: work group ID y
3906 // offset 8: work group ID z
3907 // offset 16: work item ID x
3908 // offset 20: work item ID y
3909 // offset 24: work item ID z
3910 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3911 int ObjectIdx = 0;
3912
3913 // For each dimension:
3914 for (unsigned i = 0; i < 3; ++i) {
3915 // Create fixed stack object for work group ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003916 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003917 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3918 // Create fixed stack object for work item ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003919 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003920 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3921 }
3922}
3923
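// These three predicates decide how LowerGlobalAddress materializes a global:
// as a direct fixup against the text section, with a pc-relative relocation,
// or through a GOT-indirect load.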
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003924bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3925 const Triple &TT = getTargetMachine().getTargetTriple();
Matt Arsenault923712b2018-02-09 16:57:57 +00003926 return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3927 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003928 AMDGPU::shouldEmitConstantsToTextSection(TT);
3929}
3930
3931bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00003932 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00003933 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3934 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003935 !shouldEmitFixup(GV) &&
3936 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3937}
3938
3939bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3940 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3941}
3942
Tom Stellardf8794352012-12-19 22:10:31 +00003943/// This transforms the control flow intrinsics to get the branch destination as
3944/// the last parameter; it also switches the branch target with BR if the need arises.
3945SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3946 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00003947 SDLoc DL(BRCOND);
Tom Stellardf8794352012-12-19 22:10:31 +00003948
3949 SDNode *Intr = BRCOND.getOperand(1).getNode();
3950 SDValue Target = BRCOND.getOperand(2);
Craig Topper062a2ba2014-04-25 05:30:21 +00003951 SDNode *BR = nullptr;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003952 SDNode *SetCC = nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003953
3954 if (Intr->getOpcode() == ISD::SETCC) {
3955 // As long as we negate the condition everything is fine
Tom Stellardbc4497b2016-02-12 23:45:29 +00003956 SetCC = Intr;
Tom Stellardf8794352012-12-19 22:10:31 +00003957 Intr = SetCC->getOperand(0).getNode();
3958
3959 } else {
3960 // Get the target from BR if we don't negate the condition
3961 BR = findUser(BRCOND, ISD::BR);
3962 Target = BR->getOperand(1);
3963 }
3964
Matt Arsenault6408c912016-09-16 22:11:18 +00003965 // FIXME: This changes the types of the intrinsics instead of introducing new
3966 // nodes with the correct types.
3967 // e.g. llvm.amdgcn.loop
3968
3969 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3970 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3971
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003972 unsigned CFNode = isCFIntrinsic(Intr);
3973 if (CFNode == 0) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00003974 // This is a uniform branch so we don't need to legalize.
3975 return BRCOND;
3976 }
3977
Matt Arsenault6408c912016-09-16 22:11:18 +00003978 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
3979 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
3980
Tom Stellardbc4497b2016-02-12 23:45:29 +00003981 assert(!SetCC ||
3982 (SetCC->getConstantOperandVal(1) == 1 &&
Tom Stellardbc4497b2016-02-12 23:45:29 +00003983 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
3984 ISD::SETNE));
Tom Stellardf8794352012-12-19 22:10:31 +00003985
Tom Stellardf8794352012-12-19 22:10:31 +00003986 // operands of the new intrinsic call
3987 SmallVector<SDValue, 4> Ops;
Matt Arsenault6408c912016-09-16 22:11:18 +00003988 if (HaveChain)
3989 Ops.push_back(BRCOND.getOperand(0));
3990
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003991 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Tom Stellardf8794352012-12-19 22:10:31 +00003992 Ops.push_back(Target);
3993
Matt Arsenault6408c912016-09-16 22:11:18 +00003994 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
3995
Tom Stellardf8794352012-12-19 22:10:31 +00003996 // build the new intrinsic call
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003997 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00003998
Matt Arsenault6408c912016-09-16 22:11:18 +00003999 if (!HaveChain) {
4000 SDValue Ops[] = {
4001 SDValue(Result, 0),
4002 BRCOND.getOperand(0)
4003 };
4004
4005 Result = DAG.getMergeValues(Ops, DL).getNode();
4006 }
4007
Tom Stellardf8794352012-12-19 22:10:31 +00004008 if (BR) {
4009 // Give the branch instruction our target
4010 SDValue Ops[] = {
4011 BR->getOperand(0),
4012 BRCOND.getOperand(2)
4013 };
Chandler Carruth356665a2014-08-01 22:09:43 +00004014 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4015 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4016 BR = NewBR.getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004017 }
4018
4019 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4020
4021 // Copy the intrinsic results to registers
4022 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4023 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4024 if (!CopyToReg)
4025 continue;
4026
4027 Chain = DAG.getCopyToReg(
4028 Chain, DL,
4029 CopyToReg->getOperand(1),
4030 SDValue(Result, i - 1),
4031 SDValue());
4032
4033 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4034 }
4035
4036 // Remove the old intrinsic from the chain
4037 DAG.ReplaceAllUsesOfValueWith(
4038 SDValue(Intr, Intr->getNumValues() - 1),
4039 Intr->getOperand(0));
4040
4041 return Chain;
Tom Stellard75aadc22012-12-11 21:25:42 +00004042}
4043
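/// Convert the floating-point value \p Op to type \p VT, extending if \p VT is
/// wider and narrowing otherwise.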
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004044SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4045 SDValue Op,
4046 const SDLoc &DL,
4047 EVT VT) const {
  // Note: use FP_ROUND rather than FTRUNC for the narrowing case; FTRUNC only
  // rounds the value to an integral value and does not change the type.
  return Op.getValueType().bitsLE(VT) ?
    DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
    DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                DAG.getTargetConstant(0, DL, MVT::i32));
4051}
4052
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004053SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004054 assert(Op.getValueType() == MVT::f16 &&
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004055 "Do not know how to custom lower FP_ROUND for non-f16 type");
4056
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004057 SDValue Src = Op.getOperand(0);
4058 EVT SrcVT = Src.getValueType();
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004059 if (SrcVT != MVT::f64)
4060 return Op;
4061
4062 SDLoc DL(Op);
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004063
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004064 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4065 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
Mandeep Singh Grang5e1697e2017-06-06 05:08:36 +00004066 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004067}
4068
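/// Lower llvm.trap. Without a usable HSA trap handler this degenerates to an
/// endpgm; otherwise the queue pointer is passed in SGPR0/1 and an s_trap with
/// the LLVM trap ID is emitted.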
Matt Arsenault3e025382017-04-24 17:49:13 +00004069SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4070 SDLoc SL(Op);
Matt Arsenault3e025382017-04-24 17:49:13 +00004071 SDValue Chain = Op.getOperand(0);
4072
Tony Tye43259df2018-05-16 16:19:34 +00004073 if (Subtarget->getTrapHandlerAbi() != SISubtarget::TrapHandlerAbiHsa ||
4074 !Subtarget->isTrapHandlerEnabled())
Matt Arsenault3e025382017-04-24 17:49:13 +00004075 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
Tony Tye43259df2018-05-16 16:19:34 +00004076
4077 MachineFunction &MF = DAG.getMachineFunction();
4078 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4079 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4080 assert(UserSGPR != AMDGPU::NoRegister);
4081 SDValue QueuePtr = CreateLiveInRegister(
4082 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4083 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4084 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4085 QueuePtr, SDValue());
4086 SDValue Ops[] = {
4087 ToReg,
4088 DAG.getTargetConstant(SISubtarget::TrapIDLLVMTrap, SL, MVT::i16),
4089 SGPR01,
4090 ToReg.getValue(1)
4091 };
4092 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4093}
4094
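/// Lower llvm.debugtrap. If the trap handler is not usable, emit a warning and
/// drop the trap; otherwise emit s_trap with the debug trap ID.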
4095SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4096 SDLoc SL(Op);
4097 SDValue Chain = Op.getOperand(0);
4098 MachineFunction &MF = DAG.getMachineFunction();
4099
4100 if (Subtarget->getTrapHandlerAbi() != SISubtarget::TrapHandlerAbiHsa ||
4101 !Subtarget->isTrapHandlerEnabled()) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004102 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
Matt Arsenault3e025382017-04-24 17:49:13 +00004103 "debugtrap handler not supported",
4104 Op.getDebugLoc(),
4105 DS_Warning);
Matthias Braunf1caa282017-12-15 22:22:58 +00004106 LLVMContext &Ctx = MF.getFunction().getContext();
Matt Arsenault3e025382017-04-24 17:49:13 +00004107 Ctx.diagnose(NoTrap);
4108 return Chain;
4109 }
Matt Arsenault3e025382017-04-24 17:49:13 +00004110
Tony Tye43259df2018-05-16 16:19:34 +00004111 SDValue Ops[] = {
4112 Chain,
4113 DAG.getTargetConstant(SISubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
4114 };
4115 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
Matt Arsenault3e025382017-04-24 17:49:13 +00004116}
4117
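/// Return the high 32 bits of a flat pointer for the given LDS or private
/// address space, read either from the aperture registers (on subtargets that
/// have them) or loaded from the queue pointer.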
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004118SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004119 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004120 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4121 if (Subtarget->hasApertureRegs()) {
4122 unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
4123 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4124 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
4125 unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
4126 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4127 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4128 unsigned Encoding =
4129 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4130 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4131 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
Matt Arsenaulte823d922017-02-18 18:29:53 +00004132
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004133 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4134 SDValue ApertureReg = SDValue(
4135 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4136 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4137 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004138 }
4139
Matt Arsenault99c14522016-04-25 19:27:24 +00004140 MachineFunction &MF = DAG.getMachineFunction();
4141 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004142 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4143 assert(UserSGPR != AMDGPU::NoRegister);
4144
Matt Arsenault99c14522016-04-25 19:27:24 +00004145 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004146 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004147
4148 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4149 // private_segment_aperture_base_hi.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004150 uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004151
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004152 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004153
4154 // TODO: Use custom target PseudoSourceValue.
4155 // TODO: We should use the value from the IR intrinsic call, but it might not
4156 // be available here, and it is unclear how to retrieve it.
4157 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004158 AMDGPUASI.CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004159
4160 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004161 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004162 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004163 MachineMemOperand::MODereferenceable |
4164 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004165}
4166
4167SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4168 SelectionDAG &DAG) const {
4169 SDLoc SL(Op);
4170 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4171
4172 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004173 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4174
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004175 const AMDGPUTargetMachine &TM =
4176 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4177
Matt Arsenault99c14522016-04-25 19:27:24 +00004178 // flat -> local/private
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004179 if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004180 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004181
4182 if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
4183 DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004184 unsigned NullVal = TM.getNullPointerValue(DestAS);
4185 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004186 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4187 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4188
4189 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4190 NonNull, Ptr, SegmentNullPtr);
4191 }
4192 }
4193
4194 // local/private -> flat
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004195 if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004196 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004197
4198 if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
4199 SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004200 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4201 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004202
Matt Arsenault99c14522016-04-25 19:27:24 +00004203 SDValue NonNull
4204 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4205
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004206 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004207 SDValue CvtPtr
4208 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4209
4210 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4211 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4212 FlatNullPtr);
4213 }
4214 }
4215
4216 // global <-> flat are no-ops and never emitted.
4217
4218 const MachineFunction &MF = DAG.getMachineFunction();
4219 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004220 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004221 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4222
4223 return DAG.getUNDEF(ASC->getValueType(0));
4224}
4225
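/// Custom lowering for INSERT_VECTOR_ELT on small (<= 64-bit) vectors. A
/// constant index into a v4i16/v4f16 vector is handled per 32-bit half; a
/// dynamic index is lowered to shift/mask arithmetic on the integer-typed
/// vector so no stack slot is needed.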
Matt Arsenault3aef8092017-01-23 23:09:58 +00004226SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4227 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004228 SDValue Vec = Op.getOperand(0);
4229 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004230 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004231 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004232 EVT EltVT = VecVT.getVectorElementType();
4233 unsigned VecSize = VecVT.getSizeInBits();
4234 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004235
4237 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004238
4239 unsigned NumElts = VecVT.getVectorNumElements();
4240 SDLoc SL(Op);
4241 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4242
Matt Arsenault9224c002018-06-05 19:52:46 +00004243 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004244 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4245
4246 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4247 DAG.getConstant(0, SL, MVT::i32));
4248 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4249 DAG.getConstant(1, SL, MVT::i32));
4250
4251 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4252 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4253
4254 unsigned Idx = KIdx->getZExtValue();
4255 bool InsertLo = Idx < 2;
4256 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4257 InsertLo ? LoVec : HiVec,
4258 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4259 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4260
4261 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4262
4263 SDValue Concat = InsertLo ?
4264 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4265 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4266
4267 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4268 }
4269
Matt Arsenault3aef8092017-01-23 23:09:58 +00004270 if (isa<ConstantSDNode>(Idx))
4271 return SDValue();
4272
Matt Arsenault9224c002018-06-05 19:52:46 +00004273 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004274
Matt Arsenault3aef8092017-01-23 23:09:58 +00004275 // Avoid stack access for dynamic indexing.
Matt Arsenault9224c002018-06-05 19:52:46 +00004276 SDValue Val = InsVal;
4277 if (InsVal.getValueType() == MVT::f16)
4278 Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004279
4280 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Matt Arsenault67a98152018-05-16 11:47:30 +00004281 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Val);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004282
Matt Arsenault9224c002018-06-05 19:52:46 +00004283 assert(isPowerOf2_32(EltSize));
4284 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4285
Matt Arsenault3aef8092017-01-23 23:09:58 +00004286 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004287 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004288
Matt Arsenault67a98152018-05-16 11:47:30 +00004289 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4290 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4291 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004292 ScaledIdx);
4293
Matt Arsenault67a98152018-05-16 11:47:30 +00004294 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4295 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4296 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004297
Matt Arsenault67a98152018-05-16 11:47:30 +00004298 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4299 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004300}
4301
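/// Custom lowering for EXTRACT_VECTOR_ELT on small (<= 64-bit) vectors:
/// bitcast the vector to an integer, shift the selected element down, and
/// truncate, avoiding a stack slot for dynamic indices.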
4302SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4303 SelectionDAG &DAG) const {
4304 SDLoc SL(Op);
4305
4306 EVT ResultVT = Op.getValueType();
4307 SDValue Vec = Op.getOperand(0);
4308 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004309 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004310 unsigned VecSize = VecVT.getSizeInBits();
4311 EVT EltVT = VecVT.getVectorElementType();
4312 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004313
Matt Arsenault98f29462017-05-17 20:30:58 +00004314 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4315
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004316 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004317 // source modifiers before obscuring it with bit operations.
4318
4319 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4320 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4321 return Combined;
4322
Matt Arsenault9224c002018-06-05 19:52:46 +00004323 unsigned EltSize = EltVT.getSizeInBits();
4324 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004325
Matt Arsenault9224c002018-06-05 19:52:46 +00004326 MVT IntVT = MVT::getIntegerVT(VecSize);
4327 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4328
4329 // Convert vector index to bit-index (* EltSize)
4330 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004331
Matt Arsenault67a98152018-05-16 11:47:30 +00004332 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4333 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004334
Matt Arsenault67a98152018-05-16 11:47:30 +00004335 if (ResultVT == MVT::f16) {
4336 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4337 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4338 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004339
Matt Arsenault67a98152018-05-16 11:47:30 +00004340 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4341}
4342
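/// Custom lowering for BUILD_VECTOR of 16-bit elements: pack two elements into
/// each 32-bit half with shift/or, splitting v4 types into two packed halves
/// first.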
4343SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4344 SelectionDAG &DAG) const {
4345 SDLoc SL(Op);
4346 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004347
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004348 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4349 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4350
4351 // Turn into pair of packed build_vectors.
4352 // TODO: Special case for constants that can be materialized with s_mov_b64.
4353 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4354 { Op.getOperand(0), Op.getOperand(1) });
4355 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4356 { Op.getOperand(2), Op.getOperand(3) });
4357
4358 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4359 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4360
4361 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4362 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4363 }
4364
Matt Arsenault1349a042018-05-22 06:32:10 +00004365 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault67a98152018-05-16 11:47:30 +00004366
Matt Arsenault1349a042018-05-22 06:32:10 +00004367 SDValue Lo = Op.getOperand(0);
4368 SDValue Hi = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004369
Matt Arsenault1349a042018-05-22 06:32:10 +00004370 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4371 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault67a98152018-05-16 11:47:30 +00004372
Matt Arsenault1349a042018-05-22 06:32:10 +00004373 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
4374 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4375
4376 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4377 DAG.getConstant(16, SL, MVT::i32));
4378
4379 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
4380
4381 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004382}
4383
Tom Stellard418beb72016-07-13 14:23:33 +00004384bool
4385SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4386 // We can fold offsets for anything that doesn't require a GOT relocation.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004387 return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
Matt Arsenault923712b2018-02-09 16:57:57 +00004388 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
4389 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004390 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004391}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004392
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004393static SDValue
4394buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4395 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4396 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004397 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4398 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004399 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004400 // For constant address space:
4401 // s_getpc_b64 s[0:1]
4402 // s_add_u32 s0, s0, $symbol
4403 // s_addc_u32 s1, s1, 0
4404 //
4405 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4406 // a fixup or relocation is emitted to replace $symbol with a literal
4407 // constant, which is a pc-relative offset from the encoding of the $symbol
4408 // operand to the global variable.
4409 //
4410 // For global address space:
4411 // s_getpc_b64 s[0:1]
4412 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4413 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4414 //
4415 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4416 // fixups or relocations are emitted to replace $symbol@*@lo and
4417 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4418 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4419 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004420 //
4421 // What we want here is an offset from the value returned by s_getpc
4422 // (which is the address of the s_add_u32 instruction) to the global
4423 // variable, but since the encoding of $symbol starts 4 bytes after the start
4424 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4425 // small. This requires us to add 4 to the global variable offset in order to
4426 // compute the correct address.
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004427 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4428 GAFlags);
4429 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4430 GAFlags == SIInstrInfo::MO_NONE ?
4431 GAFlags : GAFlags + 1);
4432 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004433}
4434
Tom Stellard418beb72016-07-13 14:23:33 +00004435SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4436 SDValue Op,
4437 SelectionDAG &DAG) const {
4438 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004439 const GlobalValue *GV = GSD->getGlobal();
Tom Stellard418beb72016-07-13 14:23:33 +00004440
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004441 if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
Matt Arsenault923712b2018-02-09 16:57:57 +00004442 GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004443 GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
4444 // FIXME: It isn't correct to rely on the type of the pointer. This should
4445 // be removed when address space 0 is 64-bit.
4446 !GV->getType()->getElementType()->isFunctionTy())
Tom Stellard418beb72016-07-13 14:23:33 +00004447 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4448
4449 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00004450 EVT PtrVT = Op.getValueType();
4451
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004452 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00004453 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004454 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004455 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4456 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004457
4458 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004459 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004460
4461 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004462 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00004463 const DataLayout &DataLayout = DAG.getDataLayout();
4464 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
4465 // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
4466 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
4467
Justin Lebar9c375812016-07-15 18:27:10 +00004468 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00004469 MachineMemOperand::MODereferenceable |
4470 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00004471}
4472
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004473SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4474 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004475 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4476 // the destination register.
4477 //
Tom Stellardfc92e772015-05-12 14:18:14 +00004478 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4479 // so we will end up with redundant moves to m0.
4480 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004481 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4482
4483 // A Null SDValue creates a glue result.
4484 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4485 V, Chain);
4486 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00004487}
4488
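/// Load an implicit kernel argument that is known to be a zero-extended 16-bit
/// value (such as the local size components) and mark the result with
/// AssertZext.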
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004489SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4490 SDValue Op,
4491 MVT VT,
4492 unsigned Offset) const {
4493 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004494 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004495 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004496 // The local size values will have the hi 16-bits as zero.
4497 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4498 DAG.getValueType(VT));
4499}
4500
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004501static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4502 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004503 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004504 "non-hsa intrinsic with hsa target",
4505 DL.getDebugLoc());
4506 DAG.getContext()->diagnose(BadIntrin);
4507 return DAG.getUNDEF(VT);
4508}
4509
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004510static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4511 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004512 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004513 "intrinsic not supported on subtarget",
4514 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00004515 DAG.getContext()->diagnose(BadIntrin);
4516 return DAG.getUNDEF(VT);
4517}
4518
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004519SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4520 SelectionDAG &DAG) const {
4521 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00004522 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004523
4524 EVT VT = Op.getValueType();
4525 SDLoc DL(Op);
4526 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4527
Sanjay Patela2607012015-09-16 16:31:21 +00004528 // TODO: Should this propagate fast-math-flags?
4529
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004530 switch (IntrinsicID) {
Tom Stellard2f3f9852017-01-25 01:25:13 +00004531 case Intrinsic::amdgcn_implicit_buffer_ptr: {
Matt Arsenaultceafc552018-05-29 17:42:50 +00004532 if (getSubtarget()->isAmdCodeObjectV2(MF.getFunction()))
Matt Arsenault10fc0622017-06-26 03:01:31 +00004533 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004534 return getPreloadedValue(DAG, *MFI, VT,
4535 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
Tom Stellard2f3f9852017-01-25 01:25:13 +00004536 }
Tom Stellard48f29f22015-11-26 00:43:29 +00004537 case Intrinsic::amdgcn_dispatch_ptr:
Matt Arsenault48ab5262016-04-25 19:27:18 +00004538 case Intrinsic::amdgcn_queue_ptr: {
Matt Arsenaultceafc552018-05-29 17:42:50 +00004539 if (!Subtarget->isAmdCodeObjectV2(MF.getFunction())) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00004540 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00004541 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
Oliver Stannard7e7d9832016-02-02 13:52:43 +00004542 DL.getDebugLoc());
Matt Arsenault800fecf2016-01-11 21:18:33 +00004543 DAG.getContext()->diagnose(BadIntrin);
4544 return DAG.getUNDEF(VT);
4545 }
4546
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004547 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
4548 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
4549 return getPreloadedValue(DAG, *MFI, VT, RegID);
Matt Arsenault48ab5262016-04-25 19:27:18 +00004550 }
Jan Veselyfea814d2016-06-21 20:46:20 +00004551 case Intrinsic::amdgcn_implicitarg_ptr: {
Matt Arsenault9166ce82017-07-28 15:52:08 +00004552 if (MFI->isEntryFunction())
4553 return getImplicitArgPtr(DAG, DL);
Matt Arsenault817c2532017-08-03 23:12:44 +00004554 return getPreloadedValue(DAG, *MFI, VT,
4555 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
Jan Veselyfea814d2016-06-21 20:46:20 +00004556 }
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00004557 case Intrinsic::amdgcn_kernarg_segment_ptr: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004558 return getPreloadedValue(DAG, *MFI, VT,
4559 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00004560 }
Matt Arsenault8d718dc2016-07-22 17:01:30 +00004561 case Intrinsic::amdgcn_dispatch_id: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004562 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
Matt Arsenault8d718dc2016-07-22 17:01:30 +00004563 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004564 case Intrinsic::amdgcn_rcp:
4565 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
4566 case Intrinsic::amdgcn_rsq:
4567 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00004568 case Intrinsic::amdgcn_rsq_legacy:
Matt Arsenault43e92fe2016-06-24 06:30:11 +00004569 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004570 return emitRemovedIntrinsicError(DAG, DL, VT);
4571
4572 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00004573 case Intrinsic::amdgcn_rcp_legacy:
Matt Arsenault32fc5272016-07-26 16:45:45 +00004574 if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4575 return emitRemovedIntrinsicError(DAG, DL, VT);
4576 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
Matt Arsenault09b2c4a2016-07-15 21:26:52 +00004577 case Intrinsic::amdgcn_rsq_clamp: {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00004578 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
Matt Arsenault79963e82016-02-13 01:03:00 +00004579 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Tom Stellard48f29f22015-11-26 00:43:29 +00004580
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004581 Type *Type = VT.getTypeForEVT(*DAG.getContext());
4582 APFloat Max = APFloat::getLargest(Type->getFltSemantics());
4583 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
4584
4585 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
4586 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
4587 DAG.getConstantFP(Max, DL, VT));
4588 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
4589 DAG.getConstantFP(Min, DL, VT));
4590 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004591 case Intrinsic::r600_read_ngroups_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004592 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004593 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004594
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004595 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004596 SI::KernelInputOffsets::NGROUPS_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004597 case Intrinsic::r600_read_ngroups_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004598 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004599 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004600
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004601 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004602 SI::KernelInputOffsets::NGROUPS_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004603 case Intrinsic::r600_read_ngroups_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004604 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004605 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004606
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004607 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004608 SI::KernelInputOffsets::NGROUPS_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004609 case Intrinsic::r600_read_global_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004610 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004611 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004612
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004613 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004614 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004615 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004616 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004617 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004618
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004619 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004620 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004621 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004622 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004623 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004624
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004625 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004626 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004627 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004628 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004629 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004630
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004631 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4632 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004633 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004634 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004635 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004636
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004637 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4638 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004639 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00004640 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004641 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00004642
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004643 return lowerImplicitZextParam(DAG, Op, MVT::i16,
4644 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00004645 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004646 case Intrinsic::r600_read_tgid_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004647 return getPreloadedValue(DAG, *MFI, VT,
4648 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Matt Arsenault43976df2016-01-30 04:25:19 +00004649 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004650 case Intrinsic::r600_read_tgid_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004651 return getPreloadedValue(DAG, *MFI, VT,
4652 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Matt Arsenault43976df2016-01-30 04:25:19 +00004653 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004654 case Intrinsic::r600_read_tgid_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004655 return getPreloadedValue(DAG, *MFI, VT,
4656 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
4657 case Intrinsic::amdgcn_workitem_id_x: {
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004658 case Intrinsic::r600_read_tidig_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004659 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4660 SDLoc(DAG.getEntryNode()),
4661 MFI->getArgInfo().WorkItemIDX);
4662 }
Matt Arsenault43976df2016-01-30 04:25:19 +00004663 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004664 case Intrinsic::r600_read_tidig_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004665 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4666 SDLoc(DAG.getEntryNode()),
4667 MFI->getArgInfo().WorkItemIDY);
Matt Arsenault43976df2016-01-30 04:25:19 +00004668 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004669 case Intrinsic::r600_read_tidig_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00004670 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
4671 SDLoc(DAG.getEntryNode()),
4672 MFI->getArgInfo().WorkItemIDZ);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004673 case AMDGPUIntrinsic::SI_load_const: {
4674 SDValue Ops[] = {
4675 Op.getOperand(1),
4676 Op.getOperand(2)
4677 };
4678
4679 MachineMemOperand *MMO = MF.getMachineMemOperand(
Justin Lebaradbf09e2016-09-11 01:38:58 +00004680 MachinePointerInfo(),
4681 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
4682 MachineMemOperand::MOInvariant,
4683 VT.getStoreSize(), 4);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004684 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
4685 Op->getVTList(), Ops, VT, MMO);
4686 }
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004687 case Intrinsic::amdgcn_fdiv_fast:
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00004688 return lowerFDIV_FAST(Op, DAG);
Tom Stellard2187bb82016-12-06 23:52:13 +00004689 case Intrinsic::amdgcn_interp_mov: {
4690 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4691 SDValue Glue = M0.getValue(1);
4692 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
4693 Op.getOperand(2), Op.getOperand(3), Glue);
4694 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00004695 case Intrinsic::amdgcn_interp_p1: {
4696 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
4697 SDValue Glue = M0.getValue(1);
4698 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
4699 Op.getOperand(2), Op.getOperand(3), Glue);
4700 }
4701 case Intrinsic::amdgcn_interp_p2: {
4702 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
4703 SDValue Glue = SDValue(M0.getNode(), 1);
4704 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
4705 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
4706 Glue);
4707 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004708 case Intrinsic::amdgcn_sin:
4709 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
4710
4711 case Intrinsic::amdgcn_cos:
4712 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
4713
4714 case Intrinsic::amdgcn_log_clamp: {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00004715 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004716 return SDValue();
4717
4718 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00004719 MF.getFunction(), "intrinsic not supported on subtarget",
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00004720 DL.getDebugLoc());
4721 DAG.getContext()->diagnose(BadIntrin);
4722 return DAG.getUNDEF(VT);
4723 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004724 case Intrinsic::amdgcn_ldexp:
4725 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
4726 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00004727
4728 case Intrinsic::amdgcn_fract:
4729 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
4730
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004731 case Intrinsic::amdgcn_class:
4732 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
4733 Op.getOperand(1), Op.getOperand(2));
4734 case Intrinsic::amdgcn_div_fmas:
4735 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
4736 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
4737 Op.getOperand(4));
4738
4739 case Intrinsic::amdgcn_div_fixup:
4740 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
4741 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4742
4743 case Intrinsic::amdgcn_trig_preop:
4744 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
4745 Op.getOperand(1), Op.getOperand(2));
4746 case Intrinsic::amdgcn_div_scale: {
4747 // 3rd parameter required to be a constant.
4748 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4749 if (!Param)
Matt Arsenault206f8262017-08-01 20:49:41 +00004750 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
Matt Arsenaultf75257a2016-01-23 05:32:20 +00004751
4752 // Translate to the operands expected by the machine instruction. The
4753    // first operand must be the same as one of the other source operands.
4754 SDValue Numerator = Op.getOperand(1);
4755 SDValue Denominator = Op.getOperand(2);
4756
4757    // Note this order is the opposite of the machine instruction's operand order,
4758 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
4759 // intrinsic has the numerator as the first operand to match a normal
4760 // division operation.
4761
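    // Src0 is the operand that gets scaled: the numerator when the constant
    // selector is true (all ones), otherwise the denominator.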
4762 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
4763
4764 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
4765 Denominator, Numerator);
4766 }
Wei Ding07e03712016-07-28 16:42:13 +00004767 case Intrinsic::amdgcn_icmp: {
4768 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004769 if (!CD)
4770 return DAG.getUNDEF(VT);
Wei Ding07e03712016-07-28 16:42:13 +00004771
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004772 int CondCode = CD->getSExtValue();
Wei Ding07e03712016-07-28 16:42:13 +00004773 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004774 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00004775 return DAG.getUNDEF(VT);
4776
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00004777 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00004778 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
4779 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4780 Op.getOperand(2), DAG.getCondCode(CCOpcode));
4781 }
4782 case Intrinsic::amdgcn_fcmp: {
4783 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004784 if (!CD)
4785 return DAG.getUNDEF(VT);
Wei Ding07e03712016-07-28 16:42:13 +00004786
Matt Arsenaultf6cf1032017-02-17 19:49:10 +00004787 int CondCode = CD->getSExtValue();
4788 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
4789 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00004790 return DAG.getUNDEF(VT);
4791
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00004792 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00004793 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
4794 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
4795 Op.getOperand(2), DAG.getCondCode(CCOpcode));
4796 }
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00004797 case Intrinsic::amdgcn_fmed3:
4798 return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
4799 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Matt Arsenault32fc5272016-07-26 16:45:45 +00004800 case Intrinsic::amdgcn_fmul_legacy:
4801 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
4802 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00004803 case Intrinsic::amdgcn_sffbh:
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00004804 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaultf5262252017-02-22 23:04:58 +00004805 case Intrinsic::amdgcn_sbfe:
4806 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
4807 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4808 case Intrinsic::amdgcn_ubfe:
4809 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
4810 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Marek Olsak13e47412018-01-31 20:18:04 +00004811 case Intrinsic::amdgcn_cvt_pkrtz:
4812 case Intrinsic::amdgcn_cvt_pknorm_i16:
4813 case Intrinsic::amdgcn_cvt_pknorm_u16:
4814 case Intrinsic::amdgcn_cvt_pk_i16:
4815 case Intrinsic::amdgcn_cvt_pk_u16: {
4816 // FIXME: Stop adding cast if v2f16/v2i16 are legal.
Matt Arsenault1f17c662017-02-22 00:27:34 +00004817 EVT VT = Op.getValueType();
Marek Olsak13e47412018-01-31 20:18:04 +00004818 unsigned Opcode;
4819
4820 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
4821 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
4822 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
4823 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
4824 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
4825 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
4826 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
4827 Opcode = AMDGPUISD::CVT_PK_I16_I32;
4828 else
4829 Opcode = AMDGPUISD::CVT_PK_U16_U32;
4830
4831 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Matt Arsenault1f17c662017-02-22 00:27:34 +00004832 Op.getOperand(1), Op.getOperand(2));
4833 return DAG.getNode(ISD::BITCAST, DL, VT, Node);
4834 }
Connor Abbott8c217d02017-08-04 18:36:49 +00004835 case Intrinsic::amdgcn_wqm: {
4836 SDValue Src = Op.getOperand(1);
4837 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
4838 0);
4839 }
Connor Abbott92638ab2017-08-04 18:36:52 +00004840 case Intrinsic::amdgcn_wwm: {
4841 SDValue Src = Op.getOperand(1);
4842 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
4843 0);
4844 }
Matt Arsenault856777d2017-12-08 20:00:57 +00004845 case Intrinsic::amdgcn_image_getlod:
4846 case Intrinsic::amdgcn_image_getresinfo: {
4847 unsigned Idx = (IntrinsicID == Intrinsic::amdgcn_image_getresinfo) ? 3 : 4;
4848
4849    // If the dmask has every channel disabled, the operation produces nothing;
    // fold the result to undef.
4850 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(Idx));
4851 if (!DMask || DMask->isNullValue())
4852 return DAG.getUNDEF(Op.getValueType());
4853 return SDValue();
4854 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004855 default:
Matt Arsenault754dd3e2017-04-03 18:08:08 +00004856 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00004857 }
4858}
4859
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00004860SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4861 SelectionDAG &DAG) const {
4862 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00004863 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00004864
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00004865 switch (IntrID) {
4866 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00004867 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00004868 case Intrinsic::amdgcn_ds_fadd:
4869 case Intrinsic::amdgcn_ds_fmin:
4870 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00004871 MemSDNode *M = cast<MemSDNode>(Op);
Daniil Fukalovd5fca552018-01-17 14:05:05 +00004872 unsigned Opc;
4873 switch (IntrID) {
4874 case Intrinsic::amdgcn_atomic_inc:
4875 Opc = AMDGPUISD::ATOMIC_INC;
4876 break;
4877 case Intrinsic::amdgcn_atomic_dec:
4878 Opc = AMDGPUISD::ATOMIC_DEC;
4879 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00004880 case Intrinsic::amdgcn_ds_fadd:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00004881 Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
4882 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00004883 case Intrinsic::amdgcn_ds_fmin:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00004884 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
4885 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00004886 case Intrinsic::amdgcn_ds_fmax:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00004887 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
4888 break;
4889 default:
4890 llvm_unreachable("Unknown intrinsic!");
4891 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00004892 SDValue Ops[] = {
4893 M->getOperand(0), // Chain
4894 M->getOperand(2), // Ptr
4895 M->getOperand(3) // Value
4896 };
4897
4898 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
4899 M->getMemoryVT(), M->getMemOperand());
4900 }
Tom Stellard6f9ef142016-12-20 17:19:44 +00004901 case Intrinsic::amdgcn_buffer_load:
4902 case Intrinsic::amdgcn_buffer_load_format: {
4903 SDValue Ops[] = {
4904 Op.getOperand(0), // Chain
4905 Op.getOperand(2), // rsrc
4906 Op.getOperand(3), // vindex
4907 Op.getOperand(4), // offset
4908 Op.getOperand(5), // glc
4909 Op.getOperand(6) // slc
4910 };
Tom Stellard6f9ef142016-12-20 17:19:44 +00004911
4912 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
4913 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
4914 EVT VT = Op.getValueType();
4915 EVT IntVT = VT.changeTypeToInteger();
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00004916 auto *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00004917 EVT LoadVT = Op.getValueType();
4918 bool IsD16 = LoadVT.getScalarType() == MVT::f16;
4919 if (IsD16)
4920 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG);
4921
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00004922 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
4923 M->getMemOperand());
Tom Stellard6f9ef142016-12-20 17:19:44 +00004924 }
David Stuttard70e8bc12017-06-22 16:29:22 +00004925 case Intrinsic::amdgcn_tbuffer_load: {
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00004926 MemSDNode *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00004927 EVT LoadVT = Op.getValueType();
4928 bool IsD16 = LoadVT.getScalarType() == MVT::f16;
4929 if (IsD16) {
4930 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, M, DAG);
4931 }
4932
David Stuttard70e8bc12017-06-22 16:29:22 +00004933 SDValue Ops[] = {
4934 Op.getOperand(0), // Chain
4935 Op.getOperand(2), // rsrc
4936 Op.getOperand(3), // vindex
4937 Op.getOperand(4), // voffset
4938 Op.getOperand(5), // soffset
4939 Op.getOperand(6), // offset
4940 Op.getOperand(7), // dfmt
4941 Op.getOperand(8), // nfmt
4942 Op.getOperand(9), // glc
4943 Op.getOperand(10) // slc
4944 };
4945
David Stuttard70e8bc12017-06-22 16:29:22 +00004946 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Matt Arsenault1349a042018-05-22 06:32:10 +00004947 Op->getVTList(), Ops, LoadVT,
4948 M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00004949 }
Marek Olsak5cec6412017-11-09 01:52:48 +00004950 case Intrinsic::amdgcn_buffer_atomic_swap:
4951 case Intrinsic::amdgcn_buffer_atomic_add:
4952 case Intrinsic::amdgcn_buffer_atomic_sub:
4953 case Intrinsic::amdgcn_buffer_atomic_smin:
4954 case Intrinsic::amdgcn_buffer_atomic_umin:
4955 case Intrinsic::amdgcn_buffer_atomic_smax:
4956 case Intrinsic::amdgcn_buffer_atomic_umax:
4957 case Intrinsic::amdgcn_buffer_atomic_and:
4958 case Intrinsic::amdgcn_buffer_atomic_or:
4959 case Intrinsic::amdgcn_buffer_atomic_xor: {
4960 SDValue Ops[] = {
4961 Op.getOperand(0), // Chain
4962 Op.getOperand(2), // vdata
4963 Op.getOperand(3), // rsrc
4964 Op.getOperand(4), // vindex
4965 Op.getOperand(5), // offset
4966 Op.getOperand(6) // slc
4967 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00004968 EVT VT = Op.getValueType();
4969
4970 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00004971 unsigned Opcode = 0;
4972
4973 switch (IntrID) {
4974 case Intrinsic::amdgcn_buffer_atomic_swap:
4975 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
4976 break;
4977 case Intrinsic::amdgcn_buffer_atomic_add:
4978 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
4979 break;
4980 case Intrinsic::amdgcn_buffer_atomic_sub:
4981 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
4982 break;
4983 case Intrinsic::amdgcn_buffer_atomic_smin:
4984 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
4985 break;
4986 case Intrinsic::amdgcn_buffer_atomic_umin:
4987 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
4988 break;
4989 case Intrinsic::amdgcn_buffer_atomic_smax:
4990 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
4991 break;
4992 case Intrinsic::amdgcn_buffer_atomic_umax:
4993 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
4994 break;
4995 case Intrinsic::amdgcn_buffer_atomic_and:
4996 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
4997 break;
4998 case Intrinsic::amdgcn_buffer_atomic_or:
4999 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5000 break;
5001 case Intrinsic::amdgcn_buffer_atomic_xor:
5002 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5003 break;
5004 default:
5005 llvm_unreachable("unhandled atomic opcode");
5006 }
5007
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005008 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5009 M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005010 }
5011
5012 case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
5013 SDValue Ops[] = {
5014 Op.getOperand(0), // Chain
5015 Op.getOperand(2), // src
5016 Op.getOperand(3), // cmp
5017 Op.getOperand(4), // rsrc
5018 Op.getOperand(5), // vindex
5019 Op.getOperand(6), // offset
5020 Op.getOperand(7) // slc
5021 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005022 EVT VT = Op.getValueType();
5023 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005024
5025 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005026 Op->getVTList(), Ops, VT, M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005027 }
5028
Matt Arsenault1349a042018-05-22 06:32:10 +00005029 case Intrinsic::amdgcn_image_load:
5030 case Intrinsic::amdgcn_image_load_mip: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005031 EVT VT = Op.getValueType();
5032 if (Subtarget->hasUnpackedD16VMem() &&
5033 VT.isVector() && VT.getScalarSizeInBits() == 16) {
5034 return adjustLoadValueType(getImageOpcode(IntrID), cast<MemSDNode>(Op),
5035 DAG);
Matt Arsenault1349a042018-05-22 06:32:10 +00005036 }
5037
5038 return SDValue();
5039 }
5040
Matt Arsenaultf8fb6052017-03-21 16:32:17 +00005041 // Basic sample.
5042 case Intrinsic::amdgcn_image_sample:
5043 case Intrinsic::amdgcn_image_sample_cl:
5044 case Intrinsic::amdgcn_image_sample_d:
5045 case Intrinsic::amdgcn_image_sample_d_cl:
5046 case Intrinsic::amdgcn_image_sample_l:
5047 case Intrinsic::amdgcn_image_sample_b:
5048 case Intrinsic::amdgcn_image_sample_b_cl:
5049 case Intrinsic::amdgcn_image_sample_lz:
5050 case Intrinsic::amdgcn_image_sample_cd:
5051 case Intrinsic::amdgcn_image_sample_cd_cl:
5052
5053 // Sample with comparison.
5054 case Intrinsic::amdgcn_image_sample_c:
5055 case Intrinsic::amdgcn_image_sample_c_cl:
5056 case Intrinsic::amdgcn_image_sample_c_d:
5057 case Intrinsic::amdgcn_image_sample_c_d_cl:
5058 case Intrinsic::amdgcn_image_sample_c_l:
5059 case Intrinsic::amdgcn_image_sample_c_b:
5060 case Intrinsic::amdgcn_image_sample_c_b_cl:
5061 case Intrinsic::amdgcn_image_sample_c_lz:
5062 case Intrinsic::amdgcn_image_sample_c_cd:
5063 case Intrinsic::amdgcn_image_sample_c_cd_cl:
5064
5065 // Sample with offsets.
5066 case Intrinsic::amdgcn_image_sample_o:
5067 case Intrinsic::amdgcn_image_sample_cl_o:
5068 case Intrinsic::amdgcn_image_sample_d_o:
5069 case Intrinsic::amdgcn_image_sample_d_cl_o:
5070 case Intrinsic::amdgcn_image_sample_l_o:
5071 case Intrinsic::amdgcn_image_sample_b_o:
5072 case Intrinsic::amdgcn_image_sample_b_cl_o:
5073 case Intrinsic::amdgcn_image_sample_lz_o:
5074 case Intrinsic::amdgcn_image_sample_cd_o:
5075 case Intrinsic::amdgcn_image_sample_cd_cl_o:
5076
5077 // Sample with comparison and offsets.
5078 case Intrinsic::amdgcn_image_sample_c_o:
5079 case Intrinsic::amdgcn_image_sample_c_cl_o:
5080 case Intrinsic::amdgcn_image_sample_c_d_o:
5081 case Intrinsic::amdgcn_image_sample_c_d_cl_o:
5082 case Intrinsic::amdgcn_image_sample_c_l_o:
5083 case Intrinsic::amdgcn_image_sample_c_b_o:
5084 case Intrinsic::amdgcn_image_sample_c_b_cl_o:
5085 case Intrinsic::amdgcn_image_sample_c_lz_o:
5086 case Intrinsic::amdgcn_image_sample_c_cd_o:
Matt Arsenault1349a042018-05-22 06:32:10 +00005087 case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
5088
5089 // Basic gather4
5090 case Intrinsic::amdgcn_image_gather4:
5091 case Intrinsic::amdgcn_image_gather4_cl:
5092 case Intrinsic::amdgcn_image_gather4_l:
5093 case Intrinsic::amdgcn_image_gather4_b:
5094 case Intrinsic::amdgcn_image_gather4_b_cl:
5095 case Intrinsic::amdgcn_image_gather4_lz:
5096
5097 // Gather4 with comparison
5098 case Intrinsic::amdgcn_image_gather4_c:
5099 case Intrinsic::amdgcn_image_gather4_c_cl:
5100 case Intrinsic::amdgcn_image_gather4_c_l:
5101 case Intrinsic::amdgcn_image_gather4_c_b:
5102 case Intrinsic::amdgcn_image_gather4_c_b_cl:
5103 case Intrinsic::amdgcn_image_gather4_c_lz:
5104
5105 // Gather4 with offsets
5106 case Intrinsic::amdgcn_image_gather4_o:
5107 case Intrinsic::amdgcn_image_gather4_cl_o:
5108 case Intrinsic::amdgcn_image_gather4_l_o:
5109 case Intrinsic::amdgcn_image_gather4_b_o:
5110 case Intrinsic::amdgcn_image_gather4_b_cl_o:
5111 case Intrinsic::amdgcn_image_gather4_lz_o:
5112
5113 // Gather4 with comparison and offsets
5114 case Intrinsic::amdgcn_image_gather4_c_o:
5115 case Intrinsic::amdgcn_image_gather4_c_cl_o:
5116 case Intrinsic::amdgcn_image_gather4_c_l_o:
5117 case Intrinsic::amdgcn_image_gather4_c_b_o:
5118 case Intrinsic::amdgcn_image_gather4_c_b_cl_o:
5119 case Intrinsic::amdgcn_image_gather4_c_lz_o: {
Matt Arsenaultf8fb6052017-03-21 16:32:17 +00005120    // If the dmask has every channel disabled, the operation produces nothing;
    // fold the result to undef.
5121 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
5122 if (!DMask || DMask->isNullValue()) {
5123 SDValue Undef = DAG.getUNDEF(Op.getValueType());
5124 return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
5125 }
5126
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005127 if (Subtarget->hasUnpackedD16VMem() &&
5128 Op.getValueType().isVector() &&
5129 Op.getValueType().getScalarSizeInBits() == 16) {
Matt Arsenault1349a042018-05-22 06:32:10 +00005130 return adjustLoadValueType(getImageOpcode(IntrID), cast<MemSDNode>(Op),
5131 DAG);
5132 }
5133
Matt Arsenaultf8fb6052017-03-21 16:32:17 +00005134 return SDValue();
5135 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005136 default:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005137 if (Subtarget->hasUnpackedD16VMem() &&
5138 Op.getValueType().isVector() &&
5139 Op.getValueType().getScalarSizeInBits() == 16) {
5140 if (const AMDGPU::D16ImageDimIntrinsic *D16ImageDimIntr =
5141 AMDGPU::lookupD16ImageDimIntrinsicByIntr(IntrID)) {
5142 return adjustLoadValueType(D16ImageDimIntr->D16HelperIntr,
5143 cast<MemSDNode>(Op), DAG, true);
5144 }
Matt Arsenault1349a042018-05-22 06:32:10 +00005145 }
5146
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005147 return SDValue();
5148 }
5149}
5150
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005151SDValue SITargetLowering::handleD16VData(SDValue VData,
5152 SelectionDAG &DAG) const {
5153 EVT StoreVT = VData.getValueType();
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005154
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005155 // No change for f16 and legal vector D16 types.
Matt Arsenault1349a042018-05-22 06:32:10 +00005156 if (!StoreVT.isVector())
5157 return VData;
5158
5159 SDLoc DL(VData);
5160 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
5161
5162 if (Subtarget->hasUnpackedD16VMem()) {
5163 // We need to unpack the packed data to store.
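    // For example, a v2f16 value is widened to v2i32, with each 16-bit element
    // zero-extended into its own 32-bit lane (the zero-extend itself is
    // unrolled into per-element operations).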
5164 EVT IntStoreVT = StoreVT.changeTypeToInteger();
5165 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
5166
5167 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
5168 StoreVT.getVectorNumElements());
5169 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
5170 return DAG.UnrollVectorOp(ZExt.getNode());
5171 }
5172
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005173 assert(isTypeLegal(StoreVT));
5174 return VData;
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005175}
5176
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005177SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
5178 SelectionDAG &DAG) const {
Tom Stellardfc92e772015-05-12 14:18:14 +00005179 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005180 SDValue Chain = Op.getOperand(0);
5181 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
David Stuttard70e8bc12017-06-22 16:29:22 +00005182 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005183
5184 switch (IntrinsicID) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00005185 case Intrinsic::amdgcn_exp: {
Matt Arsenault4165efd2017-01-17 07:26:53 +00005186 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
5187 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
5188 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
5189 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
5190
5191 const SDValue Ops[] = {
5192 Chain,
5193 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
5194 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
5195 Op.getOperand(4), // src0
5196 Op.getOperand(5), // src1
5197 Op.getOperand(6), // src2
5198 Op.getOperand(7), // src3
5199 DAG.getTargetConstant(0, DL, MVT::i1), // compr
5200 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
5201 };
5202
5203 unsigned Opc = Done->isNullValue() ?
5204 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
5205 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
5206 }
5207 case Intrinsic::amdgcn_exp_compr: {
5208 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
5209 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
5210 SDValue Src0 = Op.getOperand(4);
5211 SDValue Src1 = Op.getOperand(5);
5212 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
5213 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
5214
5215 SDValue Undef = DAG.getUNDEF(MVT::f32);
5216 const SDValue Ops[] = {
5217 Chain,
5218 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
5219 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
5220 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
5221 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
5222 Undef, // src2
5223 Undef, // src3
5224 DAG.getTargetConstant(1, DL, MVT::i1), // compr
5225 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
5226 };
5227
5228 unsigned Opc = Done->isNullValue() ?
5229 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
5230 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
5231 }
5232 case Intrinsic::amdgcn_s_sendmsg:
Matt Arsenaultd3e5cb72017-02-16 02:01:17 +00005233 case Intrinsic::amdgcn_s_sendmsghalt: {
5234 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
5235 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Tom Stellardfc92e772015-05-12 14:18:14 +00005236 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
5237 SDValue Glue = Chain.getValue(1);
Matt Arsenaulta78ca622017-02-15 22:17:09 +00005238 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Jan Veselyd48445d2017-01-04 18:06:55 +00005239 Op.getOperand(2), Glue);
5240 }
Marek Olsak2d825902017-04-28 20:21:58 +00005241 case Intrinsic::amdgcn_init_exec: {
5242 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
5243 Op.getOperand(2));
5244 }
5245 case Intrinsic::amdgcn_init_exec_from_input: {
5246 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
5247 Op.getOperand(2), Op.getOperand(3));
5248 }
Matt Arsenault00568682016-07-13 06:04:22 +00005249 case AMDGPUIntrinsic::AMDGPU_kill: {
Matt Arsenault03006fd2016-07-19 16:27:56 +00005250 SDValue Src = Op.getOperand(2);
5251 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
Matt Arsenault00568682016-07-13 06:04:22 +00005252 if (!K->isNegative())
5253 return Chain;
Matt Arsenault03006fd2016-07-19 16:27:56 +00005254
5255 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
5256 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
Matt Arsenault00568682016-07-13 06:04:22 +00005257 }
5258
Matt Arsenault03006fd2016-07-19 16:27:56 +00005259 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
5260 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
Matt Arsenault00568682016-07-13 06:04:22 +00005261 }
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005262 case Intrinsic::amdgcn_s_barrier: {
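    // If the entire workgroup fits in a single wave, the lanes already execute
    // in lockstep, so the barrier can be relaxed to a WAVE_BARRIER pseudo,
    // which only constrains scheduling.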
5263 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005264 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
Matthias Braunf1caa282017-12-15 22:22:58 +00005265 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00005266 if (WGSize <= ST.getWavefrontSize())
5267 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
5268 Op.getOperand(0)), 0);
5269 }
5270 return SDValue();
5271  }
David Stuttard70e8bc12017-06-22 16:29:22 +00005272 case AMDGPUIntrinsic::SI_tbuffer_store: {
5273
5274 // Extract vindex and voffset from vaddr as appropriate
5275 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
5276 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
5277 SDValue VAddr = Op.getOperand(5);
5278
5279 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
5280
5281 assert(!(OffEn->isOne() && IdxEn->isOne()) &&
5282 "Legacy intrinsic doesn't support both offset and index - use new version");
5283
5284 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
5285 SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
5286
5287 // Deal with the vec-3 case
5288 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
5289 auto Opcode = NumChannels->getZExtValue() == 3 ?
5290 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
5291
5292 SDValue Ops[] = {
5293 Chain,
5294 Op.getOperand(3), // vdata
5295 Op.getOperand(2), // rsrc
5296 VIndex,
5297 VOffset,
5298 Op.getOperand(6), // soffset
5299 Op.getOperand(7), // inst_offset
5300 Op.getOperand(8), // dfmt
5301 Op.getOperand(9), // nfmt
5302 Op.getOperand(12), // glc
5303 Op.getOperand(13), // slc
5304 };
5305
David Stuttardf6779662017-06-22 17:15:49 +00005306 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
David Stuttard70e8bc12017-06-22 16:29:22 +00005307 "Value of tfe other than zero is unsupported");
5308
5309 EVT VT = Op.getOperand(3).getValueType();
5310 MachineMemOperand *MMO = MF.getMachineMemOperand(
5311 MachinePointerInfo(),
5312 MachineMemOperand::MOStore,
5313 VT.getStoreSize(), 4);
5314 return DAG.getMemIntrinsicNode(Opcode, DL,
5315 Op->getVTList(), Ops, VT, MMO);
5316 }
5317
5318 case Intrinsic::amdgcn_tbuffer_store: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005319 SDValue VData = Op.getOperand(2);
5320 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
5321 if (IsD16)
5322 VData = handleD16VData(VData, DAG);
David Stuttard70e8bc12017-06-22 16:29:22 +00005323 SDValue Ops[] = {
5324 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005325 VData, // vdata
David Stuttard70e8bc12017-06-22 16:29:22 +00005326 Op.getOperand(3), // rsrc
5327 Op.getOperand(4), // vindex
5328 Op.getOperand(5), // voffset
5329 Op.getOperand(6), // soffset
5330 Op.getOperand(7), // offset
5331 Op.getOperand(8), // dfmt
5332 Op.getOperand(9), // nfmt
5333 Op.getOperand(10), // glc
5334 Op.getOperand(11) // slc
5335 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005336 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
5337 AMDGPUISD::TBUFFER_STORE_FORMAT;
5338 MemSDNode *M = cast<MemSDNode>(Op);
5339 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5340 M->getMemoryVT(), M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00005341 }
5342
Marek Olsak5cec6412017-11-09 01:52:48 +00005343 case Intrinsic::amdgcn_buffer_store:
5344 case Intrinsic::amdgcn_buffer_store_format: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005345 SDValue VData = Op.getOperand(2);
5346 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
5347 if (IsD16)
5348 VData = handleD16VData(VData, DAG);
Marek Olsak5cec6412017-11-09 01:52:48 +00005349 SDValue Ops[] = {
5350 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005351 VData, // vdata
Marek Olsak5cec6412017-11-09 01:52:48 +00005352 Op.getOperand(3), // rsrc
5353 Op.getOperand(4), // vindex
5354 Op.getOperand(5), // offset
5355 Op.getOperand(6), // glc
5356 Op.getOperand(7) // slc
5357 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005358 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
5359 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
5360 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
5361 MemSDNode *M = cast<MemSDNode>(Op);
5362 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5363 M->getMemoryVT(), M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005364 }
Changpeng Fang4737e892018-01-18 22:08:53 +00005365 case Intrinsic::amdgcn_image_store:
5366 case Intrinsic::amdgcn_image_store_mip: {
5367 SDValue VData = Op.getOperand(2);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005368 EVT VT = VData.getValueType();
5369 if (Subtarget->hasUnpackedD16VMem() &&
5370 VT.isVector() && VT.getScalarSizeInBits() == 16) {
Matt Arsenault1349a042018-05-22 06:32:10 +00005371 SDValue Chain = Op.getOperand(0);
Changpeng Fang4737e892018-01-18 22:08:53 +00005372
Matt Arsenault1349a042018-05-22 06:32:10 +00005373 VData = handleD16VData(VData, DAG);
5374 SDValue Ops[] = {
5375 Chain, // Chain
5376 VData, // vdata
5377 Op.getOperand(3), // vaddr
5378 Op.getOperand(4), // rsrc
5379 Op.getOperand(5), // dmask
5380 Op.getOperand(6), // glc
5381 Op.getOperand(7), // slc
5382 Op.getOperand(8), // lwe
5383 Op.getOperand(9) // da
5384 };
5385 unsigned Opc = (IntrinsicID == Intrinsic::amdgcn_image_store) ?
5386 AMDGPUISD::IMAGE_STORE : AMDGPUISD::IMAGE_STORE_MIP;
5387 MemSDNode *M = cast<MemSDNode>(Op);
5388 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
5389 M->getMemoryVT(), M->getMemOperand());
5390 }
5391
5392 return SDValue();
5393 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005394 default: {
5395 const AMDGPU::D16ImageDimIntrinsic *D16ImageDimIntr =
5396 AMDGPU::lookupD16ImageDimIntrinsicByIntr(IntrinsicID);
5397 if (D16ImageDimIntr) {
5398 SDValue VData = Op.getOperand(2);
5399 EVT StoreVT = VData.getValueType();
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005400 if (Subtarget->hasUnpackedD16VMem() &&
5401 StoreVT.isVector() &&
5402 StoreVT.getScalarSizeInBits() == 16) {
Matt Arsenault1349a042018-05-22 06:32:10 +00005403 SmallVector<SDValue, 12> Ops(Op.getNode()->op_values());
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005404
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005405 Ops[1] = DAG.getConstant(D16ImageDimIntr->D16HelperIntr, DL, MVT::i32);
Matt Arsenault1349a042018-05-22 06:32:10 +00005406 Ops[2] = handleD16VData(VData, DAG);
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005407
5408 MemSDNode *M = cast<MemSDNode>(Op);
5409 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Op->getVTList(),
5410 Ops, M->getMemoryVT(),
5411 M->getMemOperand());
5412 }
5413 }
5414
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005415 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005416 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00005417 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005418}
5419
Matt Arsenault90083d32018-06-07 09:54:49 +00005420static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
5421 ISD::LoadExtType ExtType, SDValue Op,
5422 const SDLoc &SL, EVT VT) {
5423 if (VT.bitsLT(Op.getValueType()))
5424 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
5425
5426 switch (ExtType) {
5427 case ISD::SEXTLOAD:
5428 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
5429 case ISD::ZEXTLOAD:
5430 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
5431 case ISD::EXTLOAD:
5432 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
5433 case ISD::NON_EXTLOAD:
5434 return Op;
5435 }
5436
5437 llvm_unreachable("invalid ext type");
5438}
5439
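// Try to widen a uniform, sufficiently aligned sub-dword load from the
// constant (or invariant global) address space into a 32-bit load, then
// truncate or re-extend the result to the original type so the access can be
// selected as a scalar dword load.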
5440SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
5441 SelectionDAG &DAG = DCI.DAG;
5442 if (Ld->getAlignment() < 4 || Ld->isDivergent())
5443 return SDValue();
5444
5445 // FIXME: Constant loads should all be marked invariant.
5446 unsigned AS = Ld->getAddressSpace();
5447 if (AS != AMDGPUASI.CONSTANT_ADDRESS &&
5448 AS != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
5449      (AS != AMDGPUASI.GLOBAL_ADDRESS || !Ld->isInvariant()))
5450 return SDValue();
5451
5452 // Don't do this early, since it may interfere with adjacent load merging for
5453 // illegal types. We can avoid losing alignment information for exotic types
5454 // pre-legalize.
5455 EVT MemVT = Ld->getMemoryVT();
5456 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
5457 MemVT.getSizeInBits() >= 32)
5458 return SDValue();
5459
5460 SDLoc SL(Ld);
5461
5462 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
5463 "unexpected vector extload");
5464
5465 // TODO: Drop only high part of range.
5466 SDValue Ptr = Ld->getBasePtr();
5467 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
5468 MVT::i32, SL, Ld->getChain(), Ptr,
5469 Ld->getOffset(),
5470 Ld->getPointerInfo(), MVT::i32,
5471 Ld->getAlignment(),
5472 Ld->getMemOperand()->getFlags(),
5473 Ld->getAAInfo(),
5474 nullptr); // Drop ranges
5475
5476 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
5477 if (MemVT.isFloatingPoint()) {
5478 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
5479 "unexpected fp extload");
5480 TruncVT = MemVT.changeTypeToInteger();
5481 }
5482
5483 SDValue Cvt = NewLoad;
5484 if (Ld->getExtensionType() == ISD::SEXTLOAD) {
5485 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
5486 DAG.getValueType(TruncVT));
5487 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
5488 Ld->getExtensionType() == ISD::NON_EXTLOAD) {
5489 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
5490 } else {
5491 assert(Ld->getExtensionType() == ISD::EXTLOAD);
5492 }
5493
5494 EVT VT = Ld->getValueType(0);
5495 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
5496
5497 DCI.AddToWorklist(Cvt.getNode());
5498
5499 // We may need to handle exotic cases, such as i16->i64 extloads, so insert
5500 // the appropriate extension from the 32-bit load.
5501 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
5502 DCI.AddToWorklist(Cvt.getNode());
5503
5504 // Handle conversion back to floating point if necessary.
5505 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
5506
5507 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
5508}
5509
Tom Stellard81d871d2013-11-13 23:36:50 +00005510SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
5511 SDLoc DL(Op);
5512 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00005513 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00005514 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00005515
Matt Arsenaulta1436412016-02-10 18:21:45 +00005516 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00005517 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
5518 return SDValue();
5519
Matt Arsenault6dfda962016-02-10 18:21:39 +00005520 // FIXME: Copied from PPC
5521 // First, load into 32 bits, then truncate to 1 bit.
5522
5523 SDValue Chain = Load->getChain();
5524 SDValue BasePtr = Load->getBasePtr();
5525 MachineMemOperand *MMO = Load->getMemOperand();
5526
Tom Stellard115a6152016-11-10 16:02:37 +00005527 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
5528
Matt Arsenault6dfda962016-02-10 18:21:39 +00005529 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00005530 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00005531
5532 SDValue Ops[] = {
Matt Arsenaulta1436412016-02-10 18:21:45 +00005533 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
Matt Arsenault6dfda962016-02-10 18:21:39 +00005534 NewLD.getValue(1)
5535 };
5536
5537 return DAG.getMergeValues(Ops, DL);
5538 }
Tom Stellard81d871d2013-11-13 23:36:50 +00005539
Matt Arsenaulta1436412016-02-10 18:21:45 +00005540 if (!MemVT.isVector())
5541 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00005542
Matt Arsenaulta1436412016-02-10 18:21:45 +00005543 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
5544 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00005545
Farhana Aleen89196642018-03-07 17:09:18 +00005546 unsigned Alignment = Load->getAlignment();
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005547 unsigned AS = Load->getAddressSpace();
5548 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
Farhana Aleen89196642018-03-07 17:09:18 +00005549 AS, Alignment)) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005550 SDValue Ops[2];
5551 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
5552 return DAG.getMergeValues(Ops, DL);
5553 }
5554
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005555 MachineFunction &MF = DAG.getMachineFunction();
5556 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5557 // If there is a possibilty that flat instruction access scratch memory
5558 // then we need to use the same legalization rules we use for private.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005559 if (AS == AMDGPUASI.FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005560 AS = MFI->hasFlatScratchInit() ?
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005561 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005562
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005563 unsigned NumElements = MemVT.getVectorNumElements();
Matt Arsenault6c041a32018-03-29 19:59:28 +00005564
Matt Arsenault923712b2018-02-09 16:57:57 +00005565 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5566 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
Matt Arsenault6c041a32018-03-29 19:59:28 +00005567 if (!Op->isDivergent() && Alignment >= 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00005568 return SDValue();
5569 // Non-uniform loads will be selected to MUBUF instructions, so they
Alexander Timofeev18009562016-12-08 17:28:47 +00005570 // have the same legalization requirements as global and private
Matt Arsenaulta1436412016-02-10 18:21:45 +00005571 // loads.
5572 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005573 }
Matt Arsenault6c041a32018-03-29 19:59:28 +00005574
Matt Arsenault923712b2018-02-09 16:57:57 +00005575 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5576 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
5577 AS == AMDGPUASI.GLOBAL_ADDRESS) {
Alexander Timofeev2e5eece2018-03-05 15:12:21 +00005578 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Farhana Aleen89196642018-03-07 17:09:18 +00005579 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
Matt Arsenault6c041a32018-03-29 19:59:28 +00005580 Alignment >= 4)
Alexander Timofeev18009562016-12-08 17:28:47 +00005581 return SDValue();
5582 // Non-uniform loads will be selected to MUBUF instructions, so they
5583 // have the same legalization requirements as global and private
5584 // loads.
5585 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005586 }
Matt Arsenault923712b2018-02-09 16:57:57 +00005587 if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
5588 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
5589 AS == AMDGPUASI.GLOBAL_ADDRESS ||
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005590 AS == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005591 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00005592 return SplitVectorLoad(Op, DAG);
5593 // v4 loads are supported for private and global memory.
5594 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005595 }
5596 if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005597 // Depending on the setting of the private_element_size field in the
5598 // resource descriptor, we can only make private accesses up to a certain
5599 // size.
5600 switch (Subtarget->getMaxPrivateElementSize()) {
5601 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00005602 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005603 case 8:
5604 if (NumElements > 2)
5605 return SplitVectorLoad(Op, DAG);
5606 return SDValue();
5607 case 16:
5608 // Same as global/flat
5609 if (NumElements > 4)
5610 return SplitVectorLoad(Op, DAG);
5611 return SDValue();
5612 default:
5613 llvm_unreachable("unsupported private_element_size");
5614 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005615 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
Farhana Aleena7cb3112018-03-09 17:41:39 +00005616 // Use ds_read_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00005617 if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
Farhana Aleena7cb3112018-03-09 17:41:39 +00005618 MemVT.getStoreSize() == 16)
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005619 return SDValue();
5620
Farhana Aleena7cb3112018-03-09 17:41:39 +00005621 if (NumElements > 2)
5622 return SplitVectorLoad(Op, DAG);
Tom Stellarde9373602014-01-22 19:24:14 +00005623 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005624 return SDValue();
Tom Stellard81d871d2013-11-13 23:36:50 +00005625}
5626
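// Lower a 64-bit select by bitcasting the operands to v2i32 and selecting the
// low and high halves with two 32-bit selects.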
Tom Stellard0ec134f2014-02-04 17:18:40 +00005627SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005628 EVT VT = Op.getValueType();
5629 assert(VT.getSizeInBits() == 64);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005630
5631 SDLoc DL(Op);
5632 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005633
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005634 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
5635 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005636
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00005637 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
5638 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
5639
5640 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
5641 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005642
5643 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
5644
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00005645 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
5646 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005647
5648 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
5649
Ahmed Bougacha128f8732016-04-26 21:15:30 +00005650 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005651 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00005652}
5653
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005654// Catch division cases where we can use shortcuts with rcp and rsq
5655// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005656SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
5657 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005658 SDLoc SL(Op);
5659 SDValue LHS = Op.getOperand(0);
5660 SDValue RHS = Op.getOperand(1);
5661 EVT VT = Op.getValueType();
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005662 const SDNodeFlags Flags = Op->getFlags();
Michael Berg7acc81b2018-05-04 18:48:20 +00005663 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005664
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00005665 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
5666 return SDValue();
5667
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005668 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00005669 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
Matt Arsenault979902b2016-08-02 22:25:04 +00005670 if (CLHS->isExactlyValue(1.0)) {
5671 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
5672 // the CI documentation has a worst case error of 1 ulp.
5673 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
5674 // use it as long as we aren't trying to use denormals.
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005675 //
5676 // v_rcp_f16 and v_rsq_f16 DO support denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005677
Matt Arsenault979902b2016-08-02 22:25:04 +00005678 // 1.0 / sqrt(x) -> rsq(x)
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005679
Matt Arsenault979902b2016-08-02 22:25:04 +00005680 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
5681 // error seems really high at 2^29 ULP.
5682 if (RHS.getOpcode() == ISD::FSQRT)
5683 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
5684
5685 // 1.0 / x -> rcp(x)
5686 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
5687 }
5688
5689 // Same as for 1.0, but expand the sign out of the constant.
5690 if (CLHS->isExactlyValue(-1.0)) {
5691 // -1.0 / x -> rcp (fneg x)
5692 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
5693 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
5694 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005695 }
5696 }
5697
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005698 if (Unsafe) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005699 // Turn into multiply by the reciprocal.
5700 // x / y -> x * (1.0 / y)
5701 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00005702 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005703 }
5704
5705 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005706}
5707
Tom Stellard8485fa02016-12-07 02:42:15 +00005708static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5709 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
5710 if (GlueChain->getNumValues() <= 1) {
5711 return DAG.getNode(Opcode, SL, VT, A, B);
5712 }
5713
5714 assert(GlueChain->getNumValues() == 3);
5715
5716 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5717 switch (Opcode) {
5718 default: llvm_unreachable("no chain equivalent for opcode");
5719 case ISD::FMUL:
5720 Opcode = AMDGPUISD::FMUL_W_CHAIN;
5721 break;
5722 }
5723
5724 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
5725 GlueChain.getValue(2));
5726}
5727
5728static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
5729 EVT VT, SDValue A, SDValue B, SDValue C,
5730 SDValue GlueChain) {
5731 if (GlueChain->getNumValues() <= 1) {
5732 return DAG.getNode(Opcode, SL, VT, A, B, C);
5733 }
5734
5735 assert(GlueChain->getNumValues() == 3);
5736
5737 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
5738 switch (Opcode) {
5739 default: llvm_unreachable("no chain equivalent for opcode");
5740 case ISD::FMA:
5741 Opcode = AMDGPUISD::FMA_W_CHAIN;
5742 break;
5743 }
5744
5745 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
5746 GlueChain.getValue(2));
5747}
5748
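// f16 division is lowered through f32: approximate the quotient as
// src0 * rcp(src1) in f32, round back to f16, and let DIV_FIXUP correct the
// result and handle the special cases.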
Matt Arsenault4052a572016-12-22 03:05:41 +00005749SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00005750 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
5751 return FastLowered;
5752
Matt Arsenault4052a572016-12-22 03:05:41 +00005753 SDLoc SL(Op);
5754 SDValue Src0 = Op.getOperand(0);
5755 SDValue Src1 = Op.getOperand(1);
5756
5757 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
5758 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
5759
5760 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
5761 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
5762
5763 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
5764 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
5765
5766 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
5767}
5768
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005769// Faster 2.5 ULP division that does not support denormals.
5770SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
5771 SDLoc SL(Op);
5772 SDValue LHS = Op.getOperand(1);
5773 SDValue RHS = Op.getOperand(2);
5774
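  // If |RHS| is very large (greater than 2^96, K0 = 0x6f800000), its
  // reciprocal approaches the denormal range that v_rcp_f32 flushes to zero,
  // so RHS is pre-scaled by 2^-32 (K1 = 0x2f800000) and the final result is
  // multiplied by the same factor; the scales cancel:
  //   LHS * 2^-32 * rcp(RHS * 2^-32) == LHS / RHS.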
5775 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
5776
5777 const APFloat K0Val(BitsToFloat(0x6f800000));
5778 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
5779
5780 const APFloat K1Val(BitsToFloat(0x2f800000));
5781 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
5782
5783 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
5784
5785 EVT SetCCVT =
5786 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
5787
5788 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
5789
5790 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
5791
5792 // TODO: Should this propagate fast-math-flags?
5793 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
5794
5795 // rcp does not support denormals.
5796 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
5797
5798 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
5799
5800 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
5801}
5802
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005803SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005804 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00005805 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00005806
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005807 SDLoc SL(Op);
5808 SDValue LHS = Op.getOperand(0);
5809 SDValue RHS = Op.getOperand(1);
5810
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005811 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005812
Wei Dinged0f97f2016-06-09 19:17:15 +00005813 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005814
Tom Stellard8485fa02016-12-07 02:42:15 +00005815 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5816 RHS, RHS, LHS);
5817 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
5818 LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005819
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00005820 // Denominator is scaled to not be denormal, so using rcp is ok.
Tom Stellard8485fa02016-12-07 02:42:15 +00005821 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
5822 DenominatorScaled);
5823 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
5824 DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005825
Tom Stellard8485fa02016-12-07 02:42:15 +00005826 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
5827 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
5828 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005829
Tom Stellard8485fa02016-12-07 02:42:15 +00005830 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005831
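  // The hwreg descriptor above addresses a 2-bit field at offset 4 of the MODE
  // register; this is assumed to be the single-precision denormal control, so
  // denormal support can be toggled just around the FMA sequence below.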
Tom Stellard8485fa02016-12-07 02:42:15 +00005832 if (!Subtarget->hasFP32Denormals()) {
5833 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
5834 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
5835 SL, MVT::i32);
5836 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
5837 DAG.getEntryNode(),
5838 EnableDenormValue, BitField);
5839 SDValue Ops[3] = {
5840 NegDivScale0,
5841 EnableDenorm.getValue(0),
5842 EnableDenorm.getValue(1)
5843 };
Matt Arsenault37fefd62016-06-10 02:18:02 +00005844
Tom Stellard8485fa02016-12-07 02:42:15 +00005845 NegDivScale0 = DAG.getMergeValues(Ops, SL);
5846 }
5847
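  // Newton-Raphson style refinement over the scaled numerator n and
  // denominator d:
  //   Fma0 = 1 - d * rcp(d)         (error in the reciprocal)
  //   Fma1 = rcp(d) + rcp(d) * Fma0 (refined reciprocal)
  //   Mul  = n * Fma1               (approximate quotient)
  //   Fma2 = n - d * Mul            (residual)
  //   Fma3 = Mul + Fma1 * Fma2      (refined quotient)
  //   Fma4 = n - d * Fma3           (final residual, consumed by DIV_FMAS)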
5848 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
5849 ApproxRcp, One, NegDivScale0);
5850
5851 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
5852 ApproxRcp, Fma0);
5853
5854 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
5855 Fma1, Fma1);
5856
5857 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
5858 NumeratorScaled, Mul);
5859
5860 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
5861
5862 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
5863 NumeratorScaled, Fma3);
5864
5865 if (!Subtarget->hasFP32Denormals()) {
5866 const SDValue DisableDenormValue =
5867 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
5868 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
5869 Fma4.getValue(1),
5870 DisableDenormValue,
5871 BitField,
5872 Fma4.getValue(2));
5873
5874 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
5875 DisableDenorm, DAG.getRoot());
5876 DAG.setRoot(OutputChain);
5877 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00005878
Wei Dinged0f97f2016-06-09 19:17:15 +00005879 SDValue Scale = NumeratorScaled.getValue(1);
Tom Stellard8485fa02016-12-07 02:42:15 +00005880 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
5881 Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00005882
Wei Dinged0f97f2016-06-09 19:17:15 +00005883 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005884}
5885
5886SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005887 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005888 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005889
5890 SDLoc SL(Op);
5891 SDValue X = Op.getOperand(0);
5892 SDValue Y = Op.getOperand(1);
5893
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005894 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005895
5896 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
5897
5898 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
5899
5900 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
5901
5902 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
5903
5904 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
5905
5906 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
5907
5908 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
5909
5910 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
5911
5912 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
5913 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
5914
5915 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
5916 NegDivScale0, Mul, DivScale1);
5917
5918 SDValue Scale;
5919
Matt Arsenault43e92fe2016-06-24 06:30:11 +00005920 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005921 // Workaround a hardware bug on SI where the condition output from div_scale
5922 // is not usable.
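    // Recover it manually by comparing the high dword of each div_scale result
    // against the corresponding unscaled operand.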
5923
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00005924 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00005925
5926 // Figure out which scale to use for div_fmas.
5927 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
5928 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
5929 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
5930 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
5931
5932 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
5933 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
5934
5935 SDValue Scale0Hi
5936 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
5937 SDValue Scale1Hi
5938 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
5939
5940 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
5941 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
5942 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
5943 } else {
5944 Scale = DivScale1.getValue(1);
5945 }
5946
5947 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
5948 Fma4, Fma3, Mul, Scale);
5949
5950 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005951}
5952
5953SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
5954 EVT VT = Op.getValueType();
5955
5956 if (VT == MVT::f32)
5957 return LowerFDIV32(Op, DAG);
5958
5959 if (VT == MVT::f64)
5960 return LowerFDIV64(Op, DAG);
5961
Matt Arsenault4052a572016-12-22 03:05:41 +00005962 if (VT == MVT::f16)
5963 return LowerFDIV16(Op, DAG);
5964
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00005965 llvm_unreachable("Unexpected type for fdiv");
5966}
5967
Tom Stellard81d871d2013-11-13 23:36:50 +00005968SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
5969 SDLoc DL(Op);
5970 StoreSDNode *Store = cast<StoreSDNode>(Op);
5971 EVT VT = Store->getMemoryVT();
5972
Matt Arsenault95245662016-02-11 05:32:46 +00005973 if (VT == MVT::i1) {
5974 return DAG.getTruncStore(Store->getChain(), DL,
5975 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
5976 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00005977 }
5978
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00005979 assert(VT.isVector() &&
5980 Store->getValue().getValueType().getScalarType() == MVT::i32);
5981
5982 unsigned AS = Store->getAddressSpace();
5983 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
5984 AS, Store->getAlignment())) {
5985 return expandUnalignedStore(Store, DAG);
5986 }
Tom Stellard81d871d2013-11-13 23:36:50 +00005987
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005988 MachineFunction &MF = DAG.getMachineFunction();
5989 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
5990 // If there is a possibility that flat instructions access scratch memory
5991 // then we need to use the same legalization rules we use for private.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005992 if (AS == AMDGPUASI.FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005993 AS = MFI->hasFlatScratchInit() ?
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005994 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00005995
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005996 unsigned NumElements = VT.getVectorNumElements();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00005997 if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
5998 AS == AMDGPUASI.FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00005999 if (NumElements > 4)
6000 return SplitVectorStore(Op, DAG);
6001 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006002 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006003 switch (Subtarget->getMaxPrivateElementSize()) {
6004 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006005 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006006 case 8:
6007 if (NumElements > 2)
6008 return SplitVectorStore(Op, DAG);
6009 return SDValue();
6010 case 16:
6011 if (NumElements > 4)
6012 return SplitVectorStore(Op, DAG);
6013 return SDValue();
6014 default:
6015 llvm_unreachable("unsupported private_element_size");
6016 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006017 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006018 // Use ds_write_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006019 if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006020 VT.getStoreSize() == 16)
6021 return SDValue();
6022
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006023 if (NumElements > 2)
6024 return SplitVectorStore(Op, DAG);
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006025 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006026 } else {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006027 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00006028 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006029}
6030
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006031SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006032 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006033 EVT VT = Op.getValueType();
6034 SDValue Arg = Op.getOperand(0);
Sanjay Patela2607012015-09-16 16:31:21 +00006035 // TODO: Should this propagate fast-math-flags?
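  // The hardware sin/cos are assumed to take their input in turns rather than
  // radians, so scale by 1/(2*pi) and take the fractional part to keep the
  // argument in range before emitting SIN_HW/COS_HW.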
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006036 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
6037 DAG.getNode(ISD::FMUL, DL, VT, Arg,
6038 DAG.getConstantFP(0.5/M_PI, DL,
6039 VT)));
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006040
6041 switch (Op.getOpcode()) {
6042 case ISD::FCOS:
6043 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
6044 case ISD::FSIN:
6045 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
6046 default:
6047 llvm_unreachable("Wrong trig opcode");
6048 }
6049}
6050
Tom Stellard354a43c2016-04-01 18:27:37 +00006051SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
6052 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
6053 assert(AtomicNode->isCompareAndSwap());
6054 unsigned AS = AtomicNode->getAddressSpace();
6055
6056 // No custom lowering required for local address space
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006057 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
Tom Stellard354a43c2016-04-01 18:27:37 +00006058 return Op;
6059
6060 // Non-local address spaces require custom lowering for atomic compare
6061 // and swap; the cmp and swap values are packed into a v2i32 (or v2i64 for _X2)
6062 SDLoc DL(Op);
6063 SDValue ChainIn = Op.getOperand(0);
6064 SDValue Addr = Op.getOperand(1);
6065 SDValue Old = Op.getOperand(2);
6066 SDValue New = Op.getOperand(3);
6067 EVT VT = Op.getValueType();
6068 MVT SimpleVT = VT.getSimpleVT();
6069 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
6070
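  // Pack the swap (new) value in the low element and the compare value in the
  // high element; this ordering is assumed to match what the cmpswap
  // instruction expects.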
Ahmed Bougacha128f8732016-04-26 21:15:30 +00006071 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00006072 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00006073
6074 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
6075 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00006076}
6077
Tom Stellard75aadc22012-12-11 21:25:42 +00006078//===----------------------------------------------------------------------===//
6079// Custom DAG optimizations
6080//===----------------------------------------------------------------------===//
6081
Matt Arsenault364a6742014-06-11 17:50:44 +00006082SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00006083 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00006084 EVT VT = N->getValueType(0);
6085 EVT ScalarVT = VT.getScalarType();
6086 if (ScalarVT != MVT::f32)
6087 return SDValue();
6088
6089 SelectionDAG &DAG = DCI.DAG;
6090 SDLoc DL(N);
6091
6092 SDValue Src = N->getOperand(0);
6093 EVT SrcVT = Src.getValueType();
6094
6095 // TODO: We could try to match extracting the higher bytes, which would be
6096 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
6097 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
6098 // about in practice.
Craig Topper80d3bb32018-03-06 19:44:52 +00006099 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
Matt Arsenault364a6742014-06-11 17:50:44 +00006100 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
6101 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
6102 DCI.AddToWorklist(Cvt.getNode());
6103 return Cvt;
6104 }
6105 }
6106
Matt Arsenault364a6742014-06-11 17:50:44 +00006107 return SDValue();
6108}
6109
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006110// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
6111
6112// This is a variant of
6113// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
6114//
6115// The normal DAG combiner will do this, but only if the add has one use since
6116// that would increase the number of instructions.
6117//
6118// This prevents us from seeing a constant offset that can be folded into a
6119// memory instruction's addressing mode. If we know the resulting add offset of
6120// a pointer can be folded into an addressing offset, we can replace the pointer
6121// operand with the add of new constant offset. This eliminates one of the uses,
6122// and may allow the remaining use to also be simplified.
6123//
6124SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
6125 unsigned AddrSpace,
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006126 EVT MemVT,
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006127 DAGCombinerInfo &DCI) const {
6128 SDValue N0 = N->getOperand(0);
6129 SDValue N1 = N->getOperand(1);
6130
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006131 // We only do this to handle cases where it's profitable when there are
6132 // multiple uses of the add, so defer to the standard combine.
Matt Arsenaultc8903122017-11-14 23:46:42 +00006133 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
6134 N0->hasOneUse())
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006135 return SDValue();
6136
6137 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
6138 if (!CN1)
6139 return SDValue();
6140
6141 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
6142 if (!CAdd)
6143 return SDValue();
6144
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006145 // If the resulting offset is too large, we can't fold it into the addressing
6146 // mode offset.
6147 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006148 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
6149
6150 AddrMode AM;
6151 AM.HasBaseReg = true;
6152 AM.BaseOffs = Offset.getSExtValue();
6153 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006154 return SDValue();
6155
6156 SelectionDAG &DAG = DCI.DAG;
6157 SDLoc SL(N);
6158 EVT VT = N->getValueType(0);
6159
6160 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006161 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006162
Matt Arsenaulte5e0c742017-11-13 05:33:35 +00006163 SDNodeFlags Flags;
6164 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
6165 (N0.getOpcode() == ISD::OR ||
6166 N0->getFlags().hasNoUnsignedWrap()));
6167
6168 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00006169}
6170
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00006171SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
6172 DAGCombinerInfo &DCI) const {
6173 SDValue Ptr = N->getBasePtr();
6174 SelectionDAG &DAG = DCI.DAG;
6175 SDLoc SL(N);
6176
6177 // TODO: We could also do this for multiplies.
Matt Arsenaultfbe95332017-11-13 05:11:54 +00006178 if (Ptr.getOpcode() == ISD::SHL) {
6179 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
6180 N->getMemoryVT(), DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00006181 if (NewPtr) {
6182 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
6183
6184 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
6185 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
6186 }
6187 }
6188
6189 return SDValue();
6190}
6191
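// A 32-bit half of a bitwise operation with constant Val folds away completely
// when Val is the identity or absorbing element for that operation, e.g.
// (and x, 0) and (or x, 0xffffffff) become constants, while (and x, 0xffffffff),
// (or x, 0) and (xor x, 0) become x.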
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006192static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
6193 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
6194 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
6195 (Opc == ISD::XOR && Val == 0);
6196}
6197
6198// Break up a 64-bit bitwise operation with a constant into two 32-bit and/or/xor. This
6199// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
6200// integer combine opportunities since most 64-bit operations are decomposed
6201// this way. TODO: We won't want this for SALU especially if it is an inline
6202// immediate.
6203SDValue SITargetLowering::splitBinaryBitConstantOp(
6204 DAGCombinerInfo &DCI,
6205 const SDLoc &SL,
6206 unsigned Opc, SDValue LHS,
6207 const ConstantSDNode *CRHS) const {
6208 uint64_t Val = CRHS->getZExtValue();
6209 uint32_t ValLo = Lo_32(Val);
6210 uint32_t ValHi = Hi_32(Val);
6211 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6212
6213 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
6214 bitOpWithConstantIsReducible(Opc, ValHi)) ||
6215 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
6216 // If we need to materialize a 64-bit immediate, it will be split up later
6217 // anyway. Avoid creating the harder to understand 64-bit immediate
6218 // materialization.
6219 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
6220 }
6221
6222 return SDValue();
6223}
6224
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006225// Returns true if the argument is a boolean value which is not serialized into
6226// memory or an argument and does not require v_cndmask_b32 to be deserialized.
6227static bool isBoolSGPR(SDValue V) {
6228 if (V.getValueType() != MVT::i1)
6229 return false;
6230 switch (V.getOpcode()) {
6231 default: break;
6232 case ISD::SETCC:
6233 case ISD::AND:
6234 case ISD::OR:
6235 case ISD::XOR:
6236 case AMDGPUISD::FP_CLASS:
6237 return true;
6238 }
6239 return false;
6240}
6241
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006242// If a constant has all zeroes or all ones within each byte return it.
6243// Otherwise return 0.
6244static uint32_t getConstantPermuteMask(uint32_t C) {
6245 // 0xff for any zero byte in the mask
6246 uint32_t ZeroByteMask = 0;
6247 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
6248 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
6249 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
6250 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
6251 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
6252 if ((NonZeroByteMask & C) != NonZeroByteMask)
6253 return 0; // Partial bytes selected.
6254 return C;
6255}
6256
6257// Check if a node selects whole bytes from its operand 0 starting at a byte
6258// boundary while masking the rest. Returns the select mask as used by the
6259// v_perm_b32 instruction, or ~0 if the node does not match.
6260// Note byte select encoding:
6261// value 0-3 selects corresponding source byte;
6262// value 0xc selects zero;
6263// value 0xff selects 0xff.
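// For example, (and x, 0x0000ffff) keeps bytes 1:0 of x and zeroes the rest,
// which corresponds to the select mask 0x0c0c0100.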
6264static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
6265 assert(V.getValueSizeInBits() == 32);
6266
6267 if (V.getNumOperands() != 2)
6268 return ~0;
6269
6270 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
6271 if (!N1)
6272 return ~0;
6273
6274 uint32_t C = N1->getZExtValue();
6275
6276 switch (V.getOpcode()) {
6277 default:
6278 break;
6279 case ISD::AND:
6280 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
6281 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
6282 }
6283 break;
6284
6285 case ISD::OR:
6286 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
6287 return (0x03020100 & ~ConstMask) | ConstMask;
6288 }
6289 break;
6290
6291 case ISD::SHL:
6292 if (C % 8)
6293 return ~0;
6294
6295 return uint32_t((0x030201000c0c0c0cull << C) >> 32);
6296
6297 case ISD::SRL:
6298 if (C % 8)
6299 return ~0;
6300
6301 return uint32_t(0x0c0c0c0c03020100ull >> C);
6302 }
6303
6304 return ~0;
6305}
6306
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006307SDValue SITargetLowering::performAndCombine(SDNode *N,
6308 DAGCombinerInfo &DCI) const {
6309 if (DCI.isBeforeLegalize())
6310 return SDValue();
6311
6312 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006313 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006314 SDValue LHS = N->getOperand(0);
6315 SDValue RHS = N->getOperand(1);
6316
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006317
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00006318 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
6319 if (VT == MVT::i64 && CRHS) {
6320 if (SDValue Split
6321 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
6322 return Split;
6323 }
6324
6325 if (CRHS && VT == MVT::i32) {
6326 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
6327 // nb = number of trailing zeroes in mask
6328 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
6329 // given that we are selecting 8 or 16 bit fields starting at a byte boundary.
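    // e.g. (and (srl x, 8), 0xff00) becomes (shl (bfe x, 16, 8), 8).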
6330 uint64_t Mask = CRHS->getZExtValue();
6331 unsigned Bits = countPopulation(Mask);
6332 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
6333 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
6334 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
6335 unsigned Shift = CShift->getZExtValue();
6336 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
6337 unsigned Offset = NB + Shift;
6338 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
6339 SDLoc SL(N);
6340 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
6341 LHS->getOperand(0),
6342 DAG.getConstant(Offset, SL, MVT::i32),
6343 DAG.getConstant(Bits, SL, MVT::i32));
6344 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
6345 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
6346 DAG.getValueType(NarrowVT));
6347 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
6348 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
6349 return Shl;
6350 }
6351 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006352 }
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006353
6354 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
6355 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
6356 isa<ConstantSDNode>(LHS.getOperand(2))) {
6357 uint32_t Sel = getConstantPermuteMask(Mask);
6358 if (!Sel)
6359 return SDValue();
6360
6361 // Select 0xc for all zero bytes
6362 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
6363 SDLoc DL(N);
6364 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
6365 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
6366 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006367 }
6368
6369 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
6370 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
6371 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006372 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6373 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
6374
6375 SDValue X = LHS.getOperand(0);
6376 SDValue Y = RHS.getOperand(0);
6377 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
6378 return SDValue();
6379
6380 if (LCC == ISD::SETO) {
6381 if (X != LHS.getOperand(1))
6382 return SDValue();
6383
6384 if (RCC == ISD::SETUNE) {
6385 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
6386 if (!C1 || !C1->isInfinity() || C1->isNegative())
6387 return SDValue();
6388
6389 const uint32_t Mask = SIInstrFlags::N_NORMAL |
6390 SIInstrFlags::N_SUBNORMAL |
6391 SIInstrFlags::N_ZERO |
6392 SIInstrFlags::P_ZERO |
6393 SIInstrFlags::P_SUBNORMAL |
6394 SIInstrFlags::P_NORMAL;
6395
6396 static_assert(((~(SIInstrFlags::S_NAN |
6397 SIInstrFlags::Q_NAN |
6398 SIInstrFlags::N_INFINITY |
6399 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
6400 "mask not equal");
6401
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006402 SDLoc DL(N);
6403 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6404 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006405 }
6406 }
6407 }
6408
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00006409 if (VT == MVT::i32 &&
6410 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
6411 // and x, (sext cc from i1) => select cc, x, 0
6412 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
6413 std::swap(LHS, RHS);
6414 if (isBoolSGPR(RHS.getOperand(0)))
6415 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
6416 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
6417 }
6418
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006419 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
6420 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6421 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
6422 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
6423 uint32_t LHSMask = getPermuteMask(DAG, LHS);
6424 uint32_t RHSMask = getPermuteMask(DAG, RHS);
6425 if (LHSMask != ~0u && RHSMask != ~0u) {
6426 // Canonicalize the expression in an attempt to have fewer unique masks
6427 // and therefore fewer registers used to hold the masks.
6428 if (LHSMask > RHSMask) {
6429 std::swap(LHSMask, RHSMask);
6430 std::swap(LHS, RHS);
6431 }
6432
6433 // Mark with 0xc each byte that actually selects a source lane (selector
6434 // values 0-3); bytes encoding zero (0xc) or the constant 0xff are not used lanes.
6435 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6436 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6437
6438 // Check if we need to combine values from two sources within a byte.
6439 if (!(LHSUsedLanes & RHSUsedLanes) &&
6440 // If LHS selects the high word and RHS the low word, keep it for SDWA.
6441 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
6442 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
6443 // Each byte of each mask is either a selector value 0-3, 0xc (zero) or 0xff.
6444 // If a byte is 0xc in either mask the combined byte must be 0xc; otherwise
6445 // the byte that is not 0xff wins. ANDing the two masks gives the correct
6446 // result in every case except that bytes meant to be 0xc must still be
6447 // corrected to give exactly 0xc.
6448 uint32_t Mask = LHSMask & RHSMask;
6449 for (unsigned I = 0; I < 32; I += 8) {
6450 uint32_t ByteSel = 0xff << I;
6451 if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
6452 Mask &= (0x0c << I) & 0xffffffff;
6453 }
6454
6455 // Add 4 to each active LHS lane. It will not affect any existing 0xff
6456 // or 0x0c.
6457 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
6458 SDLoc DL(N);
6459
6460 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
6461 LHS.getOperand(0), RHS.getOperand(0),
6462 DAG.getConstant(Sel, DL, MVT::i32));
6463 }
6464 }
6465 }
6466
Matt Arsenaultd0101a22015-01-06 23:00:46 +00006467 return SDValue();
6468}
6469
Matt Arsenaultf2290332015-01-06 23:00:39 +00006470SDValue SITargetLowering::performOrCombine(SDNode *N,
6471 DAGCombinerInfo &DCI) const {
6472 SelectionDAG &DAG = DCI.DAG;
6473 SDValue LHS = N->getOperand(0);
6474 SDValue RHS = N->getOperand(1);
6475
Matt Arsenault3b082382016-04-12 18:24:38 +00006476 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006477 if (VT == MVT::i1) {
6478 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
6479 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
6480 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
6481 SDValue Src = LHS.getOperand(0);
6482 if (Src != RHS.getOperand(0))
6483 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00006484
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006485 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
6486 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
6487 if (!CLHS || !CRHS)
6488 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00006489
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006490 // Only 10 bits are used.
6491 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00006492
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006493 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
6494 SDLoc DL(N);
6495 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
6496 Src, DAG.getConstant(NewMask, DL, MVT::i32));
6497 }
Matt Arsenault3b082382016-04-12 18:24:38 +00006498
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006499 return SDValue();
6500 }
6501
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00006502 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
6503 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
6504 LHS.getOpcode() == AMDGPUISD::PERM &&
6505 isa<ConstantSDNode>(LHS.getOperand(2))) {
6506 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
6507 if (!Sel)
6508 return SDValue();
6509
6510 Sel |= LHS.getConstantOperandVal(2);
6511 SDLoc DL(N);
6512 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
6513 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
6514 }
6515
6516 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
6517 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
6518 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
6519 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
6520 uint32_t LHSMask = getPermuteMask(DAG, LHS);
6521 uint32_t RHSMask = getPermuteMask(DAG, RHS);
6522 if (LHSMask != ~0u && RHSMask != ~0u) {
6523 // Canonicalize the expression in an attempt to have fewer unique masks
6524 // and therefore fewer registers used to hold the masks.
6525 if (LHSMask > RHSMask) {
6526 std::swap(LHSMask, RHSMask);
6527 std::swap(LHS, RHS);
6528 }
6529
6530 // Mark with 0xc each byte that actually selects a source lane (selector
6531 // values 0-3); bytes encoding zero (0xc) or the constant 0xff are not used lanes.
6532 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6533 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
6534
6535 // Check if we need to combine values from two sources within a byte.
6536 if (!(LHSUsedLanes & RHSUsedLanes) &&
6537 // If LHS selects the high word and RHS the low word, keep it for SDWA.
6538 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
6539 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
6540 // Kill zero bytes selected by other mask. Zero value is 0xc.
6541 LHSMask &= ~RHSUsedLanes;
6542 RHSMask &= ~LHSUsedLanes;
6543 // Add 4 to each active LHS lane
6544 LHSMask |= LHSUsedLanes & 0x04040404;
6545 // Combine masks
6546 uint32_t Sel = LHSMask | RHSMask;
6547 SDLoc DL(N);
6548
6549 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
6550 LHS.getOperand(0), RHS.getOperand(0),
6551 DAG.getConstant(Sel, DL, MVT::i32));
6552 }
6553 }
6554 }
6555
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006556 if (VT != MVT::i64)
6557 return SDValue();
6558
6559 // TODO: This could be a generic combine with a predicate for extracting the
6560 // high half of an integer being free.
6561
6562 // (or i64:x, (zero_extend i32:y)) ->
6563 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
6564 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
6565 RHS.getOpcode() != ISD::ZERO_EXTEND)
6566 std::swap(LHS, RHS);
6567
6568 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
6569 SDValue ExtSrc = RHS.getOperand(0);
6570 EVT SrcVT = ExtSrc.getValueType();
6571 if (SrcVT == MVT::i32) {
6572 SDLoc SL(N);
6573 SDValue LowLHS, HiBits;
6574 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
6575 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
6576
6577 DCI.AddToWorklist(LowOr.getNode());
6578 DCI.AddToWorklist(HiBits.getNode());
6579
6580 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
6581 LowOr, HiBits);
6582 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00006583 }
6584 }
6585
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006586 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
6587 if (CRHS) {
6588 if (SDValue Split
6589 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
6590 return Split;
6591 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00006592
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006593 return SDValue();
6594}
Matt Arsenaultf2290332015-01-06 23:00:39 +00006595
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006596SDValue SITargetLowering::performXorCombine(SDNode *N,
6597 DAGCombinerInfo &DCI) const {
6598 EVT VT = N->getValueType(0);
6599 if (VT != MVT::i64)
6600 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00006601
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00006602 SDValue LHS = N->getOperand(0);
6603 SDValue RHS = N->getOperand(1);
6604
6605 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
6606 if (CRHS) {
6607 if (SDValue Split
6608 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
6609 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00006610 }
6611
6612 return SDValue();
6613}
6614
Matt Arsenault5cf42712017-04-06 20:58:30 +00006615// Instructions that will be lowered with a final instruction that zeros the
6616// high result bits.
6617// XXX - probably only need to list legal operations.
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006618static bool fp16SrcZerosHighBits(unsigned Opc) {
6619 switch (Opc) {
Matt Arsenault5cf42712017-04-06 20:58:30 +00006620 case ISD::FADD:
6621 case ISD::FSUB:
6622 case ISD::FMUL:
6623 case ISD::FDIV:
6624 case ISD::FREM:
6625 case ISD::FMA:
6626 case ISD::FMAD:
6627 case ISD::FCANONICALIZE:
6628 case ISD::FP_ROUND:
6629 case ISD::UINT_TO_FP:
6630 case ISD::SINT_TO_FP:
6631 case ISD::FABS:
6632 // Fabs is lowered to a bit operation, but it's an and which will clear the
6633 // high bits anyway.
6634 case ISD::FSQRT:
6635 case ISD::FSIN:
6636 case ISD::FCOS:
6637 case ISD::FPOWI:
6638 case ISD::FPOW:
6639 case ISD::FLOG:
6640 case ISD::FLOG2:
6641 case ISD::FLOG10:
6642 case ISD::FEXP:
6643 case ISD::FEXP2:
6644 case ISD::FCEIL:
6645 case ISD::FTRUNC:
6646 case ISD::FRINT:
6647 case ISD::FNEARBYINT:
6648 case ISD::FROUND:
6649 case ISD::FFLOOR:
6650 case ISD::FMINNUM:
6651 case ISD::FMAXNUM:
6652 case AMDGPUISD::FRACT:
6653 case AMDGPUISD::CLAMP:
6654 case AMDGPUISD::COS_HW:
6655 case AMDGPUISD::SIN_HW:
6656 case AMDGPUISD::FMIN3:
6657 case AMDGPUISD::FMAX3:
6658 case AMDGPUISD::FMED3:
6659 case AMDGPUISD::FMAD_FTZ:
6660 case AMDGPUISD::RCP:
6661 case AMDGPUISD::RSQ:
6662 case AMDGPUISD::LDEXP:
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006663 return true;
Matt Arsenault5cf42712017-04-06 20:58:30 +00006664 default:
6665 // fcopysign, select and others may be lowered to 32-bit bit operations
6666 // which don't zero the high bits.
6667 return false;
Matt Arsenault8edfaee2017-03-31 19:53:03 +00006668 }
6669}
6670
6671SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
6672 DAGCombinerInfo &DCI) const {
6673 if (!Subtarget->has16BitInsts() ||
6674 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
6675 return SDValue();
6676
6677 EVT VT = N->getValueType(0);
6678 if (VT != MVT::i32)
6679 return SDValue();
6680
6681 SDValue Src = N->getOperand(0);
6682 if (Src.getValueType() != MVT::i16)
6683 return SDValue();
6684
6685 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
6686 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
6687 if (Src.getOpcode() == ISD::BITCAST) {
6688 SDValue BCSrc = Src.getOperand(0);
6689 if (BCSrc.getValueType() == MVT::f16 &&
6690 fp16SrcZerosHighBits(BCSrc.getOpcode()))
6691 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
6692 }
6693
6694 return SDValue();
6695}
6696
Matt Arsenaultf2290332015-01-06 23:00:39 +00006697SDValue SITargetLowering::performClassCombine(SDNode *N,
6698 DAGCombinerInfo &DCI) const {
6699 SelectionDAG &DAG = DCI.DAG;
6700 SDValue Mask = N->getOperand(1);
6701
6702 // fp_class x, 0 -> false
6703 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
6704 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006705 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00006706 }
6707
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00006708 if (N->getOperand(0).isUndef())
6709 return DAG.getUNDEF(MVT::i1);
6710
Matt Arsenaultf2290332015-01-06 23:00:39 +00006711 return SDValue();
6712}
6713
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006714static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
6715 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
6716 return true;
6717
6718 return DAG.isKnownNeverNaN(Op);
6719}
6720
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006721static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
6722 const SISubtarget *ST, unsigned MaxDepth=5) {
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006723 // If source is a result of another standard FP operation it is already in
6724 // canonical form.
6725
6726 switch (Op.getOpcode()) {
6727 default:
6728 break;
6729
6730 // These will flush denorms if required.
6731 case ISD::FADD:
6732 case ISD::FSUB:
6733 case ISD::FMUL:
6734 case ISD::FSQRT:
6735 case ISD::FCEIL:
6736 case ISD::FFLOOR:
6737 case ISD::FMA:
6738 case ISD::FMAD:
6739
6740 case ISD::FCANONICALIZE:
6741 return true;
6742
6743 case ISD::FP_ROUND:
6744 return Op.getValueType().getScalarType() != MVT::f16 ||
6745 ST->hasFP16Denormals();
6746
6747 case ISD::FP_EXTEND:
6748 return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
6749 ST->hasFP16Denormals();
6750
6751 case ISD::FP16_TO_FP:
6752 case ISD::FP_TO_FP16:
6753 return ST->hasFP16Denormals();
6754
6755 // It can/will be lowered or combined as a bit operation.
6756 // We need to check their input recursively to handle them.
6757 case ISD::FNEG:
6758 case ISD::FABS:
6759 return (MaxDepth > 0) &&
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006760 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006761
6762 case ISD::FSIN:
6763 case ISD::FCOS:
6764 case ISD::FSINCOS:
6765 return Op.getValueType().getScalarType() != MVT::f16;
6766
6767 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
6768 // For such targets we need to check their inputs recursively.
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006769 case ISD::FMINNUM:
6770 case ISD::FMAXNUM:
6771 case ISD::FMINNAN:
6772 case ISD::FMAXNAN:
6773
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006774 if (ST->supportsMinMaxDenormModes() &&
6775 DAG.isKnownNeverNaN(Op.getOperand(0)) &&
6776 DAG.isKnownNeverNaN(Op.getOperand(1)))
6777 return true;
6778
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006779 return (MaxDepth > 0) &&
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006780 isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
6781 isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006782
6783 case ISD::ConstantFP: {
6784 auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
6785 return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
6786 }
6787 }
6788 return false;
6789}
6790
Matt Arsenault9cd90712016-04-14 01:42:16 +00006791// Constant fold canonicalize.
6792SDValue SITargetLowering::performFCanonicalizeCombine(
6793 SDNode *N,
6794 DAGCombinerInfo &DCI) const {
Matt Arsenault9cd90712016-04-14 01:42:16 +00006795 SelectionDAG &DAG = DCI.DAG;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006796 ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
6797
6798 if (!CFP) {
6799 SDValue N0 = N->getOperand(0);
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006800 EVT VT = N0.getValueType().getScalarType();
6801 auto ST = getSubtarget();
6802
6803 if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
6804 (VT == MVT::f64 && ST->hasFP64Denormals()) ||
6805 (VT == MVT::f16 && ST->hasFP16Denormals())) &&
6806 DAG.isKnownNeverNaN(N0))
6807 return N0;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006808
6809 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
6810
6811 if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
Stanislav Mekhanoshindc2890a2017-07-13 23:59:15 +00006812 isCanonicalized(DAG, N0, ST))
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00006813 return N0;
6814
6815 return SDValue();
6816 }
6817
Matt Arsenault9cd90712016-04-14 01:42:16 +00006818 const APFloat &C = CFP->getValueAPF();
6819
6820 // Flush denormals to 0 if not enabled.
6821 if (C.isDenormal()) {
6822 EVT VT = N->getValueType(0);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006823 EVT SVT = VT.getScalarType();
6824 if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
Matt Arsenault9cd90712016-04-14 01:42:16 +00006825 return DAG.getConstantFP(0.0, SDLoc(N), VT);
6826
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006827 if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
Matt Arsenault9cd90712016-04-14 01:42:16 +00006828 return DAG.getConstantFP(0.0, SDLoc(N), VT);
Matt Arsenaultce841302016-12-22 03:05:37 +00006829
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006830 if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
Matt Arsenaultce841302016-12-22 03:05:37 +00006831 return DAG.getConstantFP(0.0, SDLoc(N), VT);
Matt Arsenault9cd90712016-04-14 01:42:16 +00006832 }
6833
6834 if (C.isNaN()) {
6835 EVT VT = N->getValueType(0);
6836 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
6837 if (C.isSignaling()) {
6838 // Quiet a signaling NaN.
6839 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6840 }
6841
6842 // Make sure it is the canonical NaN bitpattern.
6843 //
6844 // TODO: Can we use -1 as the canonical NaN value since it's an inline
6845 // immediate?
6846 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
6847 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
6848 }
6849
Matt Arsenaulteb522e62017-02-27 22:15:25 +00006850 return N->getOperand(0);
Matt Arsenault9cd90712016-04-14 01:42:16 +00006851}
6852
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006853static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
6854 switch (Opc) {
6855 case ISD::FMAXNUM:
6856 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006857 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006858 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006859 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006860 return AMDGPUISD::UMAX3;
6861 case ISD::FMINNUM:
6862 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006863 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006864 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00006865 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006866 return AMDGPUISD::UMIN3;
6867 default:
6868 llvm_unreachable("Not a min/max opcode");
6869 }
6870}
6871
Matt Arsenault10268f92017-02-27 22:40:39 +00006872SDValue SITargetLowering::performIntMed3ImmCombine(
6873 SelectionDAG &DAG, const SDLoc &SL,
6874 SDValue Op0, SDValue Op1, bool Signed) const {
Matt Arsenaultf639c322016-01-28 20:53:42 +00006875 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
6876 if (!K1)
6877 return SDValue();
6878
6879 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
6880 if (!K0)
6881 return SDValue();
6882
Matt Arsenaultf639c322016-01-28 20:53:42 +00006883 if (Signed) {
6884 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
6885 return SDValue();
6886 } else {
6887 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
6888 return SDValue();
6889 }
6890
6891 EVT VT = K0->getValueType(0);
Matt Arsenault10268f92017-02-27 22:40:39 +00006892 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
6893 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
6894 return DAG.getNode(Med3Opc, SL, VT,
6895 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
6896 }
Tom Stellard115a6152016-11-10 16:02:37 +00006897
Matt Arsenault10268f92017-02-27 22:40:39 +00006898 // If there isn't a 16-bit med3 operation, convert to 32-bit.
Tom Stellard115a6152016-11-10 16:02:37 +00006899 MVT NVT = MVT::i32;
6900 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6901
Matt Arsenault10268f92017-02-27 22:40:39 +00006902 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
6903 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
6904 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
Tom Stellard115a6152016-11-10 16:02:37 +00006905
Matt Arsenault10268f92017-02-27 22:40:39 +00006906 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
6907 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
Matt Arsenaultf639c322016-01-28 20:53:42 +00006908}
6909
Matt Arsenault6b114d22017-08-30 01:20:17 +00006910static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
6911 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
6912 return C;
6913
6914 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
6915 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
6916 return C;
6917 }
6918
6919 return nullptr;
6920}
6921
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006922SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
6923 const SDLoc &SL,
6924 SDValue Op0,
6925 SDValue Op1) const {
Matt Arsenault6b114d22017-08-30 01:20:17 +00006926 ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
Matt Arsenaultf639c322016-01-28 20:53:42 +00006927 if (!K1)
6928 return SDValue();
6929
Matt Arsenault6b114d22017-08-30 01:20:17 +00006930 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
Matt Arsenaultf639c322016-01-28 20:53:42 +00006931 if (!K0)
6932 return SDValue();
6933
6934 // Ordered >= (although NaN inputs should have folded away by now).
6935 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
6936 if (Cmp == APFloat::cmpGreaterThan)
6937 return SDValue();
6938
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006939 // TODO: Check IEEE bit enabled?
Matt Arsenault6b114d22017-08-30 01:20:17 +00006940 EVT VT = Op0.getValueType();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006941 if (Subtarget->enableDX10Clamp()) {
6942 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
6943 // hardware fmed3 behavior converting to a min.
6944 // FIXME: Should this be allowing -0.0?
6945 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
6946 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
6947 }
6948
Matt Arsenault6b114d22017-08-30 01:20:17 +00006949 // med3 for f16 is only available on gfx9+, and not available for v2f16.
6950 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
6951 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
6952 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
6953 // then give the other result, which is different from med3 with a NaN
6954 // input.
6955 SDValue Var = Op0.getOperand(0);
6956 if (!isKnownNeverSNan(DAG, Var))
6957 return SDValue();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00006958
Matt Arsenault6b114d22017-08-30 01:20:17 +00006959 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
6960 Var, SDValue(K0, 0), SDValue(K1, 0));
6961 }
Matt Arsenaultf639c322016-01-28 20:53:42 +00006962
Matt Arsenault6b114d22017-08-30 01:20:17 +00006963 return SDValue();
Matt Arsenaultf639c322016-01-28 20:53:42 +00006964}
6965
6966SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
6967 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006968 SelectionDAG &DAG = DCI.DAG;
6969
Matt Arsenault79a45db2017-02-22 23:53:37 +00006970 EVT VT = N->getValueType(0);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006971 unsigned Opc = N->getOpcode();
6972 SDValue Op0 = N->getOperand(0);
6973 SDValue Op1 = N->getOperand(1);
6974
6975 // Only do this if the inner op has one use since this will just increase
6976 // register pressure for no benefit.
6977
Matt Arsenault79a45db2017-02-22 23:53:37 +00006978
6979 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
Farhana Aleene80aeac2018-04-03 23:00:30 +00006980 !VT.isVector() && VT != MVT::f64 &&
Matt Arsenaultee324ff2017-05-17 19:25:06 +00006981 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
Matt Arsenault5b39b342016-01-28 20:53:48 +00006982 // max(max(a, b), c) -> max3(a, b, c)
6983 // min(min(a, b), c) -> min3(a, b, c)
6984 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
6985 SDLoc DL(N);
6986 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
6987 DL,
6988 N->getValueType(0),
6989 Op0.getOperand(0),
6990 Op0.getOperand(1),
6991 Op1);
6992 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00006993
Matt Arsenault5b39b342016-01-28 20:53:48 +00006994 // Try commuted.
6995 // max(a, max(b, c)) -> max3(a, b, c)
6996 // min(a, min(b, c)) -> min3(a, b, c)
6997 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
6998 SDLoc DL(N);
6999 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
7000 DL,
7001 N->getValueType(0),
7002 Op0,
7003 Op1.getOperand(0),
7004 Op1.getOperand(1));
7005 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007006 }
7007
Matt Arsenaultf639c322016-01-28 20:53:42 +00007008 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
7009 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
7010 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
7011 return Med3;
7012 }
7013
7014 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
7015 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
7016 return Med3;
7017 }
7018
7019 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00007020 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
7021 (Opc == AMDGPUISD::FMIN_LEGACY &&
7022 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenault79a45db2017-02-22 23:53:37 +00007023 (VT == MVT::f32 || VT == MVT::f64 ||
Matt Arsenault6b114d22017-08-30 01:20:17 +00007024 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
7025 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007026 Op0.hasOneUse()) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00007027 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
7028 return Res;
7029 }
7030
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007031 return SDValue();
7032}
7033
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007034static bool isClampZeroToOne(SDValue A, SDValue B) {
7035 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
7036 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
7037 // FIXME: Should this be allowing -0.0?
7038 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
7039 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
7040 }
7041 }
7042
7043 return false;
7044}
7045
7046// FIXME: Should only worry about snans for version with chain.
7047SDValue SITargetLowering::performFMed3Combine(SDNode *N,
7048 DAGCombinerInfo &DCI) const {
7049 EVT VT = N->getValueType(0);
7050 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
7051 // NaNs. With a NaN input, the order of the operands may change the result.
7052
7053 SelectionDAG &DAG = DCI.DAG;
7054 SDLoc SL(N);
7055
7056 SDValue Src0 = N->getOperand(0);
7057 SDValue Src1 = N->getOperand(1);
7058 SDValue Src2 = N->getOperand(2);
7059
7060 if (isClampZeroToOne(Src0, Src1)) {
7061 // const_a, const_b, x -> clamp is safe in all cases including signaling
7062 // nans.
7063 // FIXME: Should this be allowing -0.0?
7064 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
7065 }
7066
7067 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
7068 // handling no dx10-clamp?
7069 if (Subtarget->enableDX10Clamp()) {
7070 // If NaNs is clamped to 0, we are free to reorder the inputs.
7071
7072 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
7073 std::swap(Src0, Src1);
7074
7075 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
7076 std::swap(Src1, Src2);
7077
7078 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
7079 std::swap(Src0, Src1);
7080
7081 if (isClampZeroToOne(Src1, Src2))
7082 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
7083 }
7084
7085 return SDValue();
7086}
7087
Matt Arsenault1f17c662017-02-22 00:27:34 +00007088SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
7089 DAGCombinerInfo &DCI) const {
7090 SDValue Src0 = N->getOperand(0);
7091 SDValue Src1 = N->getOperand(1);
7092 if (Src0.isUndef() && Src1.isUndef())
7093 return DCI.DAG.getUNDEF(N->getValueType(0));
7094 return SDValue();
7095}
7096
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007097SDValue SITargetLowering::performExtractVectorEltCombine(
7098 SDNode *N, DAGCombinerInfo &DCI) const {
7099 SDValue Vec = N->getOperand(0);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007100 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault63bc0e32018-06-15 15:31:36 +00007101
7102 EVT VecVT = Vec.getValueType();
7103 EVT EltVT = VecVT.getVectorElementType();
7104
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00007105 if ((Vec.getOpcode() == ISD::FNEG ||
7106 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007107 SDLoc SL(N);
7108 EVT EltVT = N->getValueType(0);
7109 SDValue Idx = N->getOperand(1);
7110 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7111 Vec.getOperand(0), Idx);
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00007112 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007113 }
7114
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007115 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
7116 // =>
7117 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
7118 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
7119 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
Farhana Aleene24f3ff2018-05-09 21:18:34 +00007120 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007121 SDLoc SL(N);
7122 EVT EltVT = N->getValueType(0);
7123 SDValue Idx = N->getOperand(1);
7124 unsigned Opc = Vec.getOpcode();
7125
7126 switch(Opc) {
7127 default:
7128 return SDValue();
7129 // TODO: Support other binary operations.
7130 case ISD::FADD:
7131 case ISD::ADD:
Farhana Aleene24f3ff2018-05-09 21:18:34 +00007132 case ISD::UMIN:
7133 case ISD::UMAX:
7134 case ISD::SMIN:
7135 case ISD::SMAX:
7136 case ISD::FMAXNUM:
7137 case ISD::FMINNUM:
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00007138 return DAG.getNode(Opc, SL, EltVT,
7139 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7140 Vec.getOperand(0), Idx),
7141 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7142 Vec.getOperand(1), Idx));
7143 }
7144 }
Matt Arsenault63bc0e32018-06-15 15:31:36 +00007145
7146 if (!DCI.isBeforeLegalize())
7147 return SDValue();
7148
7149 unsigned VecSize = VecVT.getSizeInBits();
7150 unsigned EltSize = EltVT.getSizeInBits();
7151
7152 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
7153 // elements. This exposes more load reduction opportunities by replacing
7154 // multiple small extract_vector_elements with a single 32-bit extract.
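  // e.g. extracting i8 element 5 of a v8i8 becomes: bitcast the vector to
  // v2i32, extract dword 1, shift right by 8, then truncate to i8.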
7155 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
7156 if (EltSize <= 16 &&
7157 EltVT.isByteSized() &&
7158 VecSize > 32 &&
7159 VecSize % 32 == 0 &&
7160 Idx) {
7161 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
7162
7163 unsigned BitIndex = Idx->getZExtValue() * EltSize;
7164 unsigned EltIdx = BitIndex / 32;
7165 unsigned LeftoverBitIdx = BitIndex % 32;
7166 SDLoc SL(N);
7167
7168 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
7169 DCI.AddToWorklist(Cast.getNode());
7170
7171 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
7172 DAG.getConstant(EltIdx, SL, MVT::i32));
7173 DCI.AddToWorklist(Elt.getNode());
7174 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
7175 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
7176 DCI.AddToWorklist(Srl.getNode());
7177
7178 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
7179 DCI.AddToWorklist(Trunc.getNode());
7180 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
7181 }
7182
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007183 return SDValue();
7184}
7185
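// Helper for performBuildVectorCombine: if Hi is a bitcast from f16 and Lo is
// a constant or undef, rewrite both elements as f16 so the caller can rebuild
// the pair as a v2f16 build_vector and bitcast the result back.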
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007186static bool convertBuildVectorCastElt(SelectionDAG &DAG,
7187 SDValue &Lo, SDValue &Hi) {
7188 if (Hi.getOpcode() == ISD::BITCAST &&
7189 Hi.getOperand(0).getValueType() == MVT::f16 &&
7190 (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
7191 Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
7192 Hi = Hi.getOperand(0);
7193 return true;
7194 }
7195
7196 return false;
7197}
7198
7199SDValue SITargetLowering::performBuildVectorCombine(
7200 SDNode *N, DAGCombinerInfo &DCI) const {
7201 SDLoc SL(N);
7202
7203 if (!isTypeLegal(MVT::v2i16))
7204 return SDValue();
7205 SelectionDAG &DAG = DCI.DAG;
7206 EVT VT = N->getValueType(0);
7207
7208 if (VT == MVT::v2i16) {
7209 SDValue Lo = N->getOperand(0);
7210 SDValue Hi = N->getOperand(1);
7211
7212 // v2i16 build_vector (const|undef), (bitcast f16:$x)
7213 // -> bitcast (v2f16 build_vector const|undef, $x)
7214 if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
7215 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi });
7216 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
7217 }
7218
7219 if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
7220 SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo });
7221 return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
7222 }
7223 }
7224
7225 return SDValue();
7226}
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007227
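/// Choose the fused multiply-add opcode to form: FMAD when denormals do not
/// need to be supported for this type, FMA when contraction is allowed and
/// profitable, or 0 if no fused form should be used.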
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007228unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
7229 const SDNode *N0,
7230 const SDNode *N1) const {
7231 EVT VT = N0->getValueType(0);
7232
Matt Arsenault770ec862016-12-22 03:55:35 +00007233 // Only do this if we are not trying to support denormals. v_mad_f32 does not
7234 // support denormals ever.
7235 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
7236 (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
7237 return ISD::FMAD;
7238
7239 const TargetOptions &Options = DAG.getTarget().Options;
Amara Emersond28f0cd42017-05-01 15:17:51 +00007240 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
Michael Berg7acc81b2018-05-04 18:48:20 +00007241 (N0->getFlags().hasAllowContract() &&
7242 N1->getFlags().hasAllowContract())) &&
Matt Arsenault770ec862016-12-22 03:55:35 +00007243 isFMAFasterThanFMulAndFAdd(VT)) {
7244 return ISD::FMA;
7245 }
7246
7247 return 0;
7248}
7249
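// Build a MAD_I64_I32 / MAD_U64_U32 node computing N0 * N1 + N2 as an i64
// (plus an i1 second result), then truncate the 64-bit result back to VT.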
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007250static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
7251 EVT VT,
7252 SDValue N0, SDValue N1, SDValue N2,
7253 bool Signed) {
7254 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
7255 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
7256 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
7257 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
7258}
7259
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007260SDValue SITargetLowering::performAddCombine(SDNode *N,
7261 DAGCombinerInfo &DCI) const {
7262 SelectionDAG &DAG = DCI.DAG;
7263 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007264 SDLoc SL(N);
7265 SDValue LHS = N->getOperand(0);
7266 SDValue RHS = N->getOperand(1);
7267
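  // Fold (add (mul x, y), z) into MAD_U64_U32 / MAD_I64_I32 when the target
  // has v_mad_u64_u32 / v_mad_i64_i32 and both multiply operands are known to
  // fit in 32 bits, e.g. (roughly):
  //   i64 (add (mul (zext i32 a), (zext i32 b)), c) -> MAD_U64_U32 a, b, c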
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007268 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
7269 && Subtarget->hasMad64_32() &&
7270 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
7271 VT.getScalarSizeInBits() <= 64) {
7272 if (LHS.getOpcode() != ISD::MUL)
7273 std::swap(LHS, RHS);
7274
7275 SDValue MulLHS = LHS.getOperand(0);
7276 SDValue MulRHS = LHS.getOperand(1);
7277 SDValue AddRHS = RHS;
7278
7279 // TODO: Maybe restrict if SGPR inputs.
7280 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
7281 numBitsUnsigned(MulRHS, DAG) <= 32) {
7282 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
7283 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
7284 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
7285 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
7286 }
7287
7288 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
7289 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
7290 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
7291 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
7292 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
7293 }
7294
7295 return SDValue();
7296 }
7297
Farhana Aleen07e61232018-05-02 18:16:39 +00007298 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00007299 return SDValue();
7300
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007301 // add x, zext (setcc) => addcarry x, 0, setcc
7302 // add x, sext (setcc) => subcarry x, 0, setcc
7303 unsigned Opc = LHS.getOpcode();
7304 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007305 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007306 std::swap(RHS, LHS);
7307
7308 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007309 switch (Opc) {
7310 default: break;
7311 case ISD::ZERO_EXTEND:
7312 case ISD::SIGN_EXTEND:
7313 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007314 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007315 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00007316 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007317 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
7318 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
7319 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
7320 return DAG.getNode(Opc, SL, VTList, Args);
7321 }
7322 case ISD::ADDCARRY: {
7323 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
7324 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7325 if (!C || C->getZExtValue() != 0) break;
7326 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
7327 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
7328 }
7329 }
7330 return SDValue();
7331}
7332
7333SDValue SITargetLowering::performSubCombine(SDNode *N,
7334 DAGCombinerInfo &DCI) const {
7335 SelectionDAG &DAG = DCI.DAG;
7336 EVT VT = N->getValueType(0);
7337
7338 if (VT != MVT::i32)
7339 return SDValue();
7340
7341 SDLoc SL(N);
7342 SDValue LHS = N->getOperand(0);
7343 SDValue RHS = N->getOperand(1);
7344
7345 unsigned Opc = LHS.getOpcode();
7346 if (Opc != ISD::SUBCARRY)
7347 std::swap(RHS, LHS);
7348
7349 if (LHS.getOpcode() == ISD::SUBCARRY) {
7350 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
7351 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7352 if (!C || C->getZExtValue() != 0)
7353 return SDValue();
7354 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
7355 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
7356 }
7357 return SDValue();
7358}
7359
7360SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
7361 DAGCombinerInfo &DCI) const {
7362
7363 if (N->getValueType(0) != MVT::i32)
7364 return SDValue();
7365
7366 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7367 if (!C || C->getZExtValue() != 0)
7368 return SDValue();
7369
7370 SelectionDAG &DAG = DCI.DAG;
7371 SDValue LHS = N->getOperand(0);
7372
7373 // addcarry (add x, y), 0, cc => addcarry x, y, cc
7374 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
7375 unsigned LHSOpc = LHS.getOpcode();
7376 unsigned Opc = N->getOpcode();
7377 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
7378 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
7379 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
7380 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007381 }
7382 return SDValue();
7383}
7384
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007385SDValue SITargetLowering::performFAddCombine(SDNode *N,
7386 DAGCombinerInfo &DCI) const {
7387 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7388 return SDValue();
7389
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007390 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00007391 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00007392
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007393 SDLoc SL(N);
7394 SDValue LHS = N->getOperand(0);
7395 SDValue RHS = N->getOperand(1);
7396
7397 // These should really be instruction patterns, but writing patterns with
7398 // source modifiers is a pain.
7399
7400 // fadd (fadd (a, a), b) -> mad 2.0, a, b
7401 if (LHS.getOpcode() == ISD::FADD) {
7402 SDValue A = LHS.getOperand(0);
7403 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007404 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007405 if (FusedOp != 0) {
7406 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007407 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00007408 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007409 }
7410 }
7411
7412 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
7413 if (RHS.getOpcode() == ISD::FADD) {
7414 SDValue A = RHS.getOperand(0);
7415 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007416 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007417 if (FusedOp != 0) {
7418 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007419 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00007420 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007421 }
7422 }
7423
7424 return SDValue();
7425}
7426
7427SDValue SITargetLowering::performFSubCombine(SDNode *N,
7428 DAGCombinerInfo &DCI) const {
7429 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7430 return SDValue();
7431
7432 SelectionDAG &DAG = DCI.DAG;
7433 SDLoc SL(N);
7434 EVT VT = N->getValueType(0);
7435 assert(!VT.isVector());
7436
7437 // Try to get the fneg to fold into the source modifier. This undoes generic
7438 // DAG combines and folds them into the mad.
7439 //
7440 // Only do this if we are not trying to support denormals. v_mad_f32 does
7441 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00007442 SDValue LHS = N->getOperand(0);
7443 SDValue RHS = N->getOperand(1);
7444 if (LHS.getOpcode() == ISD::FADD) {
7445 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
7446 SDValue A = LHS.getOperand(0);
7447 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007448 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007449 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007450 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
7451 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7452
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007453 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007454 }
7455 }
Matt Arsenault770ec862016-12-22 03:55:35 +00007456 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007457
Matt Arsenault770ec862016-12-22 03:55:35 +00007458 if (RHS.getOpcode() == ISD::FADD) {
7459 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007460
Matt Arsenault770ec862016-12-22 03:55:35 +00007461 SDValue A = RHS.getOperand(0);
7462 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00007463 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00007464 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007465 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00007466 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007467 }
7468 }
7469 }
7470
7471 return SDValue();
7472}
7473
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007474SDValue SITargetLowering::performSetCCCombine(SDNode *N,
7475 DAGCombinerInfo &DCI) const {
7476 SelectionDAG &DAG = DCI.DAG;
7477 SDLoc SL(N);
7478
7479 SDValue LHS = N->getOperand(0);
7480 SDValue RHS = N->getOperand(1);
7481 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00007482 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
7483
7484 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
7485 if (!CRHS) {
7486 CRHS = dyn_cast<ConstantSDNode>(LHS);
7487 if (CRHS) {
7488 std::swap(LHS, RHS);
7489 CC = getSetCCSwappedOperands(CC);
7490 }
7491 }
7492
7493 if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
7494 isBoolSGPR(LHS.getOperand(0))) {
7495 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
7496 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
7497 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
7498 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
7499 if ((CRHS->isAllOnesValue() &&
7500 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
7501 (CRHS->isNullValue() &&
7502 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
7503 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
7504 DAG.getConstant(-1, SL, MVT::i1));
7505 if ((CRHS->isAllOnesValue() &&
7506 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
7507 (CRHS->isNullValue() &&
7508 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
7509 return LHS.getOperand(0);
7510 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007511
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00007512 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
7513 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007514 return SDValue();
7515
7516 // Match isinf pattern
7517 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007518 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
7519 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
7520 if (!CRHS)
7521 return SDValue();
7522
7523 const APFloat &APF = CRHS->getValueAPF();
7524 if (APF.isInfinity() && !APF.isNegative()) {
7525 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007526 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
7527 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007528 }
7529 }
7530
7531 return SDValue();
7532}
7533
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007534SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
7535 DAGCombinerInfo &DCI) const {
7536 SelectionDAG &DAG = DCI.DAG;
7537 SDLoc SL(N);
7538 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
7539
7540 SDValue Src = N->getOperand(0);
7541 SDValue Srl = N->getOperand(0);
7542 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
7543 Srl = Srl.getOperand(0);
7544
7545 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
7546 if (Srl.getOpcode() == ISD::SRL) {
7547 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
7548 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
7549 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
7550
7551 if (const ConstantSDNode *C =
7552 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
7553 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
7554 EVT(MVT::i32));
7555
7556 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
7557 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
7558 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
7559 MVT::f32, Srl);
7560 }
7561 }
7562 }
7563
7564 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
7565
Craig Topperd0af7e82017-04-28 05:31:46 +00007566 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007567 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
7568 !DCI.isBeforeLegalizeOps());
7569 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Akira Hatanaka22e839f2017-04-21 18:53:12 +00007570 if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
Craig Topperd0af7e82017-04-28 05:31:46 +00007571 TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007572 DCI.CommitTargetLoweringOpt(TLO);
7573 }
7574
7575 return SDValue();
7576}
7577
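// Constant fold AMDGPUISD::CLAMP of a floating-point constant:
//   clamp(c) -> 0.0 if c < 0.0, or if c is NaN and DX10 clamp is enabled
//   clamp(c) -> 1.0 if c > 1.0
//   clamp(c) -> c   otherwise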
Tom Stellard1b95fed2018-05-24 05:28:34 +00007578SDValue SITargetLowering::performClampCombine(SDNode *N,
7579 DAGCombinerInfo &DCI) const {
7580 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
7581 if (!CSrc)
7582 return SDValue();
7583
7584 const APFloat &F = CSrc->getValueAPF();
7585 APFloat Zero = APFloat::getZero(F.getSemantics());
7586 APFloat::cmpResult Cmp0 = F.compare(Zero);
7587 if (Cmp0 == APFloat::cmpLessThan ||
7588 (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
7589 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
7590 }
7591
7592 APFloat One(F.getSemantics(), "1.0");
7593 APFloat::cmpResult Cmp1 = F.compare(One);
7594 if (Cmp1 == APFloat::cmpGreaterThan)
7595 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
7596
7597 return SDValue(CSrc, 0);
7598}
7599
7600
Tom Stellard75aadc22012-12-11 21:25:42 +00007601SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
7602 DAGCombinerInfo &DCI) const {
Tom Stellard75aadc22012-12-11 21:25:42 +00007603 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00007604 default:
7605 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00007606 case ISD::ADD:
7607 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00007608 case ISD::SUB:
7609 return performSubCombine(N, DCI);
7610 case ISD::ADDCARRY:
7611 case ISD::SUBCARRY:
7612 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007613 case ISD::FADD:
7614 return performFAddCombine(N, DCI);
7615 case ISD::FSUB:
7616 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00007617 case ISD::SETCC:
7618 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00007619 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007620 case ISD::FMINNUM:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00007621 case ISD::SMAX:
7622 case ISD::SMIN:
7623 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00007624 case ISD::UMIN:
7625 case AMDGPUISD::FMIN_LEGACY:
7626 case AMDGPUISD::FMAX_LEGACY: {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007627 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
7628 getTargetMachine().getOptLevel() > CodeGenOpt::None)
Matt Arsenaultf639c322016-01-28 20:53:42 +00007629 return performMinMaxCombine(N, DCI);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007630 break;
7631 }
Matt Arsenault90083d32018-06-07 09:54:49 +00007632 case ISD::LOAD: {
7633 if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
7634 return Widened;
7635 LLVM_FALLTHROUGH;
7636 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007637 case ISD::STORE:
7638 case ISD::ATOMIC_LOAD:
7639 case ISD::ATOMIC_STORE:
7640 case ISD::ATOMIC_CMP_SWAP:
7641 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
7642 case ISD::ATOMIC_SWAP:
7643 case ISD::ATOMIC_LOAD_ADD:
7644 case ISD::ATOMIC_LOAD_SUB:
7645 case ISD::ATOMIC_LOAD_AND:
7646 case ISD::ATOMIC_LOAD_OR:
7647 case ISD::ATOMIC_LOAD_XOR:
7648 case ISD::ATOMIC_LOAD_NAND:
7649 case ISD::ATOMIC_LOAD_MIN:
7650 case ISD::ATOMIC_LOAD_MAX:
7651 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00007652 case ISD::ATOMIC_LOAD_UMAX:
7653 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00007654 case AMDGPUISD::ATOMIC_DEC:
7655 case AMDGPUISD::ATOMIC_LOAD_FADD:
7656 case AMDGPUISD::ATOMIC_LOAD_FMIN:
7657 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007658 if (DCI.isBeforeLegalize())
7659 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007660 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007661 case ISD::AND:
7662 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007663 case ISD::OR:
7664 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007665 case ISD::XOR:
7666 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007667 case ISD::ZERO_EXTEND:
7668 return performZeroExtendCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007669 case AMDGPUISD::FP_CLASS:
7670 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00007671 case ISD::FCANONICALIZE:
7672 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007673 case AMDGPUISD::FRACT:
7674 case AMDGPUISD::RCP:
7675 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00007676 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007677 case AMDGPUISD::RSQ_LEGACY:
7678 case AMDGPUISD::RSQ_CLAMP:
7679 case AMDGPUISD::LDEXP: {
7680 SDValue Src = N->getOperand(0);
7681 if (Src.isUndef())
7682 return Src;
7683 break;
7684 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007685 case ISD::SINT_TO_FP:
7686 case ISD::UINT_TO_FP:
7687 return performUCharToFloatCombine(N, DCI);
7688 case AMDGPUISD::CVT_F32_UBYTE0:
7689 case AMDGPUISD::CVT_F32_UBYTE1:
7690 case AMDGPUISD::CVT_F32_UBYTE2:
7691 case AMDGPUISD::CVT_F32_UBYTE3:
7692 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00007693 case AMDGPUISD::FMED3:
7694 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00007695 case AMDGPUISD::CVT_PKRTZ_F16_F32:
7696 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00007697 case AMDGPUISD::CLAMP:
7698 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00007699 case ISD::SCALAR_TO_VECTOR: {
7700 SelectionDAG &DAG = DCI.DAG;
7701 EVT VT = N->getValueType(0);
7702
7703 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
7704 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
7705 SDLoc SL(N);
7706 SDValue Src = N->getOperand(0);
7707 EVT EltVT = Src.getValueType();
7708 if (EltVT == MVT::f16)
7709 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
7710
7711 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
7712 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
7713 }
7714
7715 break;
7716 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00007717 case ISD::EXTRACT_VECTOR_ELT:
7718 return performExtractVectorEltCombine(N, DCI);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00007719 case ISD::BUILD_VECTOR:
7720 return performBuildVectorCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007721 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00007722 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00007723}
Christian Konigd910b7d2013-02-26 17:52:16 +00007724
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007725/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00007726static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00007727 switch (Idx) {
7728 default: return 0;
7729 case AMDGPU::sub0: return 0;
7730 case AMDGPU::sub1: return 1;
7731 case AMDGPU::sub2: return 2;
7732 case AMDGPU::sub3: return 3;
7733 }
7734}
7735
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007736/// Adjust the writemask of MIMG instructions
Matt Arsenault68f05052017-12-04 22:18:27 +00007737SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
7738 SelectionDAG &DAG) const {
7739 SDNode *Users[4] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00007740 unsigned Lane = 0;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007741 unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
7742 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00007743 unsigned NewDmask = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00007744 bool HasChain = Node->getNumValues() > 1;
7745
7746 if (OldDmask == 0) {
7747 // These are folded out, but on the chance it happens don't assert.
7748 return Node;
7749 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00007750
7751 // Try to figure out the used register components
7752 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
7753 I != E; ++I) {
7754
Matt Arsenault93e65ea2017-02-22 21:16:41 +00007755 // Don't look at users of the chain.
7756 if (I.getUse().getResNo() != 0)
7757 continue;
7758
Christian Konig8e06e2a2013-04-10 08:39:08 +00007759 // Abort if we can't understand the usage
7760 if (!I->isMachineOpcode() ||
7761 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +00007762 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007763
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +00007764 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +00007765 // Note that subregs are packed, i.e. Lane==0 is the first bit set
7766 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
7767 // set, etc.
Christian Konig8b1ed282013-04-10 08:39:16 +00007768 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00007769
Tom Stellard54774e52013-10-23 02:53:47 +00007770 // Set which texture component corresponds to the lane.
7771 unsigned Comp;
7772 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
Tom Stellard03a5c082013-10-23 03:50:25 +00007773 Comp = countTrailingZeros(Dmask);
Tom Stellard54774e52013-10-23 02:53:47 +00007774 Dmask &= ~(1 << Comp);
7775 }
7776
Christian Konig8e06e2a2013-04-10 08:39:08 +00007777 // Abort if we have more than one user per component
7778 if (Users[Lane])
Matt Arsenault68f05052017-12-04 22:18:27 +00007779 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007780
7781 Users[Lane] = *I;
Tom Stellard54774e52013-10-23 02:53:47 +00007782 NewDmask |= 1 << Comp;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007783 }
7784
Tom Stellard54774e52013-10-23 02:53:47 +00007785 // Abort if there's no change
7786 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +00007787 return Node;
7788
7789 unsigned BitsSet = countPopulation(NewDmask);
7790
7791 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenaultcad7fa82017-12-13 21:07:51 +00007792 int NewOpcode = AMDGPU::getMaskedMIMGOp(*TII,
7793 Node->getMachineOpcode(), BitsSet);
Matt Arsenault68f05052017-12-04 22:18:27 +00007794 assert(NewOpcode != -1 &&
7795 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
7796 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +00007797
7798 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +00007799 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007800 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007801 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00007802 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +00007803
Matt Arsenault68f05052017-12-04 22:18:27 +00007804 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
7805
Matt Arsenault856777d2017-12-08 20:00:57 +00007806 MVT ResultVT = BitsSet == 1 ?
7807 SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
7808 SDVTList NewVTList = HasChain ?
7809 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
7810
Matt Arsenault68f05052017-12-04 22:18:27 +00007811
7812 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
7813 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +00007814
Matt Arsenault856777d2017-12-08 20:00:57 +00007815 if (HasChain) {
7816 // Update chain.
7817 NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
7818 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
7819 }
Matt Arsenault68f05052017-12-04 22:18:27 +00007820
7821 if (BitsSet == 1) {
7822 assert(Node->hasNUsesOfValue(1, 0));
7823 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
7824 SDLoc(Node), Users[Lane]->getValueType(0),
7825 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +00007826 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +00007827 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +00007828 }
7829
Christian Konig8e06e2a2013-04-10 08:39:08 +00007830 // Update the users of the node with the new indices
7831 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00007832 SDNode *User = Users[i];
7833 if (!User)
7834 continue;
7835
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007836 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
Matt Arsenault68f05052017-12-04 22:18:27 +00007837 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
Christian Konig8e06e2a2013-04-10 08:39:08 +00007838
7839 switch (Idx) {
7840 default: break;
7841 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
7842 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
7843 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
7844 }
7845 }
Matt Arsenault68f05052017-12-04 22:18:27 +00007846
7847 DAG.RemoveDeadNode(Node);
7848 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007849}
7850
Tom Stellardc98ee202015-07-16 19:40:07 +00007851static bool isFrameIndexOp(SDValue Op) {
7852 if (Op.getOpcode() == ISD::AssertZext)
7853 Op = Op.getOperand(0);
7854
7855 return isa<FrameIndexSDNode>(Op);
7856}
7857
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007858/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +00007859/// with frame index operands.
7860/// LLVM assumes that inputs to these instructions are registers.
Matt Arsenault0d0d6c22017-04-12 21:58:23 +00007861SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
7862 SelectionDAG &DAG) const {
7863 if (Node->getOpcode() == ISD::CopyToReg) {
7864 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
7865 SDValue SrcVal = Node->getOperand(2);
7866
7867 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
7868 // to try understanding copies to physical registers.
7869 if (SrcVal.getValueType() == MVT::i1 &&
7870 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
7871 SDLoc SL(Node);
7872 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
7873 SDValue VReg = DAG.getRegister(
7874 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
7875
7876 SDNode *Glued = Node->getGluedNode();
7877 SDValue ToVReg
7878 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
7879 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
7880 SDValue ToResultReg
7881 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
7882 VReg, ToVReg.getValue(1));
7883 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
7884 DAG.RemoveDeadNode(Node);
7885 return ToResultReg.getNode();
7886 }
7887 }
Tom Stellard8dd392e2014-10-09 18:09:15 +00007888
7889 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +00007890 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00007891 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00007892 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00007893 continue;
7894 }
7895
Tom Stellard3457a842014-10-09 19:06:00 +00007896 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00007897 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00007898 Node->getOperand(i).getValueType(),
7899 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00007900 }
7901
Mark Searles4e3d6162017-10-16 23:38:53 +00007902 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00007903}
7904
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007905/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +00007906/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +00007907SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
7908 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00007909 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00007910 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00007911
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00007912 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Changpeng Fang4737e892018-01-18 22:08:53 +00007913 !TII->isGather4(Opcode) && !TII->isD16(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +00007914 return adjustWritemask(Node, DAG);
7915 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00007916
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00007917 if (Opcode == AMDGPU::INSERT_SUBREG ||
7918 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00007919 legalizeTargetIndependentNode(Node, DAG);
7920 return Node;
7921 }
Matt Arsenault206f8262017-08-01 20:49:41 +00007922
7923 switch (Opcode) {
7924 case AMDGPU::V_DIV_SCALE_F32:
7925 case AMDGPU::V_DIV_SCALE_F64: {
7926 // Satisfy the operand register constraint when one of the inputs is
7927 // undefined. Ordinarily each undef value will have its own implicit_def of
7928 // a vreg, so force these to use a single register.
7929 SDValue Src0 = Node->getOperand(0);
7930 SDValue Src1 = Node->getOperand(1);
7931 SDValue Src2 = Node->getOperand(2);
7932
7933 if ((Src0.isMachineOpcode() &&
7934 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
7935 (Src0 == Src1 || Src0 == Src2))
7936 break;
7937
7938 MVT VT = Src0.getValueType().getSimpleVT();
7939 const TargetRegisterClass *RC = getRegClassFor(VT);
7940
7941 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
7942 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
7943
7944 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
7945 UndefReg, Src0, SDValue());
7946
7947 // src0 must be the same register as src1 or src2, even if the value is
7948 // undefined, so make sure we don't violate this constraint.
7949 if (Src0.isMachineOpcode() &&
7950 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
7951 if (Src1.isMachineOpcode() &&
7952 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
7953 Src0 = Src1;
7954 else if (Src2.isMachineOpcode() &&
7955 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
7956 Src0 = Src2;
7957 else {
7958 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
7959 Src0 = UndefReg;
7960 Src1 = UndefReg;
7961 }
7962 } else
7963 break;
7964
7965 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
7966 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
7967 Ops.push_back(Node->getOperand(I));
7968
7969 Ops.push_back(ImpDef.getValue(1));
7970 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
7971 }
7972 default:
7973 break;
7974 }
7975
Tom Stellard654d6692015-01-08 15:08:17 +00007976 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00007977}
Christian Konig8b1ed282013-04-10 08:39:16 +00007978
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00007979/// Fix up selected instructions: legalize VOP3 constant bus usage and replace
Christian Konig8b1ed282013-04-10 08:39:16 +00007980/// unused atomic results with the no-return form of the atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00007981void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00007982 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00007983 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00007984
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00007985 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00007986
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00007987 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00007988 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00007989 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00007990 return;
7991 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00007992
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00007993 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00007994 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00007995 if (NoRetAtomicOp != -1) {
7996 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00007997 MI.setDesc(TII->get(NoRetAtomicOp));
7998 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00007999 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008000 }
8001
Tom Stellard354a43c2016-04-01 18:27:37 +00008002 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
8003 // instruction, because the return type of these instructions is a vec2 of
8004 // the memory type, so it can be tied to the input operand.
8005 // This means these instructions always have a use, so we need to add a
8006 // special case to check if the atomic has only one extract_subreg use,
8007 // which itself has no uses.
8008 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00008009 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00008010 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
8011 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008012 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00008013
8014 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008015 MI.setDesc(TII->get(NoRetAtomicOp));
8016 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00008017
8018 // If we only remove the def operand from the atomic instruction, the
8019 // extract_subreg will be left with a use of a vreg without a def.
8020 // So we need to insert an implicit_def to avoid machine verifier
8021 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00008022 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00008023 TII->get(AMDGPU::IMPLICIT_DEF), Def);
8024 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00008025 return;
8026 }
Christian Konig8b1ed282013-04-10 08:39:16 +00008027}
Tom Stellard0518ff82013-06-03 17:39:58 +00008028
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008029static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
8030 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008031 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00008032 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
8033}
8034
8035MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008036 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00008037 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00008038 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00008039
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008040 // Build the half of the subregister with the constants before building the
8041 // full 128-bit register. If we are building multiple resource descriptors,
8042 // this will allow CSEing of the 2-component register.
8043 const SDValue Ops0[] = {
8044 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
8045 buildSMovImm32(DAG, DL, 0),
8046 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
8047 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
8048 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
8049 };
Matt Arsenault485defe2014-11-05 19:01:17 +00008050
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008051 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
8052 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00008053
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008054 // Combine the constants and the pointer.
8055 const SDValue Ops1[] = {
8056 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
8057 Ptr,
8058 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
8059 SubRegHi,
8060 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
8061 };
Matt Arsenault485defe2014-11-05 19:01:17 +00008062
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00008063 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00008064}
8065
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00008066/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00008067/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
8068/// of the resource descriptor) to create an offset, which is added to
8069/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00008070MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
8071 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008072 uint64_t RsrcDword2And3) const {
8073 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
8074 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
8075 if (RsrcDword1) {
8076 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008077 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
8078 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008079 }
8080
8081 SDValue DataLo = buildSMovImm32(DAG, DL,
8082 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
8083 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
8084
8085 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008086 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008087 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008088 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008089 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008090 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008091 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008092 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008093 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008094 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00008095 };
8096
8097 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
8098}
8099
Tom Stellardd7e6f132015-04-08 01:09:26 +00008100//===----------------------------------------------------------------------===//
8101// SI Inline Assembly Support
8102//===----------------------------------------------------------------------===//
8103
8104std::pair<unsigned, const TargetRegisterClass *>
8105SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00008106 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00008107 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008108 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008109 if (Constraint.size() == 1) {
8110 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008111 default:
8112 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008113 case 's':
8114 case 'r':
8115 switch (VT.getSizeInBits()) {
8116 default:
8117 return std::make_pair(0U, nullptr);
8118 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00008119 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008120 RC = &AMDGPU::SReg_32_XM0RegClass;
8121 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008122 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008123 RC = &AMDGPU::SGPR_64RegClass;
8124 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008125 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008126 RC = &AMDGPU::SReg_128RegClass;
8127 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008128 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008129 RC = &AMDGPU::SReg_256RegClass;
8130 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +00008131 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008132 RC = &AMDGPU::SReg_512RegClass;
8133 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008134 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008135 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008136 case 'v':
8137 switch (VT.getSizeInBits()) {
8138 default:
8139 return std::make_pair(0U, nullptr);
8140 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00008141 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008142 RC = &AMDGPU::VGPR_32RegClass;
8143 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008144 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008145 RC = &AMDGPU::VReg_64RegClass;
8146 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008147 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008148 RC = &AMDGPU::VReg_96RegClass;
8149 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008150 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008151 RC = &AMDGPU::VReg_128RegClass;
8152 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008153 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008154 RC = &AMDGPU::VReg_256RegClass;
8155 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008156 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008157 RC = &AMDGPU::VReg_512RegClass;
8158 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008159 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008160 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +00008161 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00008162 // We actually support i128, i16 and f16 as inline parameters
8163 // even if they are not reported as legal
8164 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
8165 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
8166 return std::make_pair(0U, RC);
Tom Stellardd7e6f132015-04-08 01:09:26 +00008167 }
8168
8169 if (Constraint.size() > 1) {
Tom Stellardd7e6f132015-04-08 01:09:26 +00008170 if (Constraint[1] == 'v') {
8171 RC = &AMDGPU::VGPR_32RegClass;
8172 } else if (Constraint[1] == 's') {
8173 RC = &AMDGPU::SGPR_32RegClass;
8174 }
8175
8176 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +00008177 uint32_t Idx;
8178 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
8179 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +00008180 return std::make_pair(RC->getRegister(Idx), RC);
8181 }
8182 }
8183 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8184}
Tom Stellardb3c3bda2015-12-10 02:12:53 +00008185
8186SITargetLowering::ConstraintType
8187SITargetLowering::getConstraintType(StringRef Constraint) const {
8188 if (Constraint.size() == 1) {
8189 switch (Constraint[0]) {
8190 default: break;
8191 case 's':
8192 case 'v':
8193 return C_RegisterClass;
8194 }
8195 }
8196 return TargetLowering::getConstraintType(Constraint);
8197}
Matt Arsenault1cc47f82017-07-18 16:44:56 +00008198
8199// Figure out which registers should be reserved for stack access. Only after
8200// the function is legalized do we know all of the non-spill stack objects or if
8201// calls are present.
8202void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
8203 MachineRegisterInfo &MRI = MF.getRegInfo();
8204 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8205 const MachineFrameInfo &MFI = MF.getFrameInfo();
8206 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
8207 const SIRegisterInfo *TRI = ST.getRegisterInfo();
8208
8209 if (Info->isEntryFunction()) {
8210 // Callable functions have fixed registers used for stack access.
8211 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
8212 }
8213
8214 // We have to assume the SP is needed in case there are calls in the function
8215 // during lowering. Calls are only detected after the function is
8216 // lowered. We're about to reserve registers, so don't bother using it if we
8217 // aren't really going to use it.
8218 bool NeedSP = !Info->isEntryFunction() ||
8219 MFI.hasVarSizedObjects() ||
8220 MFI.hasCalls();
8221
8222 if (NeedSP) {
8223 unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
8224 Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);
8225
8226 assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
8227 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
8228 Info->getStackPtrOffsetReg()));
8229 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
8230 }
8231
8232 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
8233 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
8234 MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
8235 Info->getScratchWaveOffsetReg());
8236
Stanislav Mekhanoshind4b500c2018-05-31 05:36:04 +00008237 Info->limitOccupancy(MF);
8238
Matt Arsenault1cc47f82017-07-18 16:44:56 +00008239 TargetLoweringBase::finalizeLowering(MF);
8240}
Matt Arsenault45b98182017-11-15 00:45:43 +00008241
8242void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
8243 KnownBits &Known,
8244 const APInt &DemandedElts,
8245 const SelectionDAG &DAG,
8246 unsigned Depth) const {
8247 TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
8248 DAG, Depth);
8249
8250 if (getSubtarget()->enableHugePrivateBuffer())
8251 return;
8252
8253 // Technically it may be possible to have a dispatch with a single workitem
8254 // that uses the full private memory size, but that's not really useful. We
8255 // can't use vaddr in MUBUF instructions if we don't know the address
8256 // calculation won't overflow, so assume the sign bit is never set.
8257 Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
8258}
Tom Stellard264c1712018-06-13 15:06:37 +00008259
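/// Conservatively returns true if the value produced by \p N may differ
/// between lanes of a wavefront: physical VGPRs and copies from VGPRs,
/// VGPR or non-entry-function formal arguments, loads from private (scratch)
/// memory, call results (CALLSEQ_END), and intrinsics that are known sources
/// of divergence.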
8260bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
8261 FunctionLoweringInfo * FLI, DivergenceAnalysis * DA) const
8262{
8263 switch (N->getOpcode()) {
8264 case ISD::Register:
8265 case ISD::CopyFromReg: {
8267 const RegisterSDNode *R = nullptr;
8268 if (N->getOpcode() == ISD::Register) {
8269 R = dyn_cast<RegisterSDNode>(N);
8270 } else {
8271 R = dyn_cast<RegisterSDNode>(N->getOperand(1));
8272 }
8273 if (R) {
8276 const MachineFunction *MF = FLI->MF;
8277 const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
8278 const MachineRegisterInfo &MRI = MF->getRegInfo();
8279 const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
8280 unsigned Reg = R->getReg();
8281 if (TRI.isPhysicalRegister(Reg))
8282 return TRI.isVGPR(MRI, Reg);
8283
8284 if (MRI.isLiveIn(Reg)) {
8285 // workitem.id.x workitem.id.y workitem.id.z
8286 // Any VGPR formal argument is also considered divergent
8287 if (TRI.isVGPR(MRI, Reg))
8288 return true;
8289 // Formal arguments of non-entry functions
8290 // are conservatively considered divergent
8291 else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
8292 return true;
8293 }
8294 return !DA || DA->isDivergent(FLI->getValueFromVirtualReg(Reg));
8295 }
8296 }
8297 break;
8298 case ISD::LOAD: {
8299 const LoadSDNode *L = dyn_cast<LoadSDNode>(N);
8300 if (L->getMemOperand()->getAddrSpace() ==
8301 Subtarget->getAMDGPUAS().PRIVATE_ADDRESS)
8302 return true;
8303 } break;
8304 case ISD::CALLSEQ_END:
8305 return true;
8307 case ISD::INTRINSIC_WO_CHAIN:
8311 return AMDGPU::isIntrinsicSourceOfDivergence(
8312 cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
8313 case ISD::INTRINSIC_W_CHAIN:
8314 return AMDGPU::isIntrinsicSourceOfDivergence(
8315 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
8316 // In some cases intrinsics that are a source of divergence have been
8317 // lowered to AMDGPUISD so we also need to check those too.
8318 case AMDGPUISD::INTERP_MOV:
8319 case AMDGPUISD::INTERP_P1:
8320 case AMDGPUISD::INTERP_P2:
8321 return true;
8322 }
8323 return false;
8324}