//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));

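// Return the lowest-numbered unallocated SGPR according to the calling
// convention state; aborts if every SGPR has already been allocated.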
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  if (Subtarget->hasMAIInsts()) {
    addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
    addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                  MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
                  MVT::v32i32, MVT::v32f32 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // Deal with vec3 vector operations when widened to vec4.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);

  // Deal with vec5 vector operations when widened to vec8.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling
  // and output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value; let LLVM add the
  // comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->haveRoundOpsF64()) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns).
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMA, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case, when denormals are enabled, where this is
// OK to use, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 &&
         !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

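// For non-kernel calling conventions, arguments are passed in 32-bit pieces:
// 32-bit vector elements keep their type, wider elements are split into i32
// parts, and 16-bit elements are packed two to a register when the subtarget
// has 16-bit instructions.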
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size > 32)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  } else if (VT.getSizeInBits() > 32)
    return MVT::i32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size > 32)
      return NumElts * ((Size + 31) / 32);

    if (Size == 16 && Subtarget->has16BitInsts())
      return (NumElts + 1) / 2;
  } else if (VT.getSizeInBits() > 32)
    return (VT.getSizeInBits() + 31) / 32;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

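// The vector-splitting counterpart of the two hooks above: choose the
// intermediate type and count used to break a vector argument into registers.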
unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size > 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts * ((Size + 31) / 32);
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type currently expected.
  assert(Ty->isStructTy() && "Expected struct type");

  Type *ElementType = nullptr;
  unsigned NumElts;
  if (Ty->getContainedType(0)->isVectorTy()) {
    VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
    ElementType = VecComponent->getElementType();
    NumElts = VecComponent->getNumElements();
  } else {
    ElementType = Ty->getContainedType(0);
    NumElts = 1;
  }

  assert((Ty->getContainedType(1) &&
          Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");

  // Calculate the size of the memVT type from the aggregate.
  unsigned Pow2Elts = 0;
  unsigned ElementSize;
  switch (ElementType->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
    break;
  case Type::HalfTyID:
    ElementSize = 16;
    break;
  case Type::FloatTyID:
    ElementSize = 32;
    break;
  }
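  // Make room for the struct's trailing i32 member (two extra elements when
  // the data elements are 16-bit, one when they are 32-bit; presumably a
  // TFE/status word), then round the element count up to a power of two.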
  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);

  return MVT::getVectorVT(MVT::getVT(ElementType, false),
                          Pow2Elts);
}

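// Describe the memory touched by a target intrinsic (value type, pointer,
// alignment, flags) so the DAG builder can attach a MachineMemOperand to the
// resulting node.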
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align.reset();
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType(), true);
      if (Info.memVT == MVT::Other) {
        // Some intrinsics return an aggregate type - special case to work out
        // the correct memVT.
        Info.memVT = memVTFromAggregate(CI.getType());
      }
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_buffer_atomic_fadd: {
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getOperand(0)->getType());
    Info.ptrVal = MFI->getBufferPSV(
      *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
      CI.getArgOperand(1));
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_global_atomic_fadd: {
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::getVT(CI.getOperand(0)->getType()
                            ->getPointerElementType());
    Info.ptrVal = CI.getOperand(0);
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    return true;
  }
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align.reset();
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all: {
    Info.opc = ISD::INTRINSIC_VOID;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    Info.ptrVal =
        MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());

    // This is an abstract access, but we need to specify a type and size.
    Info.memVT = MVT::i32;
    Info.size = 4;
    Info.align = Align(4);

    Info.flags = MachineMemOperand::MOStore;
    if (IntrID == Intrinsic::amdgcn_ds_gws_barrier)
      Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  default:
    return false;
  }
}

Matt Arsenault7dc01c92017-03-15 23:15:12 +00001037bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
1038 SmallVectorImpl<Value*> &Ops,
1039 Type *&AccessTy) const {
1040 switch (II->getIntrinsicID()) {
1041 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00001042 case Intrinsic::amdgcn_atomic_dec:
Marek Olsakc5cec5e2019-01-16 15:43:53 +00001043 case Intrinsic::amdgcn_ds_ordered_add:
1044 case Intrinsic::amdgcn_ds_ordered_swap:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00001045 case Intrinsic::amdgcn_ds_fadd:
1046 case Intrinsic::amdgcn_ds_fmin:
1047 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenault7dc01c92017-03-15 23:15:12 +00001048 Value *Ptr = II->getArgOperand(0);
1049 AccessTy = II->getType();
1050 Ops.push_back(Ptr);
1051 return true;
1052 }
1053 default:
1054 return false;
1055 }
Matt Arsenaulte306a322014-10-21 16:25:08 +00001056}
1057
Tom Stellard70580f82015-07-20 14:28:41 +00001058bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
Matt Arsenaultd9b77842017-06-12 17:06:35 +00001059 if (!Subtarget->hasFlatInstOffsets()) {
1060 // Flat instructions do not have offsets, and only have the register
1061 // address.
1062 return AM.BaseOffs == 0 && AM.Scale == 0;
1063 }
1064
1065 // GFX9 added a 13-bit signed offset. When using regular flat instructions,
1066 // the sign bit is ignored and is treated as a 12-bit unsigned offset.
1067
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00001068 // GFX10 shrank the signed offset to 12 bits. When using regular flat
1069 // instructions, the sign bit is again ignored and the offset is treated
1070 // as an 11-bit unsigned offset.
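 // e.g. with AM.Scale == 0, BaseOffs == 2047 is accepted on both GFX9 and
 // GFX10, while BaseOffs == 2048 fails isUInt<11> on GFX10 but still fits
 // GFX9's 12-bit unsigned range checked below.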
1071
1072 if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
1073 return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;
1074
Matt Arsenaultd9b77842017-06-12 17:06:35 +00001075 // Just r + i
1076 return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
Tom Stellard70580f82015-07-20 14:28:41 +00001077}
1078
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001079bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
1080 if (Subtarget->hasFlatGlobalInsts())
1081 return isInt<13>(AM.BaseOffs) && AM.Scale == 0;
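 // e.g. with flat global instructions the encoded offset is a signed 13-bit
 // immediate, so any BaseOffs in [-4096, 4095] with no scaled index is
 // directly legal here.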
1082
1083 if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
1084 // Assume the we will use FLAT for all global memory accesses
1085 // on VI.
1086 // FIXME: This assumption is currently wrong. On VI we still use
1087 // MUBUF instructions for the r + i addressing mode. As currently
1088 // implemented, the MUBUF instructions only work on buffer < 4GB.
1089 // It may be possible to support > 4GB buffers with MUBUF instructions,
1090 // by setting the stride value in the resource descriptor which would
1091 // increase the size limit to (stride * 4GB). However, this is risky,
1092 // because it has never been validated.
1093 return isLegalFlatAddressingMode(AM);
1094 }
1095
1096 return isLegalMUBUFAddressingMode(AM);
1097}
1098
Matt Arsenault711b3902015-08-07 20:18:34 +00001099bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1100 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1101 // additionally can do r + r + i with addr64. 32-bit has more addressing
1102 // mode options. Depending on the resource constant, it can also do
1103 // (i64 r0) + (i32 r1) * (i14 i).
1104 //
1105 // Private arrays end up using a scratch buffer most of the time, so also
1106 // assume those use MUBUF instructions. Scratch loads / stores are currently
1107 // implemented as mubuf instructions with offen bit set, so slightly
1108 // different than the normal addr64.
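 // e.g. BaseOffs == 4095 is the largest byte offset accepted here; 4096
 // fails isUInt<12> and the mode is rejected regardless of Scale.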
1109 if (!isUInt<12>(AM.BaseOffs))
1110 return false;
1111
1112 // FIXME: Since we can split immediate into soffset and immediate offset,
1113 // would it make sense to allow any immediate?
1114
1115 switch (AM.Scale) {
1116 case 0: // r + i or just i, depending on HasBaseReg.
1117 return true;
1118 case 1:
1119 return true; // We have r + r or r + i.
1120 case 2:
1121 if (AM.HasBaseReg) {
1122 // Reject 2 * r + r.
1123 return false;
1124 }
1125
1126 // Allow 2 * r as r + r,
1127 // and 2 * r + i as r + r + i.
1128 return true;
1129 default: // Don't allow n * r
1130 return false;
1131 }
1132}
1133
Mehdi Amini0cdec1e2015-07-09 02:09:40 +00001134bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1135 const AddrMode &AM, Type *Ty,
Jonas Paulsson024e3192017-07-21 11:59:37 +00001136 unsigned AS, Instruction *I) const {
Matt Arsenault5015a892014-08-15 17:17:07 +00001137 // No global is ever allowed as a base.
1138 if (AM.BaseGV)
1139 return false;
1140
Matt Arsenault0da63502018-08-31 05:49:54 +00001141 if (AS == AMDGPUAS::GLOBAL_ADDRESS)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001142 return isLegalGlobalAddressingMode(AM);
Matt Arsenault5015a892014-08-15 17:17:07 +00001143
Matt Arsenault0da63502018-08-31 05:49:54 +00001144 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
Neil Henning523dab02019-03-18 14:44:28 +00001145 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1146 AS == AMDGPUAS::BUFFER_FAT_POINTER) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001147 // If the offset isn't a multiple of 4, it probably isn't going to be
1148 // correctly aligned.
Matt Arsenault3cc1e002016-08-13 01:43:51 +00001149 // FIXME: Can we get the real alignment here?
Matt Arsenault711b3902015-08-07 20:18:34 +00001150 if (AM.BaseOffs % 4 != 0)
1151 return isLegalMUBUFAddressingMode(AM);
1152
1153 // There are no SMRD extloads, so if we have to do a small type access we
1154 // will use a MUBUF load.
1155 // FIXME?: We also need to do this if unaligned, but we don't know the
1156 // alignment here.
Stanislav Mekhanoshin57d341c2018-05-15 22:07:51 +00001157 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001158 return isLegalGlobalAddressingMode(AM);
Matt Arsenault711b3902015-08-07 20:18:34 +00001159
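 // e.g. the generation checks below bound the byte offset at 1020 (255
 // dwords) on SI, a 32-bit literal dword offset on CI, and 2^20 - 1 bytes
 // on VI and newer.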
Tom Stellard5bfbae52018-07-11 20:59:01 +00001160 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001161 // SMRD instructions have an 8-bit, dword offset on SI.
1162 if (!isUInt<8>(AM.BaseOffs / 4))
1163 return false;
Tom Stellard5bfbae52018-07-11 20:59:01 +00001164 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001165 // On CI+, this can also be a 32-bit literal constant offset. If it fits
1166 // in 8-bits, it can use a smaller encoding.
1167 if (!isUInt<32>(AM.BaseOffs / 4))
1168 return false;
Tom Stellard5bfbae52018-07-11 20:59:01 +00001169 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001170 // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1171 if (!isUInt<20>(AM.BaseOffs))
1172 return false;
1173 } else
1174 llvm_unreachable("unhandled generation");
1175
1176 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1177 return true;
1178
1179 if (AM.Scale == 1 && AM.HasBaseReg)
1180 return true;
1181
1182 return false;
Matt Arsenault711b3902015-08-07 20:18:34 +00001183
Matt Arsenault0da63502018-08-31 05:49:54 +00001184 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001185 return isLegalMUBUFAddressingMode(AM);
Matt Arsenault0da63502018-08-31 05:49:54 +00001186 } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1187 AS == AMDGPUAS::REGION_ADDRESS) {
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001188 // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1189 // field.
1190 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1191 // an 8-bit dword offset but we don't know the alignment here.
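 // e.g. BaseOffs == 65535 is the largest immediate accepted below; 65536
 // fails isUInt<16> and the addressing mode is rejected.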
1192 if (!isUInt<16>(AM.BaseOffs))
Matt Arsenault5015a892014-08-15 17:17:07 +00001193 return false;
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001194
1195 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1196 return true;
1197
1198 if (AM.Scale == 1 && AM.HasBaseReg)
1199 return true;
1200
Matt Arsenault5015a892014-08-15 17:17:07 +00001201 return false;
Matt Arsenault0da63502018-08-31 05:49:54 +00001202 } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1203 AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
Matt Arsenault7d1b6c82016-04-29 06:25:10 +00001204 // For an unknown address space, this usually means that this is for some
1205 // reason being used for pure arithmetic, and not based on some addressing
1206 // computation. We don't have instructions that compute pointers with any
1207 // addressing modes, so treat them as having no offset like flat
1208 // instructions.
Tom Stellard70580f82015-07-20 14:28:41 +00001209 return isLegalFlatAddressingMode(AM);
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001210 } else {
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001211 llvm_unreachable("unhandled address space");
1212 }
Matt Arsenault5015a892014-08-15 17:17:07 +00001213}
1214
Nirav Dave4dcad5d2017-07-10 20:25:54 +00001215bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1216 const SelectionDAG &DAG) const {
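 // e.g. this permits merging global or flat stores up to 128 bits wide,
 // while LDS/region stores are capped at 64 bits (two dwords); private
 // stores are bounded by the subtarget's maximum private element size.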
Matt Arsenault0da63502018-08-31 05:49:54 +00001217 if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001218 return (MemVT.getSizeInBits() <= 4 * 32);
Matt Arsenault0da63502018-08-31 05:49:54 +00001219 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001220 unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1221 return (MemVT.getSizeInBits() <= MaxPrivateBits);
Nicolai Haehnle4dc3b2b2019-07-01 17:17:45 +00001222 } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001223 return (MemVT.getSizeInBits() <= 2 * 32);
1224 }
1225 return true;
1226}
1227
Simon Pilgrim4e0648a2019-06-12 17:14:03 +00001228bool SITargetLowering::allowsMisalignedMemoryAccesses(
1229 EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1230 bool *IsFast) const {
Matt Arsenault1018c892014-04-24 17:08:26 +00001231 if (IsFast)
1232 *IsFast = false;
1233
Matt Arsenault1018c892014-04-24 17:08:26 +00001234 // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1235 // which isn't a simple VT.
Alina Sbirlea6f937b12016-08-04 16:38:44 +00001236 // Until MVT is extended to handle this, simply check for the size and
1237 // rely on the condition below: allow accesses if the size is a multiple of 4.
1238 if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1239 VT.getStoreSize() > 16)) {
Tom Stellard81d871d2013-11-13 23:36:50 +00001240 return false;
Alina Sbirlea6f937b12016-08-04 16:38:44 +00001241 }
Matt Arsenault1018c892014-04-24 17:08:26 +00001242
Matt Arsenault0da63502018-08-31 05:49:54 +00001243 if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1244 AddrSpace == AMDGPUAS::REGION_ADDRESS) {
Matt Arsenault6f2a5262014-07-27 17:46:40 +00001245 // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1246 // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1247 // with adjacent offsets.
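 // e.g. an 8-byte access with Align == 4 is reported fast here because it
 // can be split into two adjacent dword accesses, while Align == 2 returns
 // false.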
Sanjay Patelce74db92015-09-03 15:03:19 +00001248 bool AlignedBy4 = (Align % 4 == 0);
1249 if (IsFast)
1250 *IsFast = AlignedBy4;
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001251
Sanjay Patelce74db92015-09-03 15:03:19 +00001252 return AlignedBy4;
Matt Arsenault6f2a5262014-07-27 17:46:40 +00001253 }
Matt Arsenault1018c892014-04-24 17:08:26 +00001254
Tom Stellard64a9d082016-10-14 18:10:39 +00001255 // FIXME: We have to be conservative here and assume that flat operations
1256 // will access scratch. If we had access to the IR function, then we
1257 // could determine if any private memory was used in the function.
1258 if (!Subtarget->hasUnalignedScratchAccess() &&
Matt Arsenault0da63502018-08-31 05:49:54 +00001259 (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1260 AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
Matt Arsenaultf4320112018-09-24 13:18:15 +00001261 bool AlignedBy4 = Align >= 4;
1262 if (IsFast)
1263 *IsFast = AlignedBy4;
1264
1265 return AlignedBy4;
Tom Stellard64a9d082016-10-14 18:10:39 +00001266 }
1267
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001268 if (Subtarget->hasUnalignedBufferAccess()) {
1269 // If we have an uniform constant load, it still requires using a slow
1270 // buffer instruction if unaligned.
1271 if (IsFast) {
Matt Arsenault0da63502018-08-31 05:49:54 +00001272 *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1273 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001274 (Align % 4 == 0) : true;
1275 }
1276
1277 return true;
1278 }
1279
Tom Stellard33e64c62015-02-04 20:49:52 +00001280 // Smaller than dword value must be aligned.
Tom Stellard33e64c62015-02-04 20:49:52 +00001281 if (VT.bitsLT(MVT::i32))
1282 return false;
1283
Matt Arsenault1018c892014-04-24 17:08:26 +00001284 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1285 // byte-address are ignored, thus forcing Dword alignment.
Tom Stellarde812f2f2014-07-21 15:45:06 +00001286 // This applies to private, global, and constant memory.
Matt Arsenault1018c892014-04-24 17:08:26 +00001287 if (IsFast)
1288 *IsFast = true;
Tom Stellardc6b299c2015-02-02 18:02:28 +00001289
1290 return VT.bitsGT(MVT::i32) && Align % 4 == 0;
Tom Stellard0125f2a2013-06-25 02:39:35 +00001291}
1292
Sjoerd Meijer180f1ae2019-04-30 08:38:12 +00001293EVT SITargetLowering::getOptimalMemOpType(
1294 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1295 bool ZeroMemset, bool MemcpyStrSrc,
1296 const AttributeList &FuncAttributes) const {
Matt Arsenault46645fa2014-07-28 17:49:26 +00001297 // FIXME: Should account for address space here.
1298
1299 // The default fallback uses the private pointer size as a guess for a type to
1300 // use. Make sure we switch these to 64-bit accesses.
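 // e.g. a 32-byte copy with DstAlign >= 4 returns MVT::v4i32 below, so it
 // is emitted as 16-byte chunks rather than pointer-sized pieces.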
1301
1302 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1303 return MVT::v4i32;
1304
1305 if (Size >= 8 && DstAlign >= 4)
1306 return MVT::v2i32;
1307
1308 // Use the default.
1309 return MVT::Other;
1310}
1311
Matt Arsenault0da63502018-08-31 05:49:54 +00001312static bool isFlatGlobalAddrSpace(unsigned AS) {
1313 return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1314 AS == AMDGPUAS::FLAT_ADDRESS ||
Matt Arsenaulta8b43392019-02-08 02:40:47 +00001315 AS == AMDGPUAS::CONSTANT_ADDRESS ||
1316 AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001317}
1318
1319bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1320 unsigned DestAS) const {
Matt Arsenault0da63502018-08-31 05:49:54 +00001321 return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001322}
1323
Alexander Timofeev18009562016-12-08 17:28:47 +00001324bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1325 const MemSDNode *MemNode = cast<MemSDNode>(N);
1326 const Value *Ptr = MemNode->getMemOperand()->getValue();
Matt Arsenault0a0c8712018-03-27 18:39:45 +00001327 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
Alexander Timofeev18009562016-12-08 17:28:47 +00001328 return I && I->getMetadata("amdgpu.noclobber");
1329}
1330
Matt Arsenault8dbeb922019-06-03 18:41:34 +00001331bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1332 unsigned DestAS) const {
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001333 // Flat -> private/local is a simple truncate.
1334 // Flat -> global is a no-op.
Matt Arsenault0da63502018-08-31 05:49:54 +00001335 if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001336 return true;
1337
1338 return isNoopAddrSpaceCast(SrcAS, DestAS);
1339}
1340
Tom Stellarda6f24c62015-12-15 20:55:55 +00001341bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1342 const MemSDNode *MemNode = cast<MemSDNode>(N);
Tom Stellarda6f24c62015-12-15 20:55:55 +00001343
Matt Arsenaultbcf7bec2018-02-09 16:57:48 +00001344 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
Tom Stellarda6f24c62015-12-15 20:55:55 +00001345}
1346
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001347TargetLoweringBase::LegalizeTypeAction
Craig Topper0b5f8162018-11-05 23:26:13 +00001348SITargetLowering::getPreferredVectorAction(MVT VT) const {
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001349 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1350 return TypeSplitVector;
1351
1352 return TargetLoweringBase::getPreferredVectorAction(VT);
Tom Stellardd86003e2013-08-14 23:25:00 +00001353}
Tom Stellard0125f2a2013-06-25 02:39:35 +00001354
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001355bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1356 Type *Ty) const {
Matt Arsenault749035b2016-07-30 01:40:36 +00001357 // FIXME: Could be smarter if called for vector constants.
1358 return true;
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001359}
1360
Tom Stellard2e045bb2016-01-20 00:13:22 +00001361bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001362 if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1363 switch (Op) {
1364 case ISD::LOAD:
1365 case ISD::STORE:
Tom Stellard2e045bb2016-01-20 00:13:22 +00001366
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001367 // These operations are done with 32-bit instructions anyway.
1368 case ISD::AND:
1369 case ISD::OR:
1370 case ISD::XOR:
1371 case ISD::SELECT:
1372 // TODO: Extensions?
1373 return true;
1374 default:
1375 return false;
1376 }
1377 }
Konstantin Zhuravlyove14df4b2016-09-28 20:05:39 +00001378
Tom Stellard2e045bb2016-01-20 00:13:22 +00001379 // SimplifySetCC uses this function to determine whether or not it should
1380 // create setcc with i1 operands. We don't have instructions for i1 setcc.
1381 if (VT == MVT::i1 && Op == ISD::SETCC)
1382 return false;
1383
1384 return TargetLowering::isTypeDesirableForOp(Op, VT);
1385}
1386
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001387SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1388 const SDLoc &SL,
1389 SDValue Chain,
1390 uint64_t Offset) const {
Mehdi Aminia749f2a2015-07-09 02:09:52 +00001391 const DataLayout &DL = DAG.getDataLayout();
Tom Stellardec2e43c2014-09-22 15:35:29 +00001392 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001393 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1394
1395 const ArgDescriptor *InputPtrReg;
1396 const TargetRegisterClass *RC;
1397
1398 std::tie(InputPtrReg, RC)
1399 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Tom Stellard94593ee2013-06-03 17:40:18 +00001400
Matt Arsenault86033ca2014-07-28 17:31:39 +00001401 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
Matt Arsenault0da63502018-08-31 05:49:54 +00001402 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
Matt Arsenaulta0269b62015-06-01 21:58:24 +00001403 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001404 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1405
Matt Arsenault2fb9ccf2018-05-29 17:42:38 +00001406 return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
Jan Veselyfea814d2016-06-21 20:46:20 +00001407}
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001408
Matt Arsenault9166ce82017-07-28 15:52:08 +00001409SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1410 const SDLoc &SL) const {
Matt Arsenault75e71922018-06-28 10:18:55 +00001411 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1412 FIRST_IMPLICIT);
Matt Arsenault9166ce82017-07-28 15:52:08 +00001413 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1414}
1415
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001416SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1417 const SDLoc &SL, SDValue Val,
1418 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001419 const ISD::InputArg *Arg) const {
Tim Renouf361b5b22019-03-21 12:01:21 +00001420 // First, if it is a widened vector, narrow it.
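 // e.g. a v3f32 argument widened to v4f32 in memory is trimmed back to
 // three elements by the EXTRACT_SUBVECTOR below before any element
 // conversion.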
1421 if (VT.isVector() &&
1422 VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1423 EVT NarrowedVT =
1424 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1425 VT.getVectorNumElements());
1426 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1427 DAG.getConstant(0, SL, MVT::i32));
1428 }
1429
1430 // Then convert the vector elements or scalar value.
Matt Arsenault6dca5422017-01-09 18:52:39 +00001431 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1432 VT.bitsLT(MemVT)) {
1433 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1434 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1435 }
1436
Tom Stellardbc6c5232016-10-17 16:21:45 +00001437 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001438 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001439 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001440 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001441 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001442 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001443
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001444 return Val;
1445}
1446
1447SDValue SITargetLowering::lowerKernargMemParameter(
1448 SelectionDAG &DAG, EVT VT, EVT MemVT,
1449 const SDLoc &SL, SDValue Chain,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001450 uint64_t Offset, unsigned Align, bool Signed,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001451 const ISD::InputArg *Arg) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001452 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00001453 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001454 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1455
Matt Arsenault90083d32018-06-07 09:54:49 +00001456 // Try to avoid using an extload by loading earlier than the argument address,
1457 // and extracting the relevant bits. The load should hopefully be merged with
1458 // the previous argument.
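 // e.g. an i16 argument at Offset == 6 gives AlignDownOffset == 4 and
 // OffsetDiff == 2, so the dword at offset 4 is loaded, shifted right by 16
 // bits, and truncated to recover the argument value.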
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001459 if (MemVT.getStoreSize() < 4 && Align < 4) {
1460 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
Matt Arsenault90083d32018-06-07 09:54:49 +00001461 int64_t AlignDownOffset = alignDown(Offset, 4);
1462 int64_t OffsetDiff = Offset - AlignDownOffset;
1463
1464 EVT IntVT = MemVT.changeTypeToInteger();
1465
1466 // TODO: If we passed in the base kernel offset we could have a better
1467 // alignment than 4, but we don't really need it.
1468 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1469 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1470 MachineMemOperand::MODereferenceable |
1471 MachineMemOperand::MOInvariant);
1472
1473 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1474 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1475
1476 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1477 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1478 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1479
1480
1481 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1482 }
1483
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001484 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1485 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001486 MachineMemOperand::MODereferenceable |
1487 MachineMemOperand::MOInvariant);
1488
1489 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001490 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001491}
1492
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001493SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1494 const SDLoc &SL, SDValue Chain,
1495 const ISD::InputArg &Arg) const {
1496 MachineFunction &MF = DAG.getMachineFunction();
1497 MachineFrameInfo &MFI = MF.getFrameInfo();
1498
1499 if (Arg.Flags.isByVal()) {
1500 unsigned Size = Arg.Flags.getByValSize();
1501 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1502 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1503 }
1504
1505 unsigned ArgOffset = VA.getLocMemOffset();
1506 unsigned ArgSize = VA.getValVT().getStoreSize();
1507
1508 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1509
1510 // Create load nodes to retrieve arguments from the stack.
1511 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1512 SDValue ArgValue;
1513
1514 // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1515 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1516 MVT MemVT = VA.getValVT();
1517
1518 switch (VA.getLocInfo()) {
1519 default:
1520 break;
1521 case CCValAssign::BCvt:
1522 MemVT = VA.getLocVT();
1523 break;
1524 case CCValAssign::SExt:
1525 ExtType = ISD::SEXTLOAD;
1526 break;
1527 case CCValAssign::ZExt:
1528 ExtType = ISD::ZEXTLOAD;
1529 break;
1530 case CCValAssign::AExt:
1531 ExtType = ISD::EXTLOAD;
1532 break;
1533 }
1534
1535 ArgValue = DAG.getExtLoad(
1536 ExtType, SL, VA.getLocVT(), Chain, FIN,
1537 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1538 MemVT);
1539 return ArgValue;
1540}
1541
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001542SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1543 const SIMachineFunctionInfo &MFI,
1544 EVT VT,
1545 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1546 const ArgDescriptor *Reg;
1547 const TargetRegisterClass *RC;
1548
1549 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1550 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1551}
1552
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001553static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1554 CallingConv::ID CallConv,
1555 ArrayRef<ISD::InputArg> Ins,
1556 BitVector &Skipped,
1557 FunctionType *FType,
1558 SIMachineFunctionInfo *Info) {
1559 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001560 const ISD::InputArg *Arg = &Ins[I];
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001561
Matt Arsenault55ab9212018-08-01 19:57:34 +00001562 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1563 "vector type argument should have been split");
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001564
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001565 // First check if it's a PS input addr.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001566 if (CallConv == CallingConv::AMDGPU_PS &&
Matt Arsenault51a05d72019-07-12 20:12:17 +00001567 !Arg->Flags.isInReg() && PSInputNum <= 15) {
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001568 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1569
1570 // Inconveniently only the first part of the split is marked as isSplit,
1571 // so skip to the end. We only want to increment PSInputNum once for the
1572 // entire split argument.
1573 if (Arg->Flags.isSplit()) {
1574 while (!Arg->Flags.isSplitEnd()) {
Matt Arsenaulta85af762019-07-25 13:55:07 +00001575 assert((!Arg->VT.isVector() ||
1576 Arg->VT.getScalarSizeInBits() == 16) &&
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001577 "unexpected vector split in ps argument type");
1578 if (!SkipArg)
1579 Splits.push_back(*Arg);
1580 Arg = &Ins[++I];
1581 }
1582 }
1583
1584 if (SkipArg) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001585 // We can safely skip PS inputs.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001586 Skipped.set(Arg->getOrigArgIndex());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001587 ++PSInputNum;
1588 continue;
1589 }
1590
1591 Info->markPSInputAllocated(PSInputNum);
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001592 if (Arg->Used)
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001593 Info->markPSInputEnabled(PSInputNum);
1594
1595 ++PSInputNum;
1596 }
1597
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001598 Splits.push_back(*Arg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001599 }
1600}
1601
1602// Allocate special inputs passed in VGPRs.
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001603void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1604 MachineFunction &MF,
1605 const SIRegisterInfo &TRI,
1606 SIMachineFunctionInfo &Info) const {
1607 const LLT S32 = LLT::scalar(32);
1608 MachineRegisterInfo &MRI = MF.getRegInfo();
1609
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001610 if (Info.hasWorkItemIDX()) {
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001611 Register Reg = AMDGPU::VGPR0;
1612 MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001613
1614 CCInfo.AllocateReg(Reg);
1615 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1616 }
1617
1618 if (Info.hasWorkItemIDY()) {
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001619 Register Reg = AMDGPU::VGPR1;
1620 MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001621
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001622 CCInfo.AllocateReg(Reg);
1623 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1624 }
1625
1626 if (Info.hasWorkItemIDZ()) {
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001627 Register Reg = AMDGPU::VGPR2;
1628 MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001629
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001630 CCInfo.AllocateReg(Reg);
1631 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1632 }
1633}
1634
1635// Try to allocate a VGPR at the end of the argument list, or if no argument
1636// VGPRs are left, allocate a stack slot.
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00001637// If \p Mask is given, it indicates the bitfield position in the register.
1638// If \p Arg is given, use it with the new \p Mask instead of allocating a new one.
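// e.g. the callers below pack all three workitem IDs into one VGPR by
// passing Mask == 0x3ff for X, 0x3ff << 10 for Y, and 0x3ff << 20 for Z,
// giving each ID a 10-bit field of the same register.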
1639static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
1640 ArgDescriptor Arg = ArgDescriptor()) {
1641 if (Arg.isSet())
1642 return ArgDescriptor::createArg(Arg, Mask);
1643
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001644 ArrayRef<MCPhysReg> ArgVGPRs
1645 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1646 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1647 if (RegIdx == ArgVGPRs.size()) {
1648 // Spill to stack required.
1649 int64_t Offset = CCInfo.AllocateStack(4, 4);
1650
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00001651 return ArgDescriptor::createStack(Offset, Mask);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001652 }
1653
1654 unsigned Reg = ArgVGPRs[RegIdx];
1655 Reg = CCInfo.AllocateReg(Reg);
1656 assert(Reg != AMDGPU::NoRegister);
1657
1658 MachineFunction &MF = CCInfo.getMachineFunction();
1659 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00001660 return ArgDescriptor::createRegister(Reg, Mask);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001661}
1662
1663static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1664 const TargetRegisterClass *RC,
1665 unsigned NumArgRegs) {
1666 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1667 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1668 if (RegIdx == ArgSGPRs.size())
1669 report_fatal_error("ran out of SGPRs for arguments");
1670
1671 unsigned Reg = ArgSGPRs[RegIdx];
1672 Reg = CCInfo.AllocateReg(Reg);
1673 assert(Reg != AMDGPU::NoRegister);
1674
1675 MachineFunction &MF = CCInfo.getMachineFunction();
1676 MF.addLiveIn(Reg, RC);
1677 return ArgDescriptor::createRegister(Reg);
1678}
1679
1680static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1681 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1682}
1683
1684static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1685 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1686}
1687
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001688void SITargetLowering::allocateSpecialInputVGPRs(CCState &CCInfo,
1689 MachineFunction &MF,
1690 const SIRegisterInfo &TRI,
1691 SIMachineFunctionInfo &Info) const {
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00001692 const unsigned Mask = 0x3ff;
1693 ArgDescriptor Arg;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001694
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00001695 if (Info.hasWorkItemIDX()) {
1696 Arg = allocateVGPR32Input(CCInfo, Mask);
1697 Info.setWorkItemIDX(Arg);
1698 }
1699
1700 if (Info.hasWorkItemIDY()) {
1701 Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg);
1702 Info.setWorkItemIDY(Arg);
1703 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001704
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001705 if (Info.hasWorkItemIDZ())
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00001706 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg));
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001707}
1708
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001709void SITargetLowering::allocateSpecialInputSGPRs(
1710 CCState &CCInfo,
1711 MachineFunction &MF,
1712 const SIRegisterInfo &TRI,
1713 SIMachineFunctionInfo &Info) const {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001714 auto &ArgInfo = Info.getArgInfo();
1715
1716 // TODO: Unify handling with private memory pointers.
1717
1718 if (Info.hasDispatchPtr())
1719 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1720
1721 if (Info.hasQueuePtr())
1722 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1723
1724 if (Info.hasKernargSegmentPtr())
1725 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1726
1727 if (Info.hasDispatchID())
1728 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1729
1730 // flat_scratch_init is not applicable for non-kernel functions.
1731
1732 if (Info.hasWorkGroupIDX())
1733 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1734
1735 if (Info.hasWorkGroupIDY())
1736 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1737
1738 if (Info.hasWorkGroupIDZ())
1739 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
Matt Arsenault817c2532017-08-03 23:12:44 +00001740
1741 if (Info.hasImplicitArgPtr())
1742 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001743}
1744
1745// Allocate special inputs passed in user SGPRs.
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001746void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
1747 MachineFunction &MF,
1748 const SIRegisterInfo &TRI,
1749 SIMachineFunctionInfo &Info) const {
Matt Arsenault10fc0622017-06-26 03:01:31 +00001750 if (Info.hasImplicitBufferPtr()) {
1751 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1752 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1753 CCInfo.AllocateReg(ImplicitBufferPtrReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001754 }
1755
1756 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1757 if (Info.hasPrivateSegmentBuffer()) {
1758 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1759 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1760 CCInfo.AllocateReg(PrivateSegmentBufferReg);
1761 }
1762
1763 if (Info.hasDispatchPtr()) {
1764 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1765 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1766 CCInfo.AllocateReg(DispatchPtrReg);
1767 }
1768
1769 if (Info.hasQueuePtr()) {
1770 unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1771 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1772 CCInfo.AllocateReg(QueuePtrReg);
1773 }
1774
1775 if (Info.hasKernargSegmentPtr()) {
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001776 MachineRegisterInfo &MRI = MF.getRegInfo();
1777 Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001778 CCInfo.AllocateReg(InputPtrReg);
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001779
1780 Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1781 MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001782 }
1783
1784 if (Info.hasDispatchID()) {
1785 unsigned DispatchIDReg = Info.addDispatchID(TRI);
1786 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1787 CCInfo.AllocateReg(DispatchIDReg);
1788 }
1789
1790 if (Info.hasFlatScratchInit()) {
1791 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1792 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1793 CCInfo.AllocateReg(FlatScratchInitReg);
1794 }
1795
1796 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1797 // these from the dispatch pointer.
1798}
1799
1800// Allocate special input registers that are initialized per-wave.
Matt Arsenaultfecf43e2019-07-19 14:15:18 +00001801void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
1802 MachineFunction &MF,
1803 SIMachineFunctionInfo &Info,
1804 CallingConv::ID CallConv,
1805 bool IsShader) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001806 if (Info.hasWorkGroupIDX()) {
1807 unsigned Reg = Info.addWorkGroupIDX();
1808 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1809 CCInfo.AllocateReg(Reg);
1810 }
1811
1812 if (Info.hasWorkGroupIDY()) {
1813 unsigned Reg = Info.addWorkGroupIDY();
1814 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1815 CCInfo.AllocateReg(Reg);
1816 }
1817
1818 if (Info.hasWorkGroupIDZ()) {
1819 unsigned Reg = Info.addWorkGroupIDZ();
1820 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1821 CCInfo.AllocateReg(Reg);
1822 }
1823
1824 if (Info.hasWorkGroupInfo()) {
1825 unsigned Reg = Info.addWorkGroupInfo();
1826 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1827 CCInfo.AllocateReg(Reg);
1828 }
1829
1830 if (Info.hasPrivateSegmentWaveByteOffset()) {
1831 // Scratch wave offset passed in system SGPR.
1832 unsigned PrivateSegmentWaveByteOffsetReg;
1833
1834 if (IsShader) {
Marek Olsak584d2c02017-05-04 22:25:20 +00001835 PrivateSegmentWaveByteOffsetReg =
1836 Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1837
1838 // This is true if the scratch wave byte offset doesn't have a fixed
1839 // location.
1840 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1841 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1842 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1843 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001844 } else
1845 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1846
1847 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1848 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1849 }
1850}
1851
1852static void reservePrivateMemoryRegs(const TargetMachine &TM,
1853 MachineFunction &MF,
1854 const SIRegisterInfo &TRI,
Matt Arsenault1cc47f82017-07-18 16:44:56 +00001855 SIMachineFunctionInfo &Info) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001856 // Now that we've figured out where the scratch register inputs are, see if
1857 // we should reserve the arguments and use them directly.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001858 MachineFrameInfo &MFI = MF.getFrameInfo();
1859 bool HasStackObjects = MFI.hasStackObjects();
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001860 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001861
1862 // Record that we know we have non-spill stack objects so we don't need to
1863 // check all stack objects later.
1864 if (HasStackObjects)
1865 Info.setHasNonSpillStackObjects(true);
1866
1867 // Everything live out of a block is spilled with fast regalloc, so it's
1868 // almost certain that spilling will be required.
1869 if (TM.getOptLevel() == CodeGenOpt::None)
1870 HasStackObjects = true;
1871
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001872 // For now, assume stack access is needed in any callee function, so we need
1873 // the scratch registers to pass in.
1874 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1875
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001876 if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1877 // If we have stack objects, we unquestionably need the private buffer
1878 // resource. For the Code Object V2 ABI, this will be the first 4 user
1879 // SGPR inputs. We can reserve those and use them directly.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001880
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001881 unsigned PrivateSegmentBufferReg =
1882 Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1883 Info.setScratchRSrcReg(PrivateSegmentBufferReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001884 } else {
1885 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001886 // We tentatively reserve the highest available registers (skipping the last
1887 // few, which may contain VCC, FLAT_SCR, and XNACK). After register
1888 // allocation, we'll replace these with the registers immediately after those
1889 // that were actually allocated. In the prologue, copies will be inserted
1890 // from the argument to these reserved registers.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001891
1892 // Without HSA, relocations are used for the scratch pointer and the
1893 // buffer resource setup is always inserted in the prologue. Scratch wave
1894 // offset is still in an input SGPR.
1895 Info.setScratchRSrcReg(ReservedBufferReg);
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001896 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001897
Matt Arsenault22e3dc62019-06-21 20:04:02 +00001898 // hasFP should be accurate for kernels even before the frame is finalized.
1899 if (ST.getFrameLowering()->hasFP(MF)) {
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001900 MachineRegisterInfo &MRI = MF.getRegInfo();
1901
1902 // Try to use s32 as the SP, but move it if it would interfere with input
1903 // arguments. This won't work with calls though.
1904 //
1905 // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1906 // registers.
1907 if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1908 Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001909 } else {
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001910 assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1911
1912 if (MFI.hasCalls())
1913 report_fatal_error("call in graphics shader with too many input SGPRs");
1914
1915 for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1916 if (!MRI.isLiveIn(Reg)) {
1917 Info.setStackPtrOffsetReg(Reg);
1918 break;
1919 }
1920 }
1921
1922 if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1923 report_fatal_error("failed to find register for SP");
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001924 }
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001925
Matt Arsenault22e3dc62019-06-21 20:04:02 +00001926 if (MFI.hasCalls()) {
1927 Info.setScratchWaveOffsetReg(AMDGPU::SGPR33);
1928 Info.setFrameOffsetReg(AMDGPU::SGPR33);
1929 } else {
1930 unsigned ReservedOffsetReg =
1931 TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1932 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1933 Info.setFrameOffsetReg(ReservedOffsetReg);
1934 }
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001935 } else if (RequiresStackAccess) {
1936 assert(!MFI.hasCalls());
1937 // We know there are accesses and they will be done relative to SP, so just
1938 // pin it to the input.
1939 //
1940 // FIXME: Should not do this if inline asm is reading/writing these
1941 // registers.
1942 unsigned PreloadedSP = Info.getPreloadedReg(
1943 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1944
1945 Info.setStackPtrOffsetReg(PreloadedSP);
1946 Info.setScratchWaveOffsetReg(PreloadedSP);
1947 Info.setFrameOffsetReg(PreloadedSP);
1948 } else {
1949 assert(!MFI.hasCalls());
1950
1951 // There may not be stack access at all. There may still be spills, or
1952 // access of a constant pointer (in which case an extra copy will be
1953 // emitted in the prolog).
1954 unsigned ReservedOffsetReg
1955 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1956 Info.setStackPtrOffsetReg(ReservedOffsetReg);
1957 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1958 Info.setFrameOffsetReg(ReservedOffsetReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001959 }
1960}
1961
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001962bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1963 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1964 return !Info->isEntryFunction();
1965}
1966
1967void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1968
1969}
1970
1971void SITargetLowering::insertCopiesSplitCSR(
1972 MachineBasicBlock *Entry,
1973 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1974 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1975
1976 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1977 if (!IStart)
1978 return;
1979
1980 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1981 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1982 MachineBasicBlock::iterator MBBI = Entry->begin();
1983 for (const MCPhysReg *I = IStart; *I; ++I) {
1984 const TargetRegisterClass *RC = nullptr;
1985 if (AMDGPU::SReg_64RegClass.contains(*I))
1986 RC = &AMDGPU::SGPR_64RegClass;
1987 else if (AMDGPU::SReg_32RegClass.contains(*I))
1988 RC = &AMDGPU::SGPR_32RegClass;
1989 else
1990 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1991
1992 unsigned NewVR = MRI->createVirtualRegister(RC);
1993 // Create copy from CSR to a virtual register.
1994 Entry->addLiveIn(*I);
1995 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1996 .addReg(*I);
1997
1998 // Insert the copy-back instructions right before the terminator.
1999 for (auto *Exit : Exits)
2000 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
2001 TII->get(TargetOpcode::COPY), *I)
2002 .addReg(NewVR);
2003 }
2004}
2005
Christian Konig2c8f6d52013-03-07 09:03:52 +00002006SDValue SITargetLowering::LowerFormalArguments(
Eric Christopher7792e322015-01-30 23:24:40 +00002007 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002008 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2009 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002010 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002011
2012 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaultceafc552018-05-29 17:42:50 +00002013 const Function &Fn = MF.getFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002014 FunctionType *FType = MF.getFunction().getFunctionType();
Christian Konig99ee0f42013-03-07 09:04:14 +00002015 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002016
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +00002017 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00002018 DiagnosticInfoUnsupported NoGraphicsHSA(
Matthias Braunf1caa282017-12-15 22:22:58 +00002019 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
Matt Arsenaultd48da142015-11-02 23:23:02 +00002020 DAG.getContext()->diagnose(NoGraphicsHSA);
Diana Picus81bc3172016-05-26 15:24:55 +00002021 return DAG.getEntryNode();
Matt Arsenaultd48da142015-11-02 23:23:02 +00002022 }
2023
Christian Konig2c8f6d52013-03-07 09:03:52 +00002024 SmallVector<ISD::InputArg, 16> Splits;
Christian Konig2c8f6d52013-03-07 09:03:52 +00002025 SmallVector<CCValAssign, 16> ArgLocs;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002026 BitVector Skipped(Ins.size());
Eric Christopherb5217502014-08-06 18:45:26 +00002027 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2028 *DAG.getContext());
Christian Konig2c8f6d52013-03-07 09:03:52 +00002029
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002030 bool IsShader = AMDGPU::isShader(CallConv);
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +00002031 bool IsKernel = AMDGPU::isKernel(CallConv);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002032 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
Christian Konig99ee0f42013-03-07 09:04:14 +00002033
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002034 if (IsShader) {
2035 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
2036
2037 // At least one interpolation mode must be enabled or else the GPU will
2038 // hang.
2039 //
2040 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
2041 // set PSInputAddr, the user wants to enable some bits after the compilation
2042 // based on run-time states. Since we can't know what the final PSInputEna
2043 // will look like, we shouldn't do anything here; the user should take
2044 // responsibility for the correct programming.
2045 //
2046 // Otherwise, the following restrictions apply:
2047 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
2048 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
2049 // enabled too.
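 // e.g. if no PERSP_* or LINEAR_* bit is set ((PSInputAddr & 0x7F) == 0),
 // the code below force-enables input 0 and allocates VGPR0/VGPR1 so at
 // least one interpolation mode stays enabled.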
Tim Renoufc8ffffe2017-10-12 16:16:41 +00002050 if (CallConv == CallingConv::AMDGPU_PS) {
2051 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
2052 ((Info->getPSInputAddr() & 0xF) == 0 &&
2053 Info->isPSInputAllocated(11))) {
2054 CCInfo.AllocateReg(AMDGPU::VGPR0);
2055 CCInfo.AllocateReg(AMDGPU::VGPR1);
2056 Info->markPSInputAllocated(0);
2057 Info->markPSInputEnabled(0);
2058 }
2059 if (Subtarget->isAmdPalOS()) {
2060 // For isAmdPalOS, the user does not enable some bits after compilation
2061 // based on run-time states; the register values being generated here are
2062 // the final ones set in hardware. Therefore we need to apply the
2063 // workaround to PSInputAddr and PSInputEnable together. (The case where
2064 // a bit is set in PSInputAddr but not PSInputEnable is where the
2065 // frontend set up an input arg for a particular interpolation mode, but
2066 // nothing uses that input arg. Really we should have an earlier pass
2067 // that removes such an arg.)
2068 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
2069 if ((PsInputBits & 0x7F) == 0 ||
2070 ((PsInputBits & 0xF) == 0 &&
2071 (PsInputBits >> 11 & 1)))
2072 Info->markPSInputEnabled(
2073 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
2074 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002075 }
2076
Tom Stellard2f3f9852017-01-25 01:25:13 +00002077 assert(!Info->hasDispatchPtr() &&
Tom Stellardf110f8f2016-04-14 16:27:03 +00002078 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
2079 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
2080 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
2081 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
2082 !Info->hasWorkItemIDZ());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002083 } else if (IsKernel) {
2084 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002085 } else {
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002086 Splits.append(Ins.begin(), Ins.end());
Tom Stellardaf775432013-10-23 00:44:32 +00002087 }
2088
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002089 if (IsEntryFunc) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002090 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002091 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
Tom Stellard2f3f9852017-01-25 01:25:13 +00002092 }
2093
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002094 if (IsKernel) {
Tom Stellardbbeb45a2016-09-16 21:53:00 +00002095 analyzeFormalArgumentsCompute(CCInfo, Ins);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002096 } else {
2097 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2098 CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2099 }
Christian Konig2c8f6d52013-03-07 09:03:52 +00002100
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002101 SmallVector<SDValue, 16> Chains;
2102
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002103 // FIXME: This is the minimum kernel argument alignment. We should improve
2104 // this to the maximum alignment of the arguments.
2105 //
2106 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
2107 // kern arg offset.
2108 const unsigned KernelArgBaseAlign = 16;
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002109
2110 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
Christian Konigb7be72d2013-05-17 09:46:48 +00002111 const ISD::InputArg &Arg = Ins[i];
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00002112 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
Christian Konigb7be72d2013-05-17 09:46:48 +00002113 InVals.push_back(DAG.getUNDEF(Arg.VT));
Christian Konig99ee0f42013-03-07 09:04:14 +00002114 continue;
2115 }
2116
Christian Konig2c8f6d52013-03-07 09:03:52 +00002117 CCValAssign &VA = ArgLocs[ArgIdx++];
Craig Topper7f416c82014-11-16 21:17:18 +00002118 MVT VT = VA.getLocVT();
Tom Stellarded882c22013-06-03 17:40:11 +00002119
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002120 if (IsEntryFunc && VA.isMemLoc()) {
Tom Stellardaf775432013-10-23 00:44:32 +00002121 VT = Ins[i].VT;
Tom Stellardbbeb45a2016-09-16 21:53:00 +00002122 EVT MemVT = VA.getLocVT();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002123
Matt Arsenault4bec7d42018-07-20 09:05:08 +00002124 const uint64_t Offset = VA.getLocMemOffset();
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002125 unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002126
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002127 SDValue Arg = lowerKernargMemParameter(
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002128 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002129 Chains.push_back(Arg.getValue(1));
Tom Stellardca7ecf32014-08-22 18:49:31 +00002130
Craig Toppere3dcce92015-08-01 22:20:21 +00002131 auto *ParamTy =
Andrew Trick05938a52015-02-16 18:10:47 +00002132 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
Tom Stellard5bfbae52018-07-11 20:59:01 +00002133 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00002134 ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2135 ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
Tom Stellardca7ecf32014-08-22 18:49:31 +00002136 // On SI local pointers are just offsets into LDS, so they are always
2137 // less than 16 bits. On CI and newer they could potentially be
2138 // real pointers, so we can't guarantee their size.
2139 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2140 DAG.getValueType(MVT::i16));
2141 }
2142
Tom Stellarded882c22013-06-03 17:40:11 +00002143 InVals.push_back(Arg);
2144 continue;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002145 } else if (!IsEntryFunc && VA.isMemLoc()) {
2146 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2147 InVals.push_back(Val);
2148 if (!Arg.Flags.isByVal())
2149 Chains.push_back(Val.getValue(1));
2150 continue;
Tom Stellarded882c22013-06-03 17:40:11 +00002151 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002152
Christian Konig2c8f6d52013-03-07 09:03:52 +00002153 assert(VA.isRegLoc() && "Parameter must be in a register!");
2154
2155 unsigned Reg = VA.getLocReg();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002156 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
Matt Arsenaultb3463552017-07-15 05:52:59 +00002157 EVT ValVT = VA.getValVT();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002158
2159 Reg = MF.addLiveIn(Reg, RC);
2160 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2161
Matt Arsenault5c714cb2019-05-23 19:38:14 +00002162 if (Arg.Flags.isSRet()) {
Matt Arsenault45b98182017-11-15 00:45:43 +00002163 // The return object should be reasonably addressable.
2164
2165 // FIXME: This helps when the return is a real sret. If it is a
2166 // automatically inserted sret (i.e. CanLowerReturn returns false), an
2167 // extra copy is inserted in SelectionDAGBuilder which obscures this.
Matt Arsenault5c714cb2019-05-23 19:38:14 +00002168 unsigned NumBits
2169 = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
Matt Arsenault45b98182017-11-15 00:45:43 +00002170 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2171 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2172 }
2173
Matt Arsenaultb3463552017-07-15 05:52:59 +00002174 // If this is an 8 or 16-bit value, it is really passed promoted
2175 // to 32 bits. Insert an assert[sz]ext to capture this, then
2176 // truncate to the right size.
2177 switch (VA.getLocInfo()) {
2178 case CCValAssign::Full:
2179 break;
2180 case CCValAssign::BCvt:
2181 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2182 break;
2183 case CCValAssign::SExt:
2184 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2185 DAG.getValueType(ValVT));
2186 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2187 break;
2188 case CCValAssign::ZExt:
2189 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2190 DAG.getValueType(ValVT));
2191 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2192 break;
2193 case CCValAssign::AExt:
2194 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2195 break;
2196 default:
2197 llvm_unreachable("Unknown loc info!");
2198 }
2199
Christian Konig2c8f6d52013-03-07 09:03:52 +00002200 InVals.push_back(Val);
2201 }
Tom Stellarde99fb652015-01-20 19:33:04 +00002202
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002203 if (!IsEntryFunc) {
2204 // Special inputs come after user arguments.
2205 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2206 }
2207
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002208 // Start adding system SGPRs.
2209 if (IsEntryFunc) {
2210 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002211 } else {
2212 CCInfo.AllocateReg(Info->getScratchRSrcReg());
2213 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2214 CCInfo.AllocateReg(Info->getFrameOffsetReg());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002215 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002216 }
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002217
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002218 auto &ArgUsageInfo =
2219 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00002220 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002221
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002222 unsigned StackArgSize = CCInfo.getNextStackOffset();
2223 Info->setBytesInStackArgArea(StackArgSize);
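  // Recording this lets isEligibleForTailCallOptimization later check that a
  // callee's stack arguments fit within our own incoming argument area.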
2224
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002225 return Chains.empty() ? Chain :
2226 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
Christian Konig2c8f6d52013-03-07 09:03:52 +00002227}
2228
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002229// TODO: If return values can't fit in registers, we should return as many as
2230// possible in registers before passing the rest on the stack.
2231bool SITargetLowering::CanLowerReturn(
2232 CallingConv::ID CallConv,
2233 MachineFunction &MF, bool IsVarArg,
2234 const SmallVectorImpl<ISD::OutputArg> &Outs,
2235 LLVMContext &Context) const {
2236 // Replacing returns with sret/stack usage doesn't make sense for shaders.
2237 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2238 // for shaders. Vector types should be explicitly handled by CC.
2239 if (AMDGPU::isEntryFunctionCC(CallConv))
2240 return true;
2241
2242 SmallVector<CCValAssign, 16> RVLocs;
2243 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2244 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2245}
2246
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002247SDValue
2248SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2249 bool isVarArg,
2250 const SmallVectorImpl<ISD::OutputArg> &Outs,
2251 const SmallVectorImpl<SDValue> &OutVals,
2252 const SDLoc &DL, SelectionDAG &DAG) const {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002253 MachineFunction &MF = DAG.getMachineFunction();
2254 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2255
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002256 if (AMDGPU::isKernel(CallConv)) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002257 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2258 OutVals, DL, DAG);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002259 }
2260
2261 bool IsShader = AMDGPU::isShader(CallConv);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002262
Matt Arsenault55ab9212018-08-01 19:57:34 +00002263 Info->setIfReturnsVoid(Outs.empty());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002264 bool IsWaveEnd = Info->returnsVoid() && IsShader;
Marek Olsak8e9cc632016-01-13 17:23:09 +00002265
Marek Olsak8a0f3352016-01-13 17:23:04 +00002266 // CCValAssign - represent the assignment of the return value to a location.
2267 SmallVector<CCValAssign, 48> RVLocs;
Matt Arsenault55ab9212018-08-01 19:57:34 +00002268 SmallVector<ISD::OutputArg, 48> Splits;
Marek Olsak8a0f3352016-01-13 17:23:04 +00002269
2270 // CCState - Info about the registers and stack slots.
2271 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2272 *DAG.getContext());
2273
2274 // Analyze outgoing return values.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002275 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
Marek Olsak8a0f3352016-01-13 17:23:04 +00002276
2277 SDValue Flag;
2278 SmallVector<SDValue, 48> RetOps;
2279 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2280
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002281 // Add return address for callable functions.
2282 if (!Info->isEntryFunction()) {
2283 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2284 SDValue ReturnAddrReg = CreateLiveInRegister(
2285 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2286
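    // Thread the return address through a virtual register so the allocator
    // may place or spill it as needed, rather than pinning the physical
    // live-in for the whole function.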
Christudasan Devadasanb2d24bd2019-07-09 16:48:42 +00002287 SDValue ReturnAddrVirtualReg = DAG.getRegister(
2288 MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass),
2289 MVT::i64);
2290 Chain =
2291 DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002292 Flag = Chain.getValue(1);
Christudasan Devadasanb2d24bd2019-07-09 16:48:42 +00002293 RetOps.push_back(ReturnAddrVirtualReg);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002294 }
2295
Marek Olsak8a0f3352016-01-13 17:23:04 +00002296 // Copy the result values into the output registers.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002297 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2298 ++I, ++RealRVLocIdx) {
2299 CCValAssign &VA = RVLocs[I];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002300 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002301 // TODO: Partially return in registers if return values don't fit.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002302 SDValue Arg = OutVals[RealRVLocIdx];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002303
2304 // Copied from other backends.
2305 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002306 case CCValAssign::Full:
2307 break;
2308 case CCValAssign::BCvt:
2309 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2310 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002311 case CCValAssign::SExt:
2312 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2313 break;
2314 case CCValAssign::ZExt:
2315 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2316 break;
2317 case CCValAssign::AExt:
2318 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2319 break;
2320 default:
2321 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002322 }
2323
2324 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2325 Flag = Chain.getValue(1);
2326 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2327 }
2328
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002329 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002330 if (!Info->isEntryFunction()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002331 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002332 const MCPhysReg *I =
2333 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2334 if (I) {
2335 for (; *I; ++I) {
2336 if (AMDGPU::SReg_64RegClass.contains(*I))
2337 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2338 else if (AMDGPU::SReg_32RegClass.contains(*I))
2339 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2340 else
2341 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2342 }
2343 }
2344 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002345
Marek Olsak8a0f3352016-01-13 17:23:04 +00002346 // Update chain and glue.
2347 RetOps[0] = Chain;
2348 if (Flag.getNode())
2349 RetOps.push_back(Flag);
2350
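  // Pick the return opcode: a void shader return ends the wave outright
  // (ENDPGM); a shader returning values falls through to the epilog
  // (RETURN_TO_EPILOG); callable functions use a normal return (RET_FLAG).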
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002351 unsigned Opc = AMDGPUISD::ENDPGM;
2352 if (!IsWaveEnd)
2353 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002354 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002355}
2356
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002357SDValue SITargetLowering::LowerCallResult(
2358 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2359 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2360 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2361 SDValue ThisVal) const {
2362 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2363
2364 // Assign locations to each value returned by this call.
2365 SmallVector<CCValAssign, 16> RVLocs;
2366 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2367 *DAG.getContext());
2368 CCInfo.AnalyzeCallResult(Ins, RetCC);
2369
2370 // Copy all of the result registers out of their specified physreg.
2371 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2372 CCValAssign VA = RVLocs[i];
2373 SDValue Val;
2374
2375 if (VA.isRegLoc()) {
2376 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2377 Chain = Val.getValue(1);
2378 InFlag = Val.getValue(2);
2379 } else if (VA.isMemLoc()) {
2380 report_fatal_error("TODO: return values in memory");
2381 } else
2382 llvm_unreachable("unknown argument location type");
2383
2384 switch (VA.getLocInfo()) {
2385 case CCValAssign::Full:
2386 break;
2387 case CCValAssign::BCvt:
2388 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2389 break;
2390 case CCValAssign::ZExt:
2391 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2392 DAG.getValueType(VA.getValVT()));
2393 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2394 break;
2395 case CCValAssign::SExt:
2396 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2397 DAG.getValueType(VA.getValVT()));
2398 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2399 break;
2400 case CCValAssign::AExt:
2401 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2402 break;
2403 default:
2404 llvm_unreachable("Unknown loc info!");
2405 }
2406
2407 InVals.push_back(Val);
2408 }
2409
2410 return Chain;
2411}
2412
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002413// Add code to pass the special inputs required by the features in use, separate
2414// from the explicit user arguments present in the IR.
2415void SITargetLowering::passSpecialInputs(
2416 CallLoweringInfo &CLI,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002417 CCState &CCInfo,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002418 const SIMachineFunctionInfo &Info,
2419 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2420 SmallVectorImpl<SDValue> &MemOpChains,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002421 SDValue Chain) const {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002422 // If we don't have a call site, this was a call inserted by
2423 // legalization. These can never use special inputs.
2424 if (!CLI.CS)
2425 return;
2426
2427 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002428 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002429
2430 SelectionDAG &DAG = CLI.DAG;
2431 const SDLoc &DL = CLI.DL;
2432
Tom Stellardc5a154d2018-06-28 23:47:12 +00002433 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002434
2435 auto &ArgUsageInfo =
2436 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2437 const AMDGPUFunctionArgInfo &CalleeArgInfo
2438 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2439
2440 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2441
2442 // TODO: Unify with private memory register handling. This is complicated by
2443  // the fact that, at least in kernels, the incoming argument is not
2444  // necessarily in the same location as the corresponding preloaded input.
2445 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2446 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2447 AMDGPUFunctionArgInfo::QUEUE_PTR,
2448 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2449 AMDGPUFunctionArgInfo::DISPATCH_ID,
2450 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2451 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2452 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
Matt Arsenault817c2532017-08-03 23:12:44 +00002453 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002454 };
2455
2456 for (auto InputID : InputRegs) {
2457 const ArgDescriptor *OutgoingArg;
2458 const TargetRegisterClass *ArgRC;
2459
2460 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2461 if (!OutgoingArg)
2462 continue;
2463
2464 const ArgDescriptor *IncomingArg;
2465 const TargetRegisterClass *IncomingArgRC;
2466 std::tie(IncomingArg, IncomingArgRC)
2467 = CallerArgInfo.getPreloadedValue(InputID);
2468 assert(IncomingArgRC == ArgRC);
2469
2470 // All special arguments are ints for now.
2471 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
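    // Note: the spill size of the register class is used here only to tell
    // 64-bit pointer inputs (e.g. the queue pointer) apart from 32-bit IDs.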
Matt Arsenault817c2532017-08-03 23:12:44 +00002472 SDValue InputReg;
2473
2474 if (IncomingArg) {
2475 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2476 } else {
2477 // The implicit arg ptr is special because it doesn't have a corresponding
2478 // input for kernels, and is computed from the kernarg segment pointer.
2479 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2480 InputReg = getImplicitArgPtr(DAG, DL);
2481 }
2482
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002483 if (OutgoingArg->isRegister()) {
2484 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2485 } else {
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002486 unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2487 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2488 SpecialArgOffset);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002489 MemOpChains.push_back(ArgStore);
2490 }
2491 }
Stanislav Mekhanoshin07fd88d2019-06-28 01:52:13 +00002492
2493  // Pack workitem IDs into a single register, or pass them as-is if already
2494 // packed.
2495 const ArgDescriptor *OutgoingArg;
2496 const TargetRegisterClass *ArgRC;
2497
2498 std::tie(OutgoingArg, ArgRC) =
2499 CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
2500 if (!OutgoingArg)
2501 std::tie(OutgoingArg, ArgRC) =
2502 CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
2503 if (!OutgoingArg)
2504 std::tie(OutgoingArg, ArgRC) =
2505 CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
2506 if (!OutgoingArg)
2507 return;
2508
2509 const ArgDescriptor *IncomingArgX
2510 = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X).first;
2511 const ArgDescriptor *IncomingArgY
2512 = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y).first;
2513 const ArgDescriptor *IncomingArgZ
2514 = CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z).first;
2515
2516 SDValue InputReg;
2517 SDLoc SL;
2518
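  // The packed layout matches the shifts below: X occupies bits [9:0],
  // Y bits [19:10], and Z bits [29:20] of the single i32.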
2519  // If the incoming IDs are not packed, we need to pack them.
2520 if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
2521 InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
2522
2523 if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
2524 SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
2525 Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
2526 DAG.getShiftAmountConstant(10, MVT::i32, SL));
2527 InputReg = InputReg.getNode() ?
2528 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
2529 }
2530
2531 if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
2532 SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
2533 Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
2534 DAG.getShiftAmountConstant(20, MVT::i32, SL));
2535 InputReg = InputReg.getNode() ?
2536 DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z;
2537 }
2538
2539 if (!InputReg.getNode()) {
2540    // The workitem IDs are already packed, so any of the present incoming
2541    // arguments will carry all required fields.
2542 ArgDescriptor IncomingArg = ArgDescriptor::createArg(
2543 IncomingArgX ? *IncomingArgX :
2544 IncomingArgY ? *IncomingArgY :
2545 *IncomingArgZ, ~0u);
2546 InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg);
2547 }
2548
2549 if (OutgoingArg->isRegister()) {
2550 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2551 } else {
2552 unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
2553 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2554 SpecialArgOffset);
2555 MemOpChains.push_back(ArgStore);
2556 }
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002557}
2558
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002559static bool canGuaranteeTCO(CallingConv::ID CC) {
2560 return CC == CallingConv::Fast;
2561}
2562
2563/// Return true if we might ever do TCO for calls with this calling convention.
2564static bool mayTailCallThisCC(CallingConv::ID CC) {
2565 switch (CC) {
2566 case CallingConv::C:
2567 return true;
2568 default:
2569 return canGuaranteeTCO(CC);
2570 }
2571}
2572
2573bool SITargetLowering::isEligibleForTailCallOptimization(
2574 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2575 const SmallVectorImpl<ISD::OutputArg> &Outs,
2576 const SmallVectorImpl<SDValue> &OutVals,
2577 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2578 if (!mayTailCallThisCC(CalleeCC))
2579 return false;
2580
2581 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002582 const Function &CallerF = MF.getFunction();
2583 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002584 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2585 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2586
2587  // Kernels aren't callable, and don't have a live-in return address, so it
2588 // doesn't make sense to do a tail call with entry functions.
2589 if (!CallerPreserved)
2590 return false;
2591
2592 bool CCMatch = CallerCC == CalleeCC;
2593
2594 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2595 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2596 return true;
2597 return false;
2598 }
2599
2600 // TODO: Can we handle var args?
2601 if (IsVarArg)
2602 return false;
2603
Matthias Braunf1caa282017-12-15 22:22:58 +00002604 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002605 if (Arg.hasByValAttr())
2606 return false;
2607 }
2608
2609 LLVMContext &Ctx = *DAG.getContext();
2610
2611 // Check that the call results are passed in the same way.
2612 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2613 CCAssignFnForCall(CalleeCC, IsVarArg),
2614 CCAssignFnForCall(CallerCC, IsVarArg)))
2615 return false;
2616
2617 // The callee has to preserve all registers the caller needs to preserve.
2618 if (!CCMatch) {
2619 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2620 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2621 return false;
2622 }
2623
2624 // Nothing more to check if the callee is taking no arguments.
2625 if (Outs.empty())
2626 return true;
2627
2628 SmallVector<CCValAssign, 16> ArgLocs;
2629 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2630
2631 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2632
2633 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2634  // If the stack arguments for this call do not fit into our own save area,
2635  // the call cannot be lowered as a tail call.
2636 // TODO: Is this really necessary?
2637 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2638 return false;
2639
2640 const MachineRegisterInfo &MRI = MF.getRegInfo();
2641 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2642}
2643
2644bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2645 if (!CI->isTailCall())
2646 return false;
2647
2648 const Function *ParentFn = CI->getParent()->getParent();
2649 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2650 return false;
2651
2652 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2653 return (Attr.getValueAsString() != "true");
2654}
2655
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002656// The wave scratch offset register is used as the global base pointer.
2657SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2658 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002659 SelectionDAG &DAG = CLI.DAG;
2660 const SDLoc &DL = CLI.DL;
2661 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2662 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2663 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2664 SDValue Chain = CLI.Chain;
2665 SDValue Callee = CLI.Callee;
2666 bool &IsTailCall = CLI.IsTailCall;
2667 CallingConv::ID CallConv = CLI.CallConv;
2668 bool IsVarArg = CLI.IsVarArg;
2669 bool IsSibCall = false;
2670 bool IsThisReturn = false;
2671 MachineFunction &MF = DAG.getMachineFunction();
2672
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002673 if (IsVarArg) {
2674 return lowerUnhandledCall(CLI, InVals,
2675 "unsupported call to variadic function ");
2676 }
2677
Matt Arsenault935f3b72018-08-08 16:58:39 +00002678 if (!CLI.CS.getInstruction())
2679 report_fatal_error("unsupported libcall legalization");
2680
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002681 if (!CLI.CS.getCalledFunction()) {
2682 return lowerUnhandledCall(CLI, InVals,
2683 "unsupported indirect call to function ");
2684 }
2685
2686 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2687 return lowerUnhandledCall(CLI, InVals,
2688 "unsupported required tail call to function ");
2689 }
2690
Matt Arsenault1fb90132018-06-28 10:18:36 +00002691 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2692 // Note the issue is with the CC of the calling function, not of the call
2693 // itself.
2694 return lowerUnhandledCall(CLI, InVals,
2695 "unsupported call from graphics shader of function ");
2696 }
2697
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002698 if (IsTailCall) {
2699 IsTailCall = isEligibleForTailCallOptimization(
2700 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2701 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2702 report_fatal_error("failed to perform tail call elimination on a call "
2703 "site marked musttail");
2704 }
2705
2706 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2707
2708 // A sibling call is one where we're under the usual C ABI and not planning
2709 // to change that but can still do a tail call:
2710 if (!TailCallOpt && IsTailCall)
2711 IsSibCall = true;
2712
2713 if (IsTailCall)
2714 ++NumTailCalls;
2715 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002716
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002717 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2718
2719 // Analyze operands of the call, assigning locations to each operand.
2720 SmallVector<CCValAssign, 16> ArgLocs;
2721 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2722 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002723
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002724 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2725
2726 // Get a count of how many bytes are to be pushed on the stack.
2727 unsigned NumBytes = CCInfo.getNextStackOffset();
2728
2729 if (IsSibCall) {
2730 // Since we're not changing the ABI to make this a tail call, the memory
2731 // operands are already available in the caller's incoming argument space.
2732 NumBytes = 0;
2733 }
2734
2735 // FPDiff is the byte offset of the call's argument area from the callee's.
2736 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2737 // by this amount for a tail call. In a sibling call it must be 0 because the
2738 // caller will deallocate the entire stack and the callee still expects its
2739 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002740 int32_t FPDiff = 0;
2741 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002742 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2743
2744 // Adjust the stack pointer for the new arguments...
2745  // These operations are automatically eliminated by the prolog/epilog pass.
2746 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002747 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002748
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002749 SmallVector<SDValue, 4> CopyFromChains;
2750
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002751 // In the HSA case, this should be an identity copy.
2752 SDValue ScratchRSrcReg
2753 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2754 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
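    // The callee reads its scratch resource descriptor from s[0:3], so pass
    // ours along in those registers.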
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002755 CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002756 Chain = DAG.getTokenFactor(DL, CopyFromChains);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002757 }
2758
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002759 SmallVector<SDValue, 8> MemOpChains;
2760 MVT PtrVT = MVT::i32;
2761
2762 // Walk the register/memloc assignments, inserting copies/loads.
2763 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2764 ++i, ++realArgIdx) {
2765 CCValAssign &VA = ArgLocs[i];
2766 SDValue Arg = OutVals[realArgIdx];
2767
2768 // Promote the value if needed.
2769 switch (VA.getLocInfo()) {
2770 case CCValAssign::Full:
2771 break;
2772 case CCValAssign::BCvt:
2773 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2774 break;
2775 case CCValAssign::ZExt:
2776 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2777 break;
2778 case CCValAssign::SExt:
2779 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2780 break;
2781 case CCValAssign::AExt:
2782 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2783 break;
2784 case CCValAssign::FPExt:
2785 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2786 break;
2787 default:
2788 llvm_unreachable("Unknown loc info!");
2789 }
2790
2791 if (VA.isRegLoc()) {
2792 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2793 } else {
2794 assert(VA.isMemLoc());
2795
2796 SDValue DstAddr;
2797 MachinePointerInfo DstInfo;
2798
2799 unsigned LocMemOffset = VA.getLocMemOffset();
2800 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002801
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002802 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002803 unsigned Align = 0;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002804
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002805 if (IsTailCall) {
2806 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2807 unsigned OpSize = Flags.isByVal() ?
2808 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002809
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002810        // FIXME: We can do better than the minimum required byval alignment.
2811 Align = Flags.isByVal() ? Flags.getByValAlign() :
2812 MinAlign(Subtarget->getStackAlignment(), Offset);
2813
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002814 Offset = Offset + FPDiff;
2815 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
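        // Creating a fixed frame object pins this argument to its final slot
        // in the caller's incoming argument area, where the callee will expect
        // it once the tail call reuses the frame.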
2816
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002817 DstAddr = DAG.getFrameIndex(FI, PtrVT);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002818 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2819
2820 // Make sure any stack arguments overlapping with where we're storing
2821 // are loaded before this eventual operation. Otherwise they'll be
2822 // clobbered.
2823
2824 // FIXME: Why is this really necessary? This seems to just result in a
2825      // lot of code that copies stack values and writes them back to the same
2826 // locations, which are supposed to be immutable?
2827 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2828 } else {
2829 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002830 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002831 Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002832 }
2833
2834 if (Outs[i].Flags.isByVal()) {
2835 SDValue SizeNode =
2836 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2837 SDValue Cpy = DAG.getMemcpy(
2838 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2839 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002840 /*isTailCall = */ false, DstInfo,
2841 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
Matt Arsenault0da63502018-08-31 05:49:54 +00002842 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002843
2844 MemOpChains.push_back(Cpy);
2845 } else {
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002846 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002847 MemOpChains.push_back(Store);
2848 }
2849 }
2850 }
2851
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002852 // Copy special input registers after user input arguments.
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002853 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002854
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002855 if (!MemOpChains.empty())
2856 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2857
2858 // Build a sequence of copy-to-reg nodes chained together with token chain
2859 // and flag operands which copy the outgoing args into the appropriate regs.
2860 SDValue InFlag;
2861 for (auto &RegToPass : RegsToPass) {
2862 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2863 RegToPass.second, InFlag);
2864 InFlag = Chain.getValue(1);
2865 }
2866
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002867
2868 SDValue PhysReturnAddrReg;
2869 if (IsTailCall) {
2870 // Since the return is being combined with the call, we need to pass on the
2871 // return address.
2872
2873 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2874 SDValue ReturnAddrReg = CreateLiveInRegister(
2875 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2876
2877 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2878 MVT::i64);
2879 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2880 InFlag = Chain.getValue(1);
2881 }
2882
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002883 // We don't usually want to end the call-sequence here because we would tidy
2884  // the frame up *after* the call; however, in the ABI-changing tail-call case
2885 // we've carefully laid out the parameters so that when sp is reset they'll be
2886 // in the correct location.
2887 if (IsTailCall && !IsSibCall) {
2888 Chain = DAG.getCALLSEQ_END(Chain,
2889 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2890 DAG.getTargetConstant(0, DL, MVT::i32),
2891 InFlag, DL);
2892 InFlag = Chain.getValue(1);
2893 }
2894
2895 std::vector<SDValue> Ops;
2896 Ops.push_back(Chain);
2897 Ops.push_back(Callee);
Scott Linderd19d1972019-02-04 20:00:07 +00002898 // Add a redundant copy of the callee global which will not be legalized, as
2899 // we need direct access to the callee later.
2900 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2901 const GlobalValue *GV = GSD->getGlobal();
2902 Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002903
2904 if (IsTailCall) {
2905 // Each tail call may have to adjust the stack by a different amount, so
2906 // this information must travel along with the operation for eventual
2907 // consumption by emitEpilogue.
2908 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002909
2910 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002911 }
2912
2913 // Add argument registers to the end of the list so that they are known live
2914 // into the call.
2915 for (auto &RegToPass : RegsToPass) {
2916 Ops.push_back(DAG.getRegister(RegToPass.first,
2917 RegToPass.second.getValueType()));
2918 }
2919
2920 // Add a register mask operand representing the call-preserved registers.
2921
Tom Stellardc5a154d2018-06-28 23:47:12 +00002922 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002923 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2924 assert(Mask && "Missing call preserved mask for calling convention");
2925 Ops.push_back(DAG.getRegisterMask(Mask));
2926
2927 if (InFlag.getNode())
2928 Ops.push_back(InFlag);
2929
2930 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2931
2932  // If we're doing a tail call, use a TC_RETURN here rather than an
2933 // actual call instruction.
2934 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002935 MFI.setHasTailCall();
2936 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002937 }
2938
2939 // Returns a chain and a flag for retval copy to use.
2940 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2941 Chain = Call.getValue(0);
2942 InFlag = Call.getValue(1);
2943
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002944 uint64_t CalleePopBytes = NumBytes;
2945 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002946 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2947 InFlag, DL);
2948 if (!Ins.empty())
2949 InFlag = Chain.getValue(1);
2950
2951 // Handle result values, copying them out of physregs into vregs that we
2952 // return.
2953 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2954 InVals, IsThisReturn,
2955 IsThisReturn ? OutVals[0] : SDValue());
2956}
2957
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002958unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2959 SelectionDAG &DAG) const {
2960 unsigned Reg = StringSwitch<unsigned>(RegName)
2961 .Case("m0", AMDGPU::M0)
2962 .Case("exec", AMDGPU::EXEC)
2963 .Case("exec_lo", AMDGPU::EXEC_LO)
2964 .Case("exec_hi", AMDGPU::EXEC_HI)
2965 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2966 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2967 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2968 .Default(AMDGPU::NoRegister);
2969
2970 if (Reg == AMDGPU::NoRegister) {
2971    report_fatal_error(Twine("invalid register name \""
2972                             + StringRef(RegName) + "\"."));
2973  }
2975
Matt Arsenaulte4c2e9b2019-06-19 23:54:58 +00002976 if (!Subtarget->hasFlatScrRegister() &&
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00002977 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002978 report_fatal_error(Twine("invalid register \""
2979 + StringRef(RegName) + "\" for subtarget."));
2980 }
2981
2982 switch (Reg) {
2983 case AMDGPU::M0:
2984 case AMDGPU::EXEC_LO:
2985 case AMDGPU::EXEC_HI:
2986 case AMDGPU::FLAT_SCR_LO:
2987 case AMDGPU::FLAT_SCR_HI:
2988 if (VT.getSizeInBits() == 32)
2989 return Reg;
2990 break;
2991 case AMDGPU::EXEC:
2992 case AMDGPU::FLAT_SCR:
2993 if (VT.getSizeInBits() == 64)
2994 return Reg;
2995 break;
2996 default:
2997 llvm_unreachable("missing register type checking");
2998 }
2999
3000 report_fatal_error(Twine("invalid type for register \""
3001 + StringRef(RegName) + "\"."));
3002}
3003
Matt Arsenault786724a2016-07-12 21:41:32 +00003004// If kill is not the last instruction, split the block so kill is always a
3005// proper terminator.
3006MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
3007 MachineBasicBlock *BB) const {
3008 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3009
3010 MachineBasicBlock::iterator SplitPoint(&MI);
3011 ++SplitPoint;
3012
3013 if (SplitPoint == BB->end()) {
3014 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00003015 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00003016 return BB;
3017 }
3018
3019 MachineFunction *MF = BB->getParent();
3020 MachineBasicBlock *SplitBB
3021 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
3022
Matt Arsenault786724a2016-07-12 21:41:32 +00003023 MF->insert(++MachineFunction::iterator(BB), SplitBB);
3024 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
3025
Matt Arsenaultd40ded62016-07-22 17:01:15 +00003026 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00003027 BB->addSuccessor(SplitBB);
3028
Marek Olsakce76ea02017-10-24 10:27:13 +00003029 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00003030 return SplitBB;
3031}
3032
Matt Arsenault8ad1dec2019-06-20 20:54:32 +00003033/// Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is
3034/// true, \p MI will be the only instruction in the loop body block. Otherwise,
3035/// it will be the first instruction in the remainder block.
3036///
3037/// \returns { LoopBody, Remainder }
3038static std::pair<MachineBasicBlock *, MachineBasicBlock *>
3039splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) {
3040 MachineFunction *MF = MBB.getParent();
3041 MachineBasicBlock::iterator I(&MI);
3042
3043 // To insert the loop we need to split the block. Move everything after this
3044 // point to a new block, and insert a new empty block between the two.
3045 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3046 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3047 MachineFunction::iterator MBBI(MBB);
3048 ++MBBI;
3049
3050 MF->insert(MBBI, LoopBB);
3051 MF->insert(MBBI, RemainderBB);
3052
3053 LoopBB->addSuccessor(LoopBB);
3054 LoopBB->addSuccessor(RemainderBB);
3055
3056 // Move the rest of the block into a new block.
3057 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
3058
3059 if (InstInLoop) {
3060 auto Next = std::next(I);
3061
3062 // Move instruction to loop body.
3063 LoopBB->splice(LoopBB->begin(), &MBB, I, Next);
3064
3065 // Move the rest of the block.
3066 RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end());
3067 } else {
3068 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3069 }
3070
3071 MBB.addSuccessor(LoopBB);
3072
3073 return std::make_pair(LoopBB, RemainderBB);
3074}
3075
Matt Arsenault85f38902019-07-19 19:47:30 +00003076/// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
3077void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const {
3078 MachineBasicBlock *MBB = MI.getParent();
3079 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3080 auto I = MI.getIterator();
3081 auto E = std::next(I);
3082
3083 BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
3084 .addImm(0);
3085
3086 MIBundleBuilder Bundler(*MBB, I, E);
3087 finalizeBundle(*MBB, Bundler.begin());
3088}
3089
Matt Arsenault8ad1dec2019-06-20 20:54:32 +00003090MachineBasicBlock *
3091SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
3092 MachineBasicBlock *BB) const {
3093 const DebugLoc &DL = MI.getDebugLoc();
3094
3095 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3096
3097 MachineBasicBlock *LoopBB;
3098 MachineBasicBlock *RemainderBB;
3099 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3100
Matt Arsenaultbb582eb2019-08-01 18:41:32 +00003101 // Apparently kill flags are only valid if the def is in the same block?
3102 if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0))
3103 Src->setIsKill(false);
Matt Arsenault8ad1dec2019-06-20 20:54:32 +00003104
3105 std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true);
3106
3107 MachineBasicBlock::iterator I = LoopBB->end();
Matt Arsenault8ad1dec2019-06-20 20:54:32 +00003108
3109 const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
3110 AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
3111
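  // The encoded operand addresses a 1-bit field, TRAPSTS.MEM_VIOL, for the
  // s_setreg/s_getreg instructions below.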
3112 // Clear TRAP_STS.MEM_VIOL
3113 BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
3114 .addImm(0)
3115 .addImm(EncodedReg);
3116
Matt Arsenault85f38902019-07-19 19:47:30 +00003117 bundleInstWithWaitcnt(MI);
Matt Arsenault8ad1dec2019-06-20 20:54:32 +00003118
3119 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3120
3121 // Load and check TRAP_STS.MEM_VIOL
3122 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg)
3123 .addImm(EncodedReg);
3124
3125 // FIXME: Do we need to use an isel pseudo that may clobber scc?
3126 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32))
3127 .addReg(Reg, RegState::Kill)
3128 .addImm(0);
3129 BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3130 .addMBB(LoopBB);
3131
3132 return RemainderBB;
3133}
3134
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003135// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
3136// wavefront. If the value is uniform and just happens to be in a VGPR, this
3137// will only do one iteration. In the worst case, this will loop 64 times.
3138//
3139// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003140static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
3141 const SIInstrInfo *TII,
3142 MachineRegisterInfo &MRI,
3143 MachineBasicBlock &OrigBB,
3144 MachineBasicBlock &LoopBB,
3145 const DebugLoc &DL,
3146 const MachineOperand &IdxReg,
3147 unsigned InitReg,
3148 unsigned ResultReg,
3149 unsigned PhiReg,
3150 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003151 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003152 bool UseGPRIdxMode,
3153 bool IsIndirectSrc) {
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003154 MachineFunction *MF = OrigBB.getParent();
3155 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3156 const SIRegisterInfo *TRI = ST.getRegisterInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003157 MachineBasicBlock::iterator I = LoopBB.begin();
3158
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003159 const TargetRegisterClass *BoolRC = TRI->getBoolRC();
3160 unsigned PhiExec = MRI.createVirtualRegister(BoolRC);
3161 unsigned NewExec = MRI.createVirtualRegister(BoolRC);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003162 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003163 unsigned CondReg = MRI.createVirtualRegister(BoolRC);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003164
3165 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
3166 .addReg(InitReg)
3167 .addMBB(&OrigBB)
3168 .addReg(ResultReg)
3169 .addMBB(&LoopBB);
3170
3171 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
3172 .addReg(InitSaveExecReg)
3173 .addMBB(&OrigBB)
3174 .addReg(NewExec)
3175 .addMBB(&LoopBB);
3176
3177  // Read the next variant; this is also the loop-back target.
3178 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
3179 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
3180
3181  // Compare the just-read index value to all possible Idx values.
3182 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
3183 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00003184 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003185
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003186  // Update EXEC, saving the original EXEC value to NewExec.
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003187 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
3188 : AMDGPU::S_AND_SAVEEXEC_B64),
3189 NewExec)
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003190 .addReg(CondReg, RegState::Kill);
3191
3192 MRI.setSimpleHint(NewExec, CondReg);
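  // Hint the allocator to assign NewExec the same register as CondReg, so the
  // save-exec can overwrite the compare result instead of taking a fresh
  // register.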
3193
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003194 if (UseGPRIdxMode) {
3195 unsigned IdxReg;
3196 if (Offset == 0) {
3197 IdxReg = CurrentIdxReg;
3198 } else {
3199 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3200 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
3201 .addReg(CurrentIdxReg, RegState::Kill)
3202 .addImm(Offset);
3203 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003204 unsigned IdxMode = IsIndirectSrc ?
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00003205 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003206 MachineInstr *SetOn =
3207 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3208 .addReg(IdxReg, RegState::Kill)
3209 .addImm(IdxMode);
3210 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003211 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003212    // Move the just-read index into M0.
3213 if (Offset == 0) {
3214 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3215 .addReg(CurrentIdxReg, RegState::Kill);
3216 } else {
3217 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
3218 .addReg(CurrentIdxReg, RegState::Kill)
3219 .addImm(Offset);
3220 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003221 }
3222
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003223 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003224 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003225 MachineInstr *InsertPt =
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003226 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3227 : AMDGPU::S_XOR_B64_term), Exec)
3228 .addReg(Exec)
3229 .addReg(NewExec);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003230
3231 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3232 // s_cbranch_scc0?
3233
3234 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3235 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3236 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003237
3238 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003239}
3240
3241// This has slightly sub-optimal regalloc when the source vector is killed by
3242// the read. The register allocator does not understand that the kill is
3243// per-workitem, so the source is kept alive for the whole loop, and we end up
3244// not reusing a subregister from it, using one more VGPR than necessary. That
3245// VGPR was saved back when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003246static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3247 MachineBasicBlock &MBB,
3248 MachineInstr &MI,
3249 unsigned InitResultReg,
3250 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003251 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003252 bool UseGPRIdxMode,
3253 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003254 MachineFunction *MF = MBB.getParent();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003255 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3256 const SIRegisterInfo *TRI = ST.getRegisterInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003257 MachineRegisterInfo &MRI = MF->getRegInfo();
3258 const DebugLoc &DL = MI.getDebugLoc();
3259 MachineBasicBlock::iterator I(&MI);
3260
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003261 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003262 unsigned DstReg = MI.getOperand(0).getReg();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003263 unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3264 unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3265 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3266 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003267
3268 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3269
3270 // Save the EXEC mask
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003271 BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3272 .addReg(Exec);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003273
Matt Arsenault8ad1dec2019-06-20 20:54:32 +00003274 MachineBasicBlock *LoopBB;
3275 MachineBasicBlock *RemainderBB;
3276 std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003277
3278 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3279
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003280 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3281 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003282 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003283
3284 MachineBasicBlock::iterator First = RemainderBB->begin();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003285 BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003286 .addReg(SaveExec);
3287
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003288 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003289}
3290
3291// Returns subreg index, offset
3292static std::pair<unsigned, int>
3293computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3294 const TargetRegisterClass *SuperRC,
3295 unsigned VecReg,
3296 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003297 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
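  // e.g. a 128-bit super-register class gives NumElts == 4: an in-range
  // Offset of 2 folds into subregister sub2 with a residual offset of 0,
  // while an out-of-range Offset is returned unchanged to be applied
  // dynamically.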
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003298
3299  // Skip out-of-bounds offsets, or else we would end up using an undefined
3300 // register.
3301 if (Offset >= NumElts || Offset < 0)
3302 return std::make_pair(AMDGPU::sub0, Offset);
3303
3304 return std::make_pair(AMDGPU::sub0 + Offset, 0);
3305}
3306
3307// Return true if the index is an SGPR and was set.
3308static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3309 MachineRegisterInfo &MRI,
3310 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003311 int Offset,
3312 bool UseGPRIdxMode,
3313 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003314 MachineBasicBlock *MBB = MI.getParent();
3315 const DebugLoc &DL = MI.getDebugLoc();
3316 MachineBasicBlock::iterator I(&MI);
3317
3318 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3319 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3320
3321 assert(Idx->getReg() != AMDGPU::NoRegister);
3322
3323 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3324 return false;
3325
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003326 if (UseGPRIdxMode) {
3327 unsigned IdxMode = IsIndirectSrc ?
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00003328 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003329 if (Offset == 0) {
3330 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00003331 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3332 .add(*Idx)
3333 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003334
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003335 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003336 } else {
3337 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3338 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00003339 .add(*Idx)
3340 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003341 MachineInstr *SetOn =
3342 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3343 .addReg(Tmp, RegState::Kill)
3344 .addImm(IdxMode);
3345
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003346 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003347 }
3348
3349 return true;
3350 }
3351
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003352 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003353 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3354 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003355 } else {
3356 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003357 .add(*Idx)
3358 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003359 }
3360
3361 return true;
3362}
3363
3364// Control flow needs to be inserted if indexing with a VGPR.
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const GCNSubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();

    return &MBB;
  }

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
                              Offset, UseGPRIdxMode, true);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();

  return LoopBB;
}

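// Pick the V_MOVRELD_B32 pseudo whose tuple size matches the vector's
// register size. The pseudos tie the whole vector to the single-dword
// v_movreld_b32 write so the untouched lanes are kept live across it.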
static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
                                 const TargetRegisterClass *VecRC) {
  switch (TRI.getRegSizeInBits(*VecRC)) {
  case 32: // 4 bytes
    return AMDGPU::V_MOVRELD_B32_V1;
  case 64: // 8 bytes
    return AMDGPU::V_MOVRELD_B32_V2;
  case 128: // 16 bytes
    return AMDGPU::V_MOVRELD_B32_V4;
  case 256: // 32 bytes
    return AMDGPU::V_MOVRELD_B32_V8;
  case 512: // 64 bytes
    return AMDGPU::V_MOVRELD_B32_V16;
  default:
    llvm_unreachable("unsupported size for MOVRELD pseudos");
  }
}

static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const GCNSubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
        .add(*SrcVec)
        .add(*Val)
        .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
          .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
          .add(*Val)
          .addReg(Dst, RegState::ImplicitDefine)
          .addReg(SrcVec->getReg(), RegState::Implicit)
          .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

      BuildMI(MBB, I, DL, MovRelDesc)
          .addReg(Dst, RegState::Define)
          .addReg(SrcVec->getReg())
          .add(*Val)
          .addImm(SubReg - AMDGPU::sub0);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  if (Val->isReg())
    MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode, false);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(PhiReg, RegState::Undef, SubReg) // vdst
        .add(*Val)                               // src0
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(PhiReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

    BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
        .addReg(Dst, RegState::Define)
        .addReg(PhiReg)
        .add(*Val)
        .addImm(SubReg - AMDGPU::sub0);
  }

  MI.eraseFromParent();

  return LoopBB;
}

MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  MachineFunction *MF = BB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  if (TII->isMIMG(MI)) {
    if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
      report_fatal_error("missing mem operand from MIMG instruction");
    }
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.

    return BB;
  }

  switch (MI.getOpcode()) {
  case AMDGPU::S_ADD_U64_PSEUDO:
  case AMDGPU::S_SUB_U64_PSEUDO: {
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    const TargetRegisterClass *BoolRC = TRI->getBoolRC();
    const DebugLoc &DL = MI.getDebugLoc();

    MachineOperand &Dest = MI.getOperand(0);
    MachineOperand &Src0 = MI.getOperand(1);
    MachineOperand &Src1 = MI.getOperand(2);

    unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
    unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

    MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
        Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32_XM0RegClass);
    MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
        Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32_XM0RegClass);

    MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
        Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32_XM0RegClass);
    MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
        Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32_XM0RegClass);

    bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);

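    // The 64-bit pseudo is expanded into a 32-bit carry chain, e.g. for an
    // add:
    //   DestSub0 = S_ADD_U32  Src0.sub0, Src1.sub0  ; SCC = carry-out
    //   DestSub1 = S_ADDC_U32 Src0.sub1, Src1.sub1  ; SCC consumed as carry-in
    //   Dest     = REG_SEQUENCE DestSub0, sub0, DestSub1, sub1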
    unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
    unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
    BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
        .add(Src0Sub0)
        .add(Src1Sub0);
    BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
        .add(Src0Sub1)
        .add(Src1Sub1);
    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
        .addReg(DestSub0)
        .addImm(AMDGPU::sub0)
        .addReg(DestSub1)
        .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INIT_M0: {
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .add(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INIT_EXEC:
    // This should be before all vector instructions.
    BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
            AMDGPU::EXEC)
        .addImm(MI.getOperand(0).getImm());
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC_LO:
    // This should be before all vector instructions.
    BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
            AMDGPU::EXEC_LO)
        .addImm(MI.getOperand(0).getImm());
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
    // Extract the thread count from an SGPR input and set EXEC accordingly.
    // Since BFM can't shift by 64, handle that case with CMP + CMOV.
    //
    // S_BFE_U32 count, input, {shift, 7}
    // S_BFM_B64 exec, count, 0
    // S_CMP_EQ_U32 count, 64
    // S_CMOV_B64 exec, -1
    MachineInstr *FirstMI = &*BB->begin();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    unsigned InputReg = MI.getOperand(0).getReg();
    unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    bool Found = false;

    // Move the COPY of the input reg to the beginning, so that we can use it.
    for (auto I = BB->begin(); I != &MI; I++) {
      if (I->getOpcode() != TargetOpcode::COPY ||
          I->getOperand(0).getReg() != InputReg)
        continue;

      if (I == FirstMI) {
        FirstMI = &*++BB->begin();
      } else {
        I->removeFromParent();
        BB->insert(FirstMI, &*I);
      }
      Found = true;
      break;
    }
    assert(Found);
    (void)Found;

    // This should be before all vector instructions.
    unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
    bool isWave32 = getSubtarget()->isWave32();
    unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
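    // The second S_BFE_U32 source operand packs the field offset into bits
    // [5:0] and the field width into bits [22:16]; 0x70000 below requests a
    // 7-bit field, wide enough for a thread count up to the wavefront size.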
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
        .addReg(InputReg)
        .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
    BuildMI(*BB, FirstMI, DebugLoc(),
            TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
            Exec)
        .addReg(CountReg)
        .addImm(0);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
        .addReg(CountReg, RegState::Kill)
        .addImm(getSubtarget()->getWavefrontSize());
    BuildMI(*BB, FirstMI, DebugLoc(),
            TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
            Exec)
        .addImm(-1);
    MI.eraseFromParent();
    return BB;
  }

  case AMDGPU::GET_GROUPSTATICSIZE: {
    assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
           getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL);
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
        .add(MI.getOperand(0))
        .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return splitKillBlock(MI, BB);
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const SIRegisterInfo *TRI = ST.getRegisterInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
    unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC);

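    // Split the 64-bit select into two 32-bit selects on the sub0/sub1
    // halves, sharing a single copy of the condition mask:
    //   DstLo = V_CNDMASK_B32_e64 0, Src0.sub0, 0, Src1.sub0, SrcCondCopy
    //   DstHi = V_CNDMASK_B32_e64 0, Src0.sub1, 0, Src1.sub1, SrcCondCopy
    //   Dst   = REG_SEQUENCE DstLo, sub0, DstHi, sub1
    // (the zero immediates are the source modifier operands).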
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
        .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
        .addImm(0)
        .addReg(Src0, 0, AMDGPU::sub0)
        .addImm(0)
        .addReg(Src1, 0, AMDGPU::sub0)
        .addReg(SrcCondCopy);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
        .addImm(0)
        .addReg(Src0, 0, AMDGPU::sub1)
        .addImm(0)
        .addReg(Src1, 0, AMDGPU::sub1)
        .addReg(SrcCondCopy);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
        .addReg(DstLo)
        .addImm(AMDGPU::sub0)
        .addReg(DstHi)
        .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_BR_UNDEF: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();
    MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
                           .add(MI.getOperand(0));
    Br->getOperand(1).setIsUndef(true); // read undef SCC
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::ADJCALLSTACKUP:
  case AMDGPU::ADJCALLSTACKDOWN: {
    const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
    MachineInstrBuilder MIB(*MF, &MI);

    // Add an implicit use of the frame offset reg to prevent the restore copy
    // inserted after the call from being reordered after stack operations in
    // the caller's frame.
    MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
        .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
        .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
    return BB;
  }
  case AMDGPU::SI_CALL_ISEL: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();

    unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);

    MachineInstrBuilder MIB;
    MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);

    for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
      MIB.add(MI.getOperand(I));

    MIB.cloneMemRefs(MI);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::V_ADD_I32_e32:
  case AMDGPU::V_SUB_I32_e32:
  case AMDGPU::V_SUBREV_I32_e32: {
    // TODO: Define distinct V_*_I32_Pseudo instructions instead.
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned Opc = MI.getOpcode();

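    // If this subtarget has no valid e32 encoding for the opcode, fall back
    // to the VOP3 (e64) form, which defines the carry-out in an explicit
    // SGPR and carries an extra clamp operand.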
    bool NeedClampOperand = false;
    if (TII->pseudoToMCOpcode(Opc) == -1) {
      Opc = AMDGPU::getVOPe64(Opc);
      NeedClampOperand = true;
    }

    auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
    if (TII->isVOP3(*I)) {
      const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
      const SIRegisterInfo *TRI = ST.getRegisterInfo();
      I.addReg(TRI->getVCC(), RegState::Define);
    }
    I.add(MI.getOperand(1))
        .add(MI.getOperand(2));
    if (NeedClampOperand)
      I.addImm(0); // clamp bit for e64 encoding

    TII->legalizeOperands(*I);

    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::DS_GWS_INIT:
  case AMDGPU::DS_GWS_SEMA_V:
  case AMDGPU::DS_GWS_SEMA_BR:
  case AMDGPU::DS_GWS_SEMA_P:
  case AMDGPU::DS_GWS_SEMA_RELEASE_ALL:
  case AMDGPU::DS_GWS_BARRIER:
    // An s_waitcnt 0 is required to be the instruction immediately following.
    if (getSubtarget()->hasGWSAutoReplay()) {
      bundleInstWithWaitcnt(MI);
      return BB;
    }

    return emitGWSMemViolTestLoop(MI, BB);
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return isTypeLegal(VT.getScalarType());
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 be used always if legal? For now it would force VALU
  // shifts.
  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and the other f64 operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full
// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
// however does not support denormals, so we do report fma as faster if we have
// a fast fma device and require denormals.
//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32: {
    // This is as fast on some subtargets. However, we always have full rate f32
    // mad available which returns the same result as the separate operations
    // which we should prefer over fma. We can't use this if we want to support
    // denormals, so only report this in these cases.
    if (Subtarget->hasFP32Denormals())
      return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();

    // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
    return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
  }
  case MVT::f64:
    return true;
  case MVT::f16:
    return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
  default:
    break;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal.
SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
                                             SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4f16);

  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);

  SDLoc SL(Op);
  SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal.
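// For example, a v4f16 fadd becomes two v2f16 fadds on the low and high
// halves, and the halves are recombined with a CONCAT_VECTORS node.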
SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4i16 || VT == MVT::v4f16);

  SDValue Lo0, Hi0;
  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
  SDValue Lo1, Hi1;
  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);

  SDLoc SL(Op);

  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4i16 || VT == MVT::v4f16);

  SDValue Lo0, Hi0;
  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
  SDValue Lo1, Hi1;
  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
  SDValue Lo2, Hi2;
  std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);

  SDLoc SL(Op);

  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::INSERT_SUBVECTOR:
    return lowerINSERT_SUBVECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::FP_ROUND:
    return lowerFP_ROUND(Op, DAG);
  case ISD::TRAP:
    return lowerTRAP(Op, DAG);
  case ISD::DEBUGTRAP:
    return lowerDEBUGTRAP(Op, DAG);
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FCANONICALIZE:
    return splitUnaryVectorOp(Op, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    return lowerFMINNUM_FMAXNUM(Op, DAG);
  case ISD::FMA:
    return splitTernaryVectorOp(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
    return splitBinaryVectorOp(Op, DAG);
  }
  return SDValue();
}

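// On subtargets with unpacked D16 memory instructions
// (hasUnpackedD16VMem()), each 16-bit element of a D16 load is returned in
// its own 32-bit register, so a v2f16/v4f16 load is legalized as
// v2i32/v4i32 and the helpers below truncate and bitcast the result back to
// the packed type.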
static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
                                       const SDLoc &DL,
                                       SelectionDAG &DAG, bool Unpacked) {
  if (!LoadVT.isVector())
    return Result;

  if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
    // Truncate to v2i16/v4i16.
    EVT IntLoadVT = LoadVT.changeTypeToInteger();

    // Work around the legalizer not scalarizing truncate after vector op
    // legalization by not creating an intermediate vector trunc.
    SmallVector<SDValue, 4> Elts;
    DAG.ExtractVectorElements(Result, Elts);
    for (SDValue &Elt : Elts)
      Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);

    Result = DAG.getBuildVector(IntLoadVT, DL, Elts);

    // Bitcast to original type (v2f16/v4f16).
    return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
  }

  // Cast back to the original packed type.
  return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
}

SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
                                              MemSDNode *M,
                                              SelectionDAG &DAG,
                                              ArrayRef<SDValue> Ops,
                                              bool IsIntrinsic) const {
  SDLoc DL(M);

  bool Unpacked = Subtarget->hasUnpackedD16VMem();
  EVT LoadVT = M->getValueType(0);

  EVT EquivLoadVT = LoadVT;
  if (Unpacked && LoadVT.isVector()) {
    EquivLoadVT = LoadVT.isVector() ?
      EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                       LoadVT.getVectorNumElements()) : LoadVT;
  }

  // Change from v4f16/v2f16 to EquivLoadVT.
  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);

  SDValue Load
    = DAG.getMemIntrinsicNode(
        IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
        VTList, Ops, M->getMemoryVT(),
        M->getMemOperand());
  if (!Unpacked) // Just adjusted the opcode.
    return Load;

  SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);

  return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
}

static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
  int CondCode = CD->getSExtValue();
  if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
      CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
    return DAG.getUNDEF(VT);

  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);

  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  SDLoc DL(N);

  EVT CmpVT = LHS.getValueType();
  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
    unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
      ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
    RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
  }

  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);

  unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);

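  // The comparison produces a lane mask, one bit per lane, so the carrier
  // type is an integer as wide as the wavefront; zero-extend or truncate if
  // the intrinsic was declared with a different return width.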
  SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
                              DAG.getCondCode(CCOpcode));
  if (VT.bitsEq(CCVT))
    return SetCC;
  return DAG.getZExtOrTrunc(SetCC, DL, VT);
}

static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));

  int CondCode = CD->getSExtValue();
  if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
      CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
    return DAG.getUNDEF(VT);
  }

  SDValue Src0 = N->getOperand(1);
  SDValue Src1 = N->getOperand(2);
  EVT CmpVT = Src0.getValueType();
  SDLoc SL(N);

  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
    Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
    Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
  }

  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
  unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
  SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
                              Src1, DAG.getCondCode(CCOpcode));
  if (VT.bitsEq(CCVT))
    return SetCC;
  return DAG.getZExtOrTrunc(SetCC, SL, VT);
}

void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::INSERT_VECTOR_ELT: {
    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_cvt_pkrtz: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
                                Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
      return;
    }
    case Intrinsic::amdgcn_cvt_pknorm_i16:
    case Intrinsic::amdgcn_cvt_pknorm_u16:
    case Intrinsic::amdgcn_cvt_pk_i16:
    case Intrinsic::amdgcn_cvt_pk_u16: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      unsigned Opcode;

      if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
        Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
        Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
        Opcode = AMDGPUISD::CVT_PK_I16_I32;
      else
        Opcode = AMDGPUISD::CVT_PK_U16_U32;

      EVT VT = N->getValueType(0);
      if (isTypeLegal(VT))
        Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
      else {
        SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
        Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
      }
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
      Results.push_back(Res);
      Results.push_back(Res.getValue(1));
      return;
    }

    break;
  }
  case ISD::SELECT: {
    SDLoc SL(N);
    EVT VT = N->getValueType(0);
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
    SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));

    EVT SelectVT = NewVT;
    if (NewVT.bitsLT(MVT::i32)) {
      LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
      SelectVT = MVT::i32;
    }

    SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
                                    N->getOperand(0), LHS, RHS);

    if (NewVT != SelectVT)
      NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
    return;
  }
  case ISD::FNEG: {
    if (N->getValueType(0) != MVT::v2f16)
      break;

    SDLoc SL(N);
    SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));

    SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
                             BC,
                             DAG.getConstant(0x80008000, SL, MVT::i32));
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
    return;
  }
  case ISD::FABS: {
    if (N->getValueType(0) != MVT::v2f16)
      break;

    SDLoc SL(N);
    SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));

    SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
                             BC,
                             DAG.getConstant(0x7fff7fff, SL, MVT::i32));
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
    return;
  }
  default:
    break;
  }
}

/// Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}

bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  // FIXME: Either avoid relying on address space here or change the default
  // address space for functions to avoid the explicit check.
  return (GV->getValueType()->isFunctionTy() ||
          GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}

/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop

  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>

  unsigned CFNode = isCFIntrinsic(Intr);
  if (CFNode == 0) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
         (SetCC->getConstantOperandVal(1) == 1 &&
          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
        Chain, DL,
        CopyToReg->getOperand(1),
        SDValue(Result, i - 1),
        SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
      SDValue(Intr, Intr->getNumValues() - 1),
      Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  // Checking the depth
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
    return DAG.getConstant(0, DL, VT);

  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // Check for kernel and shader functions
  if (Info->isEntryFunction())
    return DAG.getConstant(0, DL, VT);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  // Get the return address reg and mark it as an implicit live-in
  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));

  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}

SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FTRUNC, DL, VT, Op);
}

SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

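  // There is no single instruction rounding f64 directly to f16; go through
  // FP_TO_FP16, which for an f64 source is expanded elsewhere into a
  // correctly rounded double-to-half sequence yielding the bits in the low
  // half of an i32.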
  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}

SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  const MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  bool IsIEEEMode = Info->getMode().IEEE;

  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where there is known no sNaN.
  if (IsIEEEMode)
    return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);

  if (VT == MVT::v4f16)
    return splitBinaryVectorOp(Op, DAG);
  return Op;
}

SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);

  if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
      !Subtarget->isTrapHandlerEnabled())
    return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);
  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
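  // The HSA trap handler ABI expects the queue pointer in SGPR0/SGPR1 and
  // takes the trap ID as an immediate operand.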
  SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
  SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                   QueuePtr, SDValue());
  SDValue Ops[] = {
    ToReg,
    DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
    SGPR01,
    ToReg.getValue(1)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);
  MachineFunction &MF = DAG.getMachineFunction();

  if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
      !Subtarget->isTrapHandlerEnabled()) {
    DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction().getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }

  SDValue Ops[] = {
    Chain,
    DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004586SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004587 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004588 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4589 if (Subtarget->hasApertureRegs()) {
Matt Arsenault0da63502018-08-31 05:49:54 +00004590 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004591 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4592 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
Matt Arsenault0da63502018-08-31 05:49:54 +00004593 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004594 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4595 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4596 unsigned Encoding =
4597 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4598 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4599 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
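    // For example, for LOCAL_ADDRESS this encodes (assuming the shared base
    // lives in the upper 16 bits of the aperture register, with id in bits
    // 5:0, offset in 10:6 and width-1 in 15:11):
    //   s_getreg_b32 dst, hwreg(ID_MEM_BASES, 16, 16)
    // and the shift below then moves the 16-bit field into the high half of
    // the 32-bit aperture value.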
Matt Arsenaulte823d922017-02-18 18:29:53 +00004600
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004601 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4602 SDValue ApertureReg = SDValue(
4603 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4604 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4605 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004606 }
4607
Matt Arsenault99c14522016-04-25 19:27:24 +00004608 MachineFunction &MF = DAG.getMachineFunction();
4609 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004610 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4611 assert(UserSGPR != AMDGPU::NoRegister);
4612
Matt Arsenault99c14522016-04-25 19:27:24 +00004613 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004614 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004615
4616 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4617 // private_segment_aperture_base_hi.
Matt Arsenault0da63502018-08-31 05:49:54 +00004618 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004619
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004620 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004621
4622 // TODO: Use custom target PseudoSourceValue.
4623  // TODO: We should use the value from the IR intrinsic call, but it might not
4624  // be available, and it is not clear how we would get it.
4625 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Matt Arsenault0da63502018-08-31 05:49:54 +00004626 AMDGPUAS::CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004627
4628 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004629 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004630 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004631 MachineMemOperand::MODereferenceable |
4632 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004633}
4634
4635SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4636 SelectionDAG &DAG) const {
4637 SDLoc SL(Op);
4638 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4639
4640 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004641 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4642
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004643 const AMDGPUTargetMachine &TM =
4644 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4645
Matt Arsenault99c14522016-04-25 19:27:24 +00004646 // flat -> local/private
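  // E.g. a flat -> private cast of a pointer %p is selected roughly as
  //   select (setcc %p, flat_null, ne), (trunc %p to i32), private_null
  // where private_null comes from TM.getNullPointerValue below.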
Matt Arsenault0da63502018-08-31 05:49:54 +00004647 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004648 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004649
Matt Arsenault0da63502018-08-31 05:49:54 +00004650 if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4651 DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004652 unsigned NullVal = TM.getNullPointerValue(DestAS);
4653 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004654 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4655 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4656
4657 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4658 NonNull, Ptr, SegmentNullPtr);
4659 }
4660 }
4661
4662 // local/private -> flat
Matt Arsenault0da63502018-08-31 05:49:54 +00004663 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004664 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004665
Matt Arsenault0da63502018-08-31 05:49:54 +00004666 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4667 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004668 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4669 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004670
Matt Arsenault99c14522016-04-25 19:27:24 +00004671 SDValue NonNull
4672 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4673
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004674 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004675 SDValue CvtPtr
4676 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4677
4678 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4679 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4680 FlatNullPtr);
4681 }
4682 }
4683
4684 // global <-> flat are no-ops and never emitted.
4685
4686 const MachineFunction &MF = DAG.getMachineFunction();
4687 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004688 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004689 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4690
4691 return DAG.getUNDEF(ASC->getValueType(0));
4692}
4693
Tim Renouf58168892019-07-04 17:38:24 +00004694// This lowers an INSERT_SUBVECTOR by extracting the individual elements from
4695// the small vector and inserting them into the big vector. That is better than
4696// the default expansion of doing it via a stack slot. Even though the use of
4697// the stack slot would be optimized away afterwards, the stack slot itself
4698// remains.
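// E.g. lowering (insert_subvector v4i16:%vec, v2i16:%ins, 2) produces:
//   %e0  = extract_vector_elt %ins, 0
//   %v0  = insert_vector_elt %vec, %e0, 2
//   %e1  = extract_vector_elt %ins, 1
//   %res = insert_vector_elt %v0, %e1, 3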
4699SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4700 SelectionDAG &DAG) const {
4701 SDValue Vec = Op.getOperand(0);
4702 SDValue Ins = Op.getOperand(1);
4703 SDValue Idx = Op.getOperand(2);
4704 EVT VecVT = Vec.getValueType();
4705 EVT InsVT = Ins.getValueType();
4706 EVT EltVT = VecVT.getVectorElementType();
4707 unsigned InsNumElts = InsVT.getVectorNumElements();
4708 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4709 SDLoc SL(Op);
4710
4711 for (unsigned I = 0; I != InsNumElts; ++I) {
4712 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
4713 DAG.getConstant(I, SL, MVT::i32));
4714 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
4715 DAG.getConstant(IdxVal + I, SL, MVT::i32));
4716 }
4717 return Vec;
4718}
4719
Matt Arsenault3aef8092017-01-23 23:09:58 +00004720SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4721 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004722 SDValue Vec = Op.getOperand(0);
4723 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004724 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004725 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004726 EVT EltVT = VecVT.getVectorElementType();
4727 unsigned VecSize = VecVT.getSizeInBits();
4728 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004729
Matt Arsenault9224c002018-06-05 19:52:46 +00004730
4731 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004732
4733 unsigned NumElts = VecVT.getVectorNumElements();
4734 SDLoc SL(Op);
4735 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4736
Matt Arsenault9224c002018-06-05 19:52:46 +00004737 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004738 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4739
4740 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4741 DAG.getConstant(0, SL, MVT::i32));
4742 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4743 DAG.getConstant(1, SL, MVT::i32));
4744
4745 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4746 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4747
4748 unsigned Idx = KIdx->getZExtValue();
4749 bool InsertLo = Idx < 2;
4750 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4751 InsertLo ? LoVec : HiVec,
4752 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4753 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4754
4755 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4756
4757 SDValue Concat = InsertLo ?
4758 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4759 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4760
4761 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4762 }
4763
Matt Arsenault3aef8092017-01-23 23:09:58 +00004764 if (isa<ConstantSDNode>(Idx))
4765 return SDValue();
4766
Matt Arsenault9224c002018-06-05 19:52:46 +00004767 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004768
Matt Arsenault3aef8092017-01-23 23:09:58 +00004769 // Avoid stack access for dynamic indexing.
Matt Arsenault3aef8092017-01-23 23:09:58 +00004770 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Tim Corringhamfa3e4e52019-02-01 16:51:09 +00004771
4772 // Create a congruent vector with the target value in each element so that
4773 // the required element can be masked and ORed into the target vector.
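  // E.g. for a v2i16 vector and a dynamic index Idx (EltSize == 16), the
  // nodes built below compute:
  //   ScaledIdx = Idx << 4
  //   BFM       = 0xffff << ScaledIdx
  //   Result    = (splat(InsVal) & BFM) | (Vec & ~BFM)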
4774 SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4775 DAG.getSplatBuildVector(VecVT, SL, InsVal));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004776
Matt Arsenault9224c002018-06-05 19:52:46 +00004777 assert(isPowerOf2_32(EltSize));
4778 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4779
Matt Arsenault3aef8092017-01-23 23:09:58 +00004780 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004781 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004782
Matt Arsenault67a98152018-05-16 11:47:30 +00004783 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4784 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4785 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004786 ScaledIdx);
4787
Matt Arsenault67a98152018-05-16 11:47:30 +00004788 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4789 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4790 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004791
Matt Arsenault67a98152018-05-16 11:47:30 +00004792 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4793 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004794}
4795
4796SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4797 SelectionDAG &DAG) const {
4798 SDLoc SL(Op);
4799
4800 EVT ResultVT = Op.getValueType();
4801 SDValue Vec = Op.getOperand(0);
4802 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004803 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004804 unsigned VecSize = VecVT.getSizeInBits();
4805 EVT EltVT = VecVT.getVectorElementType();
4806 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004807
Matt Arsenault98f29462017-05-17 20:30:58 +00004808 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4809
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004810 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004811  // source modifiers before obscuring the value with bit operations.
4812
4813 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4814 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4815 return Combined;
4816
Matt Arsenault9224c002018-06-05 19:52:46 +00004817 unsigned EltSize = EltVT.getSizeInBits();
4818 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004819
Matt Arsenault9224c002018-06-05 19:52:46 +00004820 MVT IntVT = MVT::getIntegerVT(VecSize);
4821 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4822
4823 // Convert vector index to bit-index (* EltSize)
4824 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004825
Matt Arsenault67a98152018-05-16 11:47:30 +00004826 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4827 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
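  // E.g. extracting element 1 of a v4f16: IntVT is i64 and ScaledIdx is
  // 1 << 4 == 16, so Elt = (bitcast Vec to i64) >> 16, which is then
  // truncated to i16 and bitcast back to f16 below.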
Matt Arsenault3aef8092017-01-23 23:09:58 +00004828
Matt Arsenault67a98152018-05-16 11:47:30 +00004829 if (ResultVT == MVT::f16) {
4830 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4831 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4832 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004833
Matt Arsenault67a98152018-05-16 11:47:30 +00004834 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4835}
4836
Matt Arsenault5fe851b2019-07-02 19:15:45 +00004837static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) {
4838 assert(Elt % 2 == 0);
4839 return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0);
4840}
4841
4842SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4843 SelectionDAG &DAG) const {
4844 SDLoc SL(Op);
4845 EVT ResultVT = Op.getValueType();
4846 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
4847
4848 EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16;
4849 EVT EltVT = PackVT.getVectorElementType();
4850 int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements();
4851
4852 // vector_shuffle <0,1,6,7> lhs, rhs
4853 // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2)
4854 //
4855 // vector_shuffle <6,7,2,3> lhs, rhs
4856 // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2)
4857 //
4858 // vector_shuffle <6,7,0,1> lhs, rhs
4859 // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0)
4860
4861 // Avoid scalarizing when both halves are reading from consecutive elements.
4862 SmallVector<SDValue, 4> Pieces;
4863 for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) {
4864 if (elementPairIsContiguous(SVN->getMask(), I)) {
4865 const int Idx = SVN->getMaskElt(I);
4866 int VecIdx = Idx < SrcNumElts ? 0 : 1;
4867 int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts;
4868 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL,
4869 PackVT, SVN->getOperand(VecIdx),
4870 DAG.getConstant(EltIdx, SL, MVT::i32));
4871 Pieces.push_back(SubVec);
4872 } else {
4873 const int Idx0 = SVN->getMaskElt(I);
4874 const int Idx1 = SVN->getMaskElt(I + 1);
4875 int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1;
4876 int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1;
4877 int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts;
4878 int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts;
4879
4880 SDValue Vec0 = SVN->getOperand(VecIdx0);
4881 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4882 Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32));
4883
4884 SDValue Vec1 = SVN->getOperand(VecIdx1);
4885 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4886 Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32));
4887 Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 }));
4888 }
4889 }
4890
4891 return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
4892}
4893
Matt Arsenault67a98152018-05-16 11:47:30 +00004894SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4895 SelectionDAG &DAG) const {
4896 SDLoc SL(Op);
4897 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004898
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004899 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4900 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4901
4902 // Turn into pair of packed build_vectors.
4903 // TODO: Special case for constants that can be materialized with s_mov_b64.
4904 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4905 { Op.getOperand(0), Op.getOperand(1) });
4906 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4907 { Op.getOperand(2), Op.getOperand(3) });
4908
4909 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4910 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4911
4912 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4913 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4914 }
4915
Matt Arsenault1349a042018-05-22 06:32:10 +00004916 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004917 assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
Matt Arsenault67a98152018-05-16 11:47:30 +00004918
Matt Arsenault1349a042018-05-22 06:32:10 +00004919 SDValue Lo = Op.getOperand(0);
4920 SDValue Hi = Op.getOperand(1);
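  // In the general case the packed value is assembled below as
  //   or (zext (bitcast Lo)), (shl (zext (bitcast Hi)), 16)
  // and bitcast back to VT; e.g. build_vector(f16 1.0, f16 2.0) packs to the
  // i32 0x40003C00. The undef checks below just skip the unused half.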
Matt Arsenault67a98152018-05-16 11:47:30 +00004921
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004922 // Avoid adding defined bits with the zero_extend.
4923 if (Hi.isUndef()) {
4924 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4925 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4926 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4927 }
Matt Arsenault67a98152018-05-16 11:47:30 +00004928
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004929 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004930 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4931
4932 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4933 DAG.getConstant(16, SL, MVT::i32));
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004934 if (Lo.isUndef())
4935 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4936
4937 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4938 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
Matt Arsenault1349a042018-05-22 06:32:10 +00004939
4940 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004941 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004942}
4943
Tom Stellard418beb72016-07-13 14:23:33 +00004944bool
4945SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4946 // We can fold offsets for anything that doesn't require a GOT relocation.
Matt Arsenault0da63502018-08-31 05:49:54 +00004947 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4948 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4949 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004950 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004951}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004952
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004953static SDValue
4954buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4955 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4956 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004957 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4958 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004959 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004960 // For constant address space:
4961 // s_getpc_b64 s[0:1]
4962 // s_add_u32 s0, s0, $symbol
4963 // s_addc_u32 s1, s1, 0
4964 //
4965 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4966 // a fixup or relocation is emitted to replace $symbol with a literal
4967 // constant, which is a pc-relative offset from the encoding of the $symbol
4968 // operand to the global variable.
4969 //
4970 // For global address space:
4971 // s_getpc_b64 s[0:1]
4972 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4973 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4974 //
4975 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4976 // fixups or relocations are emitted to replace $symbol@*@lo and
4977 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4978 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4979 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004980 //
4981 // What we want here is an offset from the value returned by s_getpc
4982 // (which is the address of the s_add_u32 instruction) to the global
4983 // variable, but since the encoding of $symbol starts 4 bytes after the start
4984 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4985 // small. This requires us to add 4 to the global variable offset in order to
4986 // compute the correct address.
Nicolai Haehnle6d71be42019-06-16 17:32:01 +00004987 unsigned LoFlags = GAFlags;
4988 if (LoFlags == SIInstrInfo::MO_NONE)
4989 LoFlags = SIInstrInfo::MO_REL32;
4990 SDValue PtrLo =
4991 DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, LoFlags);
4992 SDValue PtrHi;
4993 if (GAFlags == SIInstrInfo::MO_NONE) {
4994 PtrHi = DAG.getTargetConstant(0, DL, MVT::i32);
4995 } else {
4996 PtrHi =
4997 DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags + 1);
4998 }
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004999 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00005000}
5001
Tom Stellard418beb72016-07-13 14:23:33 +00005002SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
5003 SDValue Op,
5004 SelectionDAG &DAG) const {
5005 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00005006 const GlobalValue *GV = GSD->getGlobal();
Nicolai Haehnle27101712019-06-25 11:52:30 +00005007 if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
5008 (!GV->hasExternalLinkage() ||
5009 getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA ||
5010 getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL)) ||
Matt Arsenaultd1f45712018-09-10 12:16:11 +00005011 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
5012 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
Tom Stellard418beb72016-07-13 14:23:33 +00005013 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
5014
5015 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00005016 EVT PtrVT = Op.getValueType();
5017
Nicolai Haehnle27101712019-06-25 11:52:30 +00005018 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
5019 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(),
5020 SIInstrInfo::MO_ABS32_LO);
5021 return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA);
5022 }
5023
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00005024 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00005025 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00005026 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00005027 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
5028 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00005029
5030 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00005031 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00005032
5033 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00005034 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00005035 const DataLayout &DataLayout = DAG.getDataLayout();
5036 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
Matt Arsenaultd77fcc22018-09-10 02:23:39 +00005037 MachinePointerInfo PtrInfo
5038 = MachinePointerInfo::getGOT(DAG.getMachineFunction());
Tom Stellard418beb72016-07-13 14:23:33 +00005039
Justin Lebar9c375812016-07-15 18:27:10 +00005040 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00005041 MachineMemOperand::MODereferenceable |
5042 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00005043}
5044
Benjamin Kramerbdc49562016-06-12 15:39:02 +00005045SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
5046 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00005047 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
5048 // the destination register.
5049 //
Tom Stellardfc92e772015-05-12 14:18:14 +00005050 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
5051 // so we will end up with redundant moves to m0.
5052 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00005053 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
5054
5055 // A Null SDValue creates a glue result.
5056 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
5057 V, Chain);
5058 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00005059}
5060
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005061SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
5062 SDValue Op,
5063 MVT VT,
5064 unsigned Offset) const {
5065 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005066 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005067 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005068 // The local size values will have the hi 16-bits as zero.
5069 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
5070 DAG.getValueType(VT));
5071}
5072
Benjamin Kramer061f4a52017-01-13 14:39:03 +00005073static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5074 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00005075 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005076 "non-hsa intrinsic with hsa target",
5077 DL.getDebugLoc());
5078 DAG.getContext()->diagnose(BadIntrin);
5079 return DAG.getUNDEF(VT);
5080}
5081
Benjamin Kramer061f4a52017-01-13 14:39:03 +00005082static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
5083 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00005084 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005085 "intrinsic not supported on subtarget",
5086 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00005087 DAG.getContext()->diagnose(BadIntrin);
5088 return DAG.getUNDEF(VT);
5089}
5090
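// Pad a list of dword-sized elements out to the next hardware-friendly f32
// vector. E.g. five elements become a v8f32 whose last three lanes are undef,
// and a single element is returned as a plain f32.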
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005091static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
5092 ArrayRef<SDValue> Elts) {
5093 assert(!Elts.empty());
5094 MVT Type;
5095 unsigned NumElts;
5096
5097 if (Elts.size() == 1) {
5098 Type = MVT::f32;
5099 NumElts = 1;
5100 } else if (Elts.size() == 2) {
5101 Type = MVT::v2f32;
5102 NumElts = 2;
5103 } else if (Elts.size() <= 4) {
5104 Type = MVT::v4f32;
5105 NumElts = 4;
5106 } else if (Elts.size() <= 8) {
5107 Type = MVT::v8f32;
5108 NumElts = 8;
5109 } else {
5110 assert(Elts.size() <= 16);
5111 Type = MVT::v16f32;
5112 NumElts = 16;
5113 }
5114
5115 SmallVector<SDValue, 16> VecElts(NumElts);
5116 for (unsigned i = 0; i < Elts.size(); ++i) {
5117 SDValue Elt = Elts[i];
5118 if (Elt.getValueType() != MVT::f32)
5119 Elt = DAG.getBitcast(MVT::f32, Elt);
5120 VecElts[i] = Elt;
5121 }
5122 for (unsigned i = Elts.size(); i < NumElts; ++i)
5123 VecElts[i] = DAG.getUNDEF(MVT::f32);
5124
5125 if (NumElts == 1)
5126 return VecElts[0];
5127 return DAG.getBuildVector(Type, DL, VecElts);
5128}
5129
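// Split the immediate cachepolicy operand into its GLC/SLC/DLC bits, returning
// true iff no unknown bits remain. E.g. a cachepolicy of 5 (0b101) yields
// GLC = 1, SLC = 0 and DLC = 1 when all three outputs are requested.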
5130static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005131 SDValue *GLC, SDValue *SLC, SDValue *DLC) {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005132 auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005133
5134 uint64_t Value = CachePolicyConst->getZExtValue();
5135 SDLoc DL(CachePolicy);
5136 if (GLC) {
5137 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5138 Value &= ~(uint64_t)0x1;
5139 }
5140 if (SLC) {
5141 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5142 Value &= ~(uint64_t)0x2;
5143 }
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005144 if (DLC) {
5145 *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
5146 Value &= ~(uint64_t)0x4;
5147 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005148
5149 return Value == 0;
5150}
5151
David Stuttardf77079f2019-01-14 11:55:24 +00005152// Re-construct the required return value for an image load intrinsic.
5153// This is more complicated due to the optional use of TexFailCtrl, which
5154// means the required return type is an aggregate.
5155static SDValue constructRetValue(SelectionDAG &DAG,
5156 MachineSDNode *Result,
5157 ArrayRef<EVT> ResultTypes,
5158 bool IsTexFail, bool Unpacked, bool IsD16,
5159 int DMaskPop, int NumVDataDwords,
5160 const SDLoc &DL, LLVMContext &Context) {
5161  // Determine the required return type. This is the same regardless of the
      // IsTexFail flag.
5162 EVT ReqRetVT = ResultTypes[0];
5163 EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
5164 int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
5165 EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
5166 EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
5167 : AdjEltVT
5168 : ReqRetVT;
5169
5170 // Extract data part of the result
5171 // Bitcast the result to the same type as the required return type
5172 int NumElts;
5173 if (IsD16 && !Unpacked)
5174 NumElts = NumVDataDwords << 1;
5175 else
5176 NumElts = NumVDataDwords;
5177
5178 EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
5179 : AdjEltVT;
5180
Tim Renouf6f0191a2019-03-22 15:21:11 +00005181 // Special case for v6f16. Rather than add support for this, use v3i32 to
David Stuttardf77079f2019-01-14 11:55:24 +00005182 // extract the data elements
Tim Renouf6f0191a2019-03-22 15:21:11 +00005183 bool V6F16Special = false;
5184 if (NumElts == 6) {
5185 CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2);
David Stuttardf77079f2019-01-14 11:55:24 +00005186 DMaskPop >>= 1;
5187 ReqRetNumElts >>= 1;
Tim Renouf6f0191a2019-03-22 15:21:11 +00005188 V6F16Special = true;
David Stuttardf77079f2019-01-14 11:55:24 +00005189 AdjVT = MVT::v2i32;
5190 }
5191
5192 SDValue N = SDValue(Result, 0);
5193 SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
5194
5195 // Iterate over the result
5196 SmallVector<SDValue, 4> BVElts;
5197
5198 if (CastVT.isVector()) {
5199 DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
5200 } else {
5201 BVElts.push_back(CastRes);
5202 }
5203 int ExtraElts = ReqRetNumElts - DMaskPop;
5204 while(ExtraElts--)
5205 BVElts.push_back(DAG.getUNDEF(AdjEltVT));
5206
5207 SDValue PreTFCRes;
5208 if (ReqRetNumElts > 1) {
5209 SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
5210 if (IsD16 && Unpacked)
5211 PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
5212 else
5213 PreTFCRes = NewVec;
5214 } else {
5215 PreTFCRes = BVElts[0];
5216 }
5217
Tim Renouf6f0191a2019-03-22 15:21:11 +00005218 if (V6F16Special)
David Stuttardf77079f2019-01-14 11:55:24 +00005219 PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
5220
5221 if (!IsTexFail) {
5222 if (Result->getNumValues() > 1)
5223 return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
5224 else
5225 return PreTFCRes;
5226 }
5227
5228 // Extract the TexFail result and insert into aggregate return
5229 SmallVector<SDValue, 1> TFCElt;
5230 DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
5231 SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
5232 return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
5233}
5234
5235static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
5236 SDValue *LWE, bool &IsTexFail) {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005237 auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
David Stuttardf77079f2019-01-14 11:55:24 +00005238
5239 uint64_t Value = TexFailCtrlConst->getZExtValue();
5240 if (Value) {
5241 IsTexFail = true;
5242 }
5243
5244 SDLoc DL(TexFailCtrlConst);
5245 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
5246 Value &= ~(uint64_t)0x1;
5247 *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
5248 Value &= ~(uint64_t)0x2;
5249
5250 return Value == 0;
5251}
5252
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005253SDValue SITargetLowering::lowerImage(SDValue Op,
5254 const AMDGPU::ImageDimIntrinsicInfo *Intr,
5255 SelectionDAG &DAG) const {
5256 SDLoc DL(Op);
Ryan Taylor1f334d02018-08-28 15:07:30 +00005257 MachineFunction &MF = DAG.getMachineFunction();
5258 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005259 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5260 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
5261 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005262 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
5263 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
Piotr Sobczak9b11e932019-06-10 15:58:51 +00005264 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
5265 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005266 unsigned IntrOpcode = Intr->BaseOpcode;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005267 bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005268
David Stuttardf77079f2019-01-14 11:55:24 +00005269 SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
5270 SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005271 bool IsD16 = false;
Ryan Taylor1f334d02018-08-28 15:07:30 +00005272 bool IsA16 = false;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005273 SDValue VData;
5274 int NumVDataDwords;
David Stuttardf77079f2019-01-14 11:55:24 +00005275 bool AdjustRetType = false;
5276
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005277 unsigned AddrIdx; // Index of first address argument
5278 unsigned DMask;
David Stuttardf77079f2019-01-14 11:55:24 +00005279 unsigned DMaskLanes = 0;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005280
5281 if (BaseOpcode->Atomic) {
5282 VData = Op.getOperand(2);
5283
5284 bool Is64Bit = VData.getValueType() == MVT::i64;
5285 if (BaseOpcode->AtomicX2) {
5286 SDValue VData2 = Op.getOperand(3);
5287 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
5288 {VData, VData2});
5289 if (Is64Bit)
5290 VData = DAG.getBitcast(MVT::v4i32, VData);
5291
5292 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
5293 DMask = Is64Bit ? 0xf : 0x3;
5294 NumVDataDwords = Is64Bit ? 4 : 2;
5295 AddrIdx = 4;
5296 } else {
5297 DMask = Is64Bit ? 0x3 : 0x1;
5298 NumVDataDwords = Is64Bit ? 2 : 1;
5299 AddrIdx = 3;
5300 }
5301 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00005302 unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005303 auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
David Stuttardf77079f2019-01-14 11:55:24 +00005304 DMask = DMaskConst->getZExtValue();
5305 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005306
5307 if (BaseOpcode->Store) {
5308 VData = Op.getOperand(2);
5309
5310 MVT StoreVT = VData.getSimpleValueType();
5311 if (StoreVT.getScalarType() == MVT::f16) {
Matt Arsenaulte4c2e9b2019-06-19 23:54:58 +00005312 if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005313 return Op; // D16 is unsupported for this instruction
5314
5315 IsD16 = true;
5316 VData = handleD16VData(VData, DAG);
5317 }
5318
5319 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005320 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00005321      // Work out the number of dwords based on the dmask popcount, the
5322      // underlying type, and whether packing is supported.
5323 MVT LoadVT = ResultTypes[0].getSimpleVT();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005324 if (LoadVT.getScalarType() == MVT::f16) {
Matt Arsenaulte4c2e9b2019-06-19 23:54:58 +00005325 if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16)
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005326 return Op; // D16 is unsupported for this instruction
5327
5328 IsD16 = true;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005329 }
5330
David Stuttardf77079f2019-01-14 11:55:24 +00005331 // Confirm that the return type is large enough for the dmask specified
5332 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
5333 (!LoadVT.isVector() && DMaskLanes > 1))
5334 return Op;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005335
David Stuttardf77079f2019-01-14 11:55:24 +00005336 if (IsD16 && !Subtarget->hasUnpackedD16VMem())
5337 NumVDataDwords = (DMaskLanes + 1) / 2;
5338 else
5339 NumVDataDwords = DMaskLanes;
5340
5341 AdjustRetType = true;
5342 }
David Stuttardc6603862018-11-29 20:14:17 +00005343
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005344 AddrIdx = DMaskIdx + 1;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005345 }
5346
Ryan Taylor1f334d02018-08-28 15:07:30 +00005347 unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
5348 unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
5349 unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
5350 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
5351 NumCoords + NumLCM;
5352 unsigned NumMIVAddrs = NumVAddrs;
5353
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005354 SmallVector<SDValue, 4> VAddrs;
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005355
5356  // Optimize _L to _LZ when 'lod' is zero
5357 if (LZMappingInfo) {
5358 if (auto ConstantLod =
Ryan Taylor1f334d02018-08-28 15:07:30 +00005359 dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005360 if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5361 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
Ryan Taylor1f334d02018-08-28 15:07:30 +00005362 NumMIVAddrs--; // remove 'lod'
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005363 }
5364 }
5365 }
5366
Piotr Sobczak9b11e932019-06-10 15:58:51 +00005367  // Optimize _mip away when 'lod' is zero
5368 if (MIPMappingInfo) {
5369 if (auto ConstantLod =
5370 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5371 if (ConstantLod->isNullValue()) {
5372 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
5373 NumMIVAddrs--; // remove 'lod'
5374 }
5375 }
5376 }
5377
Ryan Taylor1f334d02018-08-28 15:07:30 +00005378  // Check for 16-bit addresses and pack them if so.
5379 unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5380 MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
Neil Henning63718b22018-10-31 10:34:48 +00005381 const MVT VAddrScalarVT = VAddrVT.getScalarType();
5382 if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
Ryan Taylor1f334d02018-08-28 15:07:30 +00005383 ST->hasFeature(AMDGPU::FeatureR128A16)) {
5384 IsA16 = true;
Neil Henning63718b22018-10-31 10:34:48 +00005385 const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
Ryan Taylor1f334d02018-08-28 15:07:30 +00005386 for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5387 SDValue AddrLo, AddrHi;
5388 // Push back extra arguments.
5389 if (i < DimIdx) {
5390 AddrLo = Op.getOperand(i);
5391 } else {
5392 AddrLo = Op.getOperand(i);
5393 // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
5394 // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5395 if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
Matt Arsenault0da63502018-08-31 05:49:54 +00005396 ((NumGradients / 2) % 2 == 1 &&
5397 (i == DimIdx + (NumGradients / 2) - 1 ||
Ryan Taylor1f334d02018-08-28 15:07:30 +00005398 i == DimIdx + NumGradients - 1))) {
5399 AddrHi = DAG.getUNDEF(MVT::f16);
5400 } else {
5401 AddrHi = Op.getOperand(i + 1);
5402 i++;
5403 }
Neil Henning63718b22018-10-31 10:34:48 +00005404 AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
Ryan Taylor1f334d02018-08-28 15:07:30 +00005405 {AddrLo, AddrHi});
5406 AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
5407 }
5408 VAddrs.push_back(AddrLo);
5409 }
5410 } else {
5411 for (unsigned i = 0; i < NumMIVAddrs; ++i)
5412 VAddrs.push_back(Op.getOperand(AddrIdx + i));
5413 }
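  // E.g. (illustrative) an image_sample with f16 coordinates (s, t) packs
  // both coordinates into a single i32 address register via a v2f16 build;
  // with an odd number of coordinates the final slot is padded with undef,
  // as handled above.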
5414
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005415 // If the register allocator cannot place the address registers contiguously
5416 // without introducing moves, then using the non-sequential address encoding
5417  // is always preferable, since it saves VALU instructions and is neutral
5418  // or better in terms of code size.
5419 //
5420 // However, we currently have no way of hinting to the register allocator that
5421 // MIMG addresses should be placed contiguously when it is possible to do so,
5422 // so force non-NSA for the common 2-address case as a heuristic.
5423 //
5424 // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5425 // allocation when possible.
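  // E.g. (with purely illustrative register choices) NSA allows
  //   image_sample v[0:3], [v7, v2, v9], ...
  // whereas the non-NSA encoding would require the three addresses to be
  // materialized in a contiguous tuple such as v[2:4] first.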
5426 bool UseNSA =
5427 ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5428 SDValue VAddr;
5429 if (!UseNSA)
5430 VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005431
5432 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5433 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5434 unsigned CtrlIdx; // Index of texfailctrl argument
5435 SDValue Unorm;
5436 if (!BaseOpcode->Sampler) {
5437 Unorm = True;
5438 CtrlIdx = AddrIdx + NumVAddrs + 1;
5439 } else {
5440 auto UnormConst =
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005441 cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005442
5443 Unorm = UnormConst->getZExtValue() ? True : False;
5444 CtrlIdx = AddrIdx + NumVAddrs + 3;
5445 }
5446
David Stuttardf77079f2019-01-14 11:55:24 +00005447 SDValue TFE;
5448 SDValue LWE;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005449 SDValue TexFail = Op.getOperand(CtrlIdx);
David Stuttardf77079f2019-01-14 11:55:24 +00005450 bool IsTexFail = false;
5451 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005452 return Op;
5453
David Stuttardf77079f2019-01-14 11:55:24 +00005454 if (IsTexFail) {
5455 if (!DMaskLanes) {
5456      // We expect an error flag, since TFC is on and dmask is 0.
5457      // Force dmask to be at least 1; otherwise the instruction will fail.
5458 DMask = 0x1;
5459 DMaskLanes = 1;
5460 NumVDataDwords = 1;
5461 }
5462 NumVDataDwords += 1;
5463 AdjustRetType = true;
5464 }
5465
5466  // Something earlier may have tagged the return type as needing adjustment.
5467  // This happens if the instruction is a load or has TexFailCtrl flags set.
5468 if (AdjustRetType) {
5469 // NumVDataDwords reflects the true number of dwords required in the return type
5470 if (DMaskLanes == 0 && !BaseOpcode->Store) {
5471      // This is a no-op load; it can be eliminated.
5472 SDValue Undef = DAG.getUNDEF(Op.getValueType());
5473 if (isa<MemSDNode>(Op))
5474 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5475 return Undef;
5476 }
5477
David Stuttardf77079f2019-01-14 11:55:24 +00005478 EVT NewVT = NumVDataDwords > 1 ?
5479 EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
5480 : MVT::f32;
5481
5482 ResultTypes[0] = NewVT;
5483 if (ResultTypes.size() == 3) {
5484      // The original result was an aggregate type used for TexFailCtrl
5485      // results. The actual instruction returns a vector type, which has now
5486      // been created; remove the aggregate result.
5487 ResultTypes.erase(&ResultTypes[1]);
5488 }
5489 }
5490
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005491 SDValue GLC;
5492 SDValue SLC;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005493 SDValue DLC;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005494 if (BaseOpcode->Atomic) {
5495 GLC = True; // TODO no-return optimization
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005496 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5497 IsGFX10 ? &DLC : nullptr))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005498 return Op;
5499 } else {
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005500 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5501 IsGFX10 ? &DLC : nullptr))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005502 return Op;
5503 }
5504
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005505 SmallVector<SDValue, 26> Ops;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005506 if (BaseOpcode->Store || BaseOpcode->Atomic)
5507 Ops.push_back(VData); // vdata
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005508 if (UseNSA) {
5509 for (const SDValue &Addr : VAddrs)
5510 Ops.push_back(Addr);
5511 } else {
5512 Ops.push_back(VAddr);
5513 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005514 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5515 if (BaseOpcode->Sampler)
5516 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5517 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005518 if (IsGFX10)
5519 Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005520 Ops.push_back(Unorm);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005521 if (IsGFX10)
5522 Ops.push_back(DLC);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005523 Ops.push_back(GLC);
5524 Ops.push_back(SLC);
Ryan Taylor1f334d02018-08-28 15:07:30 +00005525 Ops.push_back(IsA16 && // a16 or r128
5526 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
David Stuttardf77079f2019-01-14 11:55:24 +00005527 Ops.push_back(TFE); // tfe
5528 Ops.push_back(LWE); // lwe
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005529 if (!IsGFX10)
5530 Ops.push_back(DimInfo->DA ? True : False);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005531 if (BaseOpcode->HasD16)
5532 Ops.push_back(IsD16 ? True : False);
5533 if (isa<MemSDNode>(Op))
5534 Ops.push_back(Op.getOperand(0)); // chain
5535
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005536 int NumVAddrDwords =
5537 UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005538 int Opcode = -1;
5539
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005540 if (IsGFX10) {
5541 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5542 UseNSA ? AMDGPU::MIMGEncGfx10NSA
5543 : AMDGPU::MIMGEncGfx10Default,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005544 NumVDataDwords, NumVAddrDwords);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005545 } else {
5546 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5547 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5548 NumVDataDwords, NumVAddrDwords);
5549 if (Opcode == -1)
5550 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5551 NumVDataDwords, NumVAddrDwords);
5552 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005553 assert(Opcode != -1);
5554
5555 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5556 if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
Chandler Carruth66654b72018-08-14 23:30:32 +00005557 MachineMemOperand *MemRef = MemOp->getMemOperand();
5558 DAG.setNodeMemRefs(NewNode, {MemRef});
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005559 }
5560
5561 if (BaseOpcode->AtomicX2) {
5562 SmallVector<SDValue, 1> Elt;
5563 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5564 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
David Stuttardf77079f2019-01-14 11:55:24 +00005565 } else if (!BaseOpcode->Store) {
5566 return constructRetValue(DAG, NewNode,
5567 OrigResultTypes, IsTexFail,
5568 Subtarget->hasUnpackedD16VMem(), IsD16,
5569 DMaskLanes, NumVDataDwords, DL,
5570 *DAG.getContext());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005571 }
5572
5573 return SDValue(NewNode, 0);
5574}
5575
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005576SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
Nicolai Haehnle490e83c2019-06-16 17:14:12 +00005577 SDValue Offset, SDValue GLC, SDValue DLC,
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005578 SelectionDAG &DAG) const {
5579 MachineFunction &MF = DAG.getMachineFunction();
5580 MachineMemOperand *MMO = MF.getMachineMemOperand(
5581 MachinePointerInfo(),
5582 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5583 MachineMemOperand::MOInvariant,
5584 VT.getStoreSize(), VT.getStoreSize());
5585
5586 if (!Offset->isDivergent()) {
5587 SDValue Ops[] = {
5588 Rsrc,
5589 Offset, // Offset
Nicolai Haehnle490e83c2019-06-16 17:14:12 +00005590 GLC,
5591 DLC,
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005592 };
5593 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5594 DAG.getVTList(VT), Ops, VT, MMO);
5595 }
5596
5597 // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5598 // assume that the buffer is unswizzled.
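  // E.g. a divergent v8i32 load is emitted as two v4i32 buffer loads at
  // offsets InstOffset and InstOffset + 16, and the pieces are concatenated
  // at the end of this function.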
5599 SmallVector<SDValue, 4> Loads;
5600 unsigned NumLoads = 1;
5601 MVT LoadVT = VT.getSimpleVT();
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005602 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
Simon Pilgrim44dfd812018-12-07 21:44:25 +00005603 assert((LoadVT.getScalarType() == MVT::i32 ||
5604 LoadVT.getScalarType() == MVT::f32) &&
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005605 isPowerOf2_32(NumElts));
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005606
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005607 if (NumElts == 8 || NumElts == 16) {
5608 NumLoads = NumElts == 16 ? 4 : 2;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005609 LoadVT = MVT::v4i32;
5610 }
5611
5612 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5613 unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5614 SDValue Ops[] = {
5615 DAG.getEntryNode(), // Chain
5616 Rsrc, // rsrc
5617 DAG.getConstant(0, DL, MVT::i32), // vindex
5618 {}, // voffset
5619 {}, // soffset
5620 {}, // offset
5621 DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5622 DAG.getConstant(0, DL, MVT::i1), // idxen
5623 };
5624
5625  // Use the alignment to ensure that the required offsets will fit into the
5626  // instructions' immediate offset fields.
5627 setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5628
5629 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5630 for (unsigned i = 0; i < NumLoads; ++i) {
5631 Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5632 Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5633 Ops, LoadVT, MMO));
5634 }
5635
5636 if (VT == MVT::v8i32 || VT == MVT::v16i32)
5637 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5638
5639 return Loads[0];
5640}
5641
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005642SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5643 SelectionDAG &DAG) const {
5644 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00005645 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005646
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
      return emitNonHSAIntrinsicError(DAG, DL, VT);
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
      DiagnosticInfoUnsupported BadIntrin(
          MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
    return getPreloadedValue(DAG, *MFI, VT, RegID);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    if (MFI->isEntryFunction())
      return getImplicitArgPtr(DAG, DL);
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

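    // There is no rsq_clamp instruction on VI+; emulate it by clamping the
    // rsq result to the largest and smallest (negative) finite values.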
    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, 4, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, 4, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, 4, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
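  // The local size values are 16-bit fields in the dispatch packet, so they
  // are loaded with an implicit zero-extension to 32 bits.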
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return getPreloadedValue(DAG, *MFI, VT,
                             AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDX);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDY);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
                          SDLoc(DAG.getEntryNode()),
                          MFI->getArgInfo().WorkItemIDZ);
  case Intrinsic::amdgcn_wavefrontsize:
    return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
                           SDLoc(Op), MVT::i32);
  case Intrinsic::amdgcn_s_buffer_load: {
    bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
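    // The DLC bit in the cache policy operand only exists on gfx10; it must
    // be rejected on older targets.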
    SDValue GLC;
    SDValue DLC = DAG.getTargetConstant(0, DL, MVT::i1);
    if (!parseCachePolicy(Op.getOperand(3), DAG, &GLC, nullptr,
                          IsGFX10 ? &DLC : nullptr))
      return Op;
    return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), GLC, DLC,
                        DAG);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  case Intrinsic::amdgcn_interp_mov: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_interp_p1_f16: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = M0.getValue(1);
    if (getSubtarget()->getLDSBankCount() == 16) {
      // 16 bank LDS
      SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
                              DAG.getConstant(2, DL, MVT::i32), // P0
                              Op.getOperand(2), // Attrchan
                              Op.getOperand(3), // Attr
                              Glue);
      SDValue Ops[] = {
        Op.getOperand(1), // Src0
        Op.getOperand(2), // Attrchan
        Op.getOperand(3), // Attr
        DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
        S, // Src2 - holds two f16 values selected by high
        DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
        Op.getOperand(4), // high
        DAG.getConstant(0, DL, MVT::i1), // $clamp
        DAG.getConstant(0, DL, MVT::i32) // $omod
      };
      return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
    } else {
      // 32 bank LDS
      SDValue Ops[] = {
        Op.getOperand(1), // Src0
        Op.getOperand(2), // Attrchan
        Op.getOperand(3), // Attr
        DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
        Op.getOperand(4), // high
        DAG.getConstant(0, DL, MVT::i1), // $clamp
        DAG.getConstant(0, DL, MVT::i32), // $omod
        Glue
      };
      return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
    }
  }
  case Intrinsic::amdgcn_interp_p2_f16: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
    SDValue Glue = SDValue(M0.getNode(), 1);
    SDValue Ops[] = {
      Op.getOperand(2), // Src0
      Op.getOperand(3), // Attrchan
      Op.getOperand(4), // Attr
      DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
      Op.getOperand(1), // Src2
      DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
      Op.getOperand(5), // high
      DAG.getConstant(0, DL, MVT::i1), // $clamp
      Glue
    };
    return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_mul_u24:
    return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case Intrinsic::amdgcn_mul_i24:
    return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
      MF.getFunction(), "intrinsic not supported on subtarget",
      DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));

    // Translate to the operands expected by the machine instruction. The
    // first source operand must match either the numerator or the
    // denominator, as selected by the constant third operand of the
    // intrinsic.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is the opposite of the machine instruction's operands,
    // which are s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
    // There is a Pat that handles this variant, so return it as-is.
    if (Op.getOperand(1).getValueType() == MVT::i1 &&
        Op.getConstantOperandVal(2) == 0 &&
        Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
      return Op;
    return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_fcmp: {
    return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fdot2:
    return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz:
  case Intrinsic::amdgcn_cvt_pknorm_i16:
  case Intrinsic::amdgcn_cvt_pknorm_u16:
  case Intrinsic::amdgcn_cvt_pk_i16:
  case Intrinsic::amdgcn_cvt_pk_u16: {
    // FIXME: Stop adding cast if v2f16/v2i16 are legal.
    EVT VT = Op.getValueType();
    unsigned Opcode;

    if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
      Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
      Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
      Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
    else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
      Opcode = AMDGPUISD::CVT_PK_I16_I32;
    else
      Opcode = AMDGPUISD::CVT_PK_U16_U32;

    if (isTypeLegal(VT))
      return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));

    SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  case Intrinsic::amdgcn_fmad_ftz:
    return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_if_break:
    return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
                                      Op->getOperand(1), Op->getOperand(2)), 0);

  case Intrinsic::amdgcn_groupstaticsize: {
    Triple::OSType OS = getTargetMachine().getTargetTriple().getOS();
    if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
      return Op;

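    // For other OSes the LDS size is only known at link time; emit the low 32
    // bits of an absolute relocation against the magic symbol named after the
    // intrinsic itself.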
    const Module *M = MF.getFunction().getParent();
    const GlobalValue *GV =
        M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize));
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
                                            SIInstrInfo::MO_ABS32_LO);
    return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
  }
  default:
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
      return lowerImage(Op, ImageDimIntr, DAG);

    return Op;
  }
}

SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);

  switch (IntrID) {
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap: {
    MemSDNode *M = cast<MemSDNode>(Op);
    SDValue Chain = M->getOperand(0);
    SDValue M0 = M->getOperand(2);
    SDValue Value = M->getOperand(3);
    unsigned IndexOperand = M->getConstantOperandVal(7);
    unsigned WaveRelease = M->getConstantOperandVal(8);
    unsigned WaveDone = M->getConstantOperandVal(9);
    unsigned ShaderType;
    unsigned Instruction;

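    // The index operand packs several fields: bits [5:0] hold the ordered
    // count index, and on gfx10 bits [27:24] hold the dword count. Any other
    // set bit is an error.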
    unsigned OrderedCountIndex = IndexOperand & 0x3f;
    IndexOperand &= ~0x3f;
    unsigned CountDw = 0;

    if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) {
      CountDw = (IndexOperand >> 24) & 0xf;
      IndexOperand &= ~(0xf << 24);

      if (CountDw < 1 || CountDw > 4) {
        report_fatal_error(
            "ds_ordered_count: dword count must be between 1 and 4");
      }
    }

    if (IndexOperand)
      report_fatal_error("ds_ordered_count: bad index operand");

    switch (IntrID) {
    case Intrinsic::amdgcn_ds_ordered_add:
      Instruction = 0;
      break;
    case Intrinsic::amdgcn_ds_ordered_swap:
      Instruction = 1;
      break;
    }

    if (WaveDone && !WaveRelease)
      report_fatal_error("ds_ordered_count: wave_done requires wave_release");

    switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
    case CallingConv::AMDGPU_CS:
    case CallingConv::AMDGPU_KERNEL:
      ShaderType = 0;
      break;
    case CallingConv::AMDGPU_PS:
      ShaderType = 1;
      break;
    case CallingConv::AMDGPU_VS:
      ShaderType = 2;
      break;
    case CallingConv::AMDGPU_GS:
      ShaderType = 3;
      break;
    default:
      report_fatal_error("ds_ordered_count unsupported for this calling conv");
    }

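    // Re-pack the decoded fields into the instruction's offset: offset0 holds
    // the byte offset of the counter (index * 4); offset1 packs wave_release
    // (bit 0), wave_done (bit 1), the shader type (bits 3:2), the instruction
    // (bit 4) and, on gfx10, the dword count minus one (bits 7:6).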
    unsigned Offset0 = OrderedCountIndex << 2;
    unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                       (Instruction << 4);

    if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
      Offset1 |= (CountDw - 1) << 6;

    unsigned Offset = Offset0 | (Offset1 << 8);

    SDValue Ops[] = {
      Chain,
      Value,
      DAG.getTargetConstant(Offset, DL, MVT::i16),
      copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
    };
    return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
                                   M->getVTList(), Ops, M->getMemoryVT(),
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_ds_fadd: {
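    // Lower to the generic atomic fadd node; operand 2 is the pointer and
    // operand 3 the value, as for a normal atomicrmw.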
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc;
    switch (IntrID) {
    case Intrinsic::amdgcn_ds_fadd:
      Opc = ISD::ATOMIC_LOAD_FADD;
      break;
    }

    return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
                         M->getOperand(0), M->getOperand(2), M->getOperand(3),
                         M->getMemOperand());
  }
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc;
    switch (IntrID) {
    case Intrinsic::amdgcn_atomic_inc:
      Opc = AMDGPUISD::ATOMIC_INC;
      break;
    case Intrinsic::amdgcn_atomic_dec:
      Opc = AMDGPUISD::ATOMIC_DEC;
      break;
    case Intrinsic::amdgcn_ds_fmin:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
      break;
    case Intrinsic::amdgcn_ds_fmax:
      Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
      break;
    default:
      llvm_unreachable("Unknown intrinsic!");
    }
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3)  // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
      IdxEn = Idx->getZExtValue() != 0;
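    // The legacy buffer intrinsics take a single combined offset operand;
    // setBufferOffsets splits it into the voffset, soffset and immediate
    // offset fields expected by the hardware.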
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };

    setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
        AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;

    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();
    auto *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);

    // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
    if (LoadVT.getScalarType() == MVT::i8 ||
        LoadVT.getScalarType() == MVT::i16)
      return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);

    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
                               M->getMemOperand(), DAG);
  }
  case Intrinsic::amdgcn_raw_buffer_load:
  case Intrinsic::amdgcn_raw_buffer_load_format: {
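    // The raw variants have no vindex operand, so idxen is always 0 here; the
    // struct variants below always index and set idxen to 1.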
    auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,    // voffset
      Op.getOperand(4), // soffset
      Offsets.second,   // offset
      Op.getOperand(5), // cachepolicy
      DAG.getConstant(0, DL, MVT::i1), // idxen
    };

    unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
        AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;

    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();
    auto *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);

    // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
    if (LoadVT.getScalarType() == MVT::i8 ||
        LoadVT.getScalarType() == MVT::i16)
      return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);

    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
                               M->getMemOperand(), DAG);
  }
  case Intrinsic::amdgcn_struct_buffer_load:
  case Intrinsic::amdgcn_struct_buffer_load_format: {
    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Offsets.first,    // voffset
      Op.getOperand(5), // soffset
      Offsets.second,   // offset
      Op.getOperand(6), // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
    };

    unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
        AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;

    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();
    auto *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);

    // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
    if (LoadVT.getScalarType() == MVT::i8 ||
        LoadVT.getScalarType() == MVT::i16)
      return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);

    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
                               M->getMemOperand(), DAG);
  }
  case Intrinsic::amdgcn_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();

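    // The legacy tbuffer intrinsic passes dfmt and nfmt separately; pack them
    // into the unified format operand, with nfmt in bits 6:4.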
    unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
    unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
      IdxEn = Idx->getZExtValue() != 0;
    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Op.getOperand(4),  // voffset
      Op.getOperand(5),  // soffset
      Op.getOperand(6),  // offset
      DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
      DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
                               DAG);
  }
  case Intrinsic::amdgcn_raw_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();
    auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);

    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,     // voffset
      Op.getOperand(4),  // soffset
      Offsets.second,    // offset
      Op.getOperand(5),  // format
      Op.getOperand(6),  // cachepolicy
      DAG.getConstant(0, DL, MVT::i1), // idxen
    };

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
                               DAG);
  }
  case Intrinsic::amdgcn_struct_tbuffer_load: {
    MemSDNode *M = cast<MemSDNode>(Op);
    EVT LoadVT = Op.getValueType();
    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);

    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Offsets.first,     // voffset
      Op.getOperand(5),  // soffset
      Offsets.second,    // offset
      Op.getOperand(6),  // format
      Op.getOperand(7),  // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
    };

    if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
                               DAG);
  }
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor: {
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
      IdxEn = Idx->getZExtValue() != 0;
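    // Only slc comes from the intrinsic; it occupies bit 1 of the cachepolicy
    // operand, and glc (bit 0) is left clear here.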
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };
    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
    EVT VT = Op.getValueType();

    auto *M = cast<MemSDNode>(Op);
    unsigned Opcode = 0;

    switch (IntrID) {
    case Intrinsic::amdgcn_buffer_atomic_swap:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
      break;
    case Intrinsic::amdgcn_buffer_atomic_add:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
      break;
    case Intrinsic::amdgcn_buffer_atomic_sub:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
      break;
    case Intrinsic::amdgcn_buffer_atomic_smin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
      break;
    case Intrinsic::amdgcn_buffer_atomic_umin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
      break;
    case Intrinsic::amdgcn_buffer_atomic_smax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
      break;
    case Intrinsic::amdgcn_buffer_atomic_umax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
      break;
    case Intrinsic::amdgcn_buffer_atomic_and:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
      break;
    case Intrinsic::amdgcn_buffer_atomic_or:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
      break;
    case Intrinsic::amdgcn_buffer_atomic_xor:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
      break;
    default:
      llvm_unreachable("unhandled atomic opcode");
    }

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_raw_buffer_atomic_swap:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_inc:
  case Intrinsic::amdgcn_raw_buffer_atomic_dec: {
    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vdata
      Op.getOperand(3), // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,    // voffset
      Op.getOperand(5), // soffset
      Offsets.second,   // offset
      Op.getOperand(6), // cachepolicy
      DAG.getConstant(0, DL, MVT::i1), // idxen
    };
    EVT VT = Op.getValueType();

    auto *M = cast<MemSDNode>(Op);
    unsigned Opcode = 0;

    switch (IntrID) {
    case Intrinsic::amdgcn_raw_buffer_atomic_swap:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_add:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_sub:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_smin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_umin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_smax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_umax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_and:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_or:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_xor:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_inc:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
      break;
    case Intrinsic::amdgcn_raw_buffer_atomic_dec:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
      break;
    default:
      llvm_unreachable("unhandled atomic opcode");
    }

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
  case Intrinsic::amdgcn_struct_buffer_atomic_swap:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_inc:
  case Intrinsic::amdgcn_struct_buffer_atomic_dec: {
    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Offsets.first,    // voffset
      Op.getOperand(6), // soffset
      Offsets.second,   // offset
      Op.getOperand(7), // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
    };
    EVT VT = Op.getValueType();

    auto *M = cast<MemSDNode>(Op);
    unsigned Opcode = 0;

    switch (IntrID) {
    case Intrinsic::amdgcn_struct_buffer_atomic_swap:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_add:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_sub:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_smin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_umin:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_smax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_umax:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_and:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_or:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_xor:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_inc:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_INC;
      break;
    case Intrinsic::amdgcn_struct_buffer_atomic_dec:
      Opcode = AMDGPUISD::BUFFER_ATOMIC_DEC;
      break;
    default:
      llvm_unreachable("unhandled atomic opcode");
    }

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }
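  // For the cmpswap forms, the new value (src) and the comparison value (cmp)
  // are passed as separate operands and merged into the data register pair
  // during instruction selection.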
  case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
      IdxEn = Idx->getZExtValue() != 0;
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // src
      Op.getOperand(3), // cmp
      Op.getOperand(4), // rsrc
      Op.getOperand(5), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };
    setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT, M->getMemOperand());
  }
  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // src
      Op.getOperand(3), // cmp
      Op.getOperand(4), // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,    // voffset
      Op.getOperand(6), // soffset
      Offsets.second,   // offset
      Op.getOperand(7), // cachepolicy
      DAG.getConstant(0, DL, MVT::i1), // idxen
    };
    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT, M->getMemOperand());
  }
  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
    auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // src
      Op.getOperand(3), // cmp
      Op.getOperand(4), // rsrc
      Op.getOperand(5), // vindex
      Offsets.first,    // voffset
      Op.getOperand(7), // soffset
      Offsets.second,   // offset
      Op.getOperand(8), // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
    };
    EVT VT = Op.getValueType();
    auto *M = cast<MemSDNode>(Op);

    return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                   Op->getVTList(), Ops, VT, M->getMemOperand());
  }

  default:
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrID))
      return lowerImage(Op, ImageDimIntr, DAG);

    return SDValue();
  }
}

// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
// dwordx4 if on SI.
SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
                                              SDVTList VTList,
                                              ArrayRef<SDValue> Ops, EVT MemVT,
                                              MachineMemOperand *MMO,
                                              SelectionDAG &DAG) const {
  EVT VT = VTList.VTs[0];
  EVT WidenedVT = VT;
  EVT WidenedMemVT = MemVT;
  if (!Subtarget->hasDwordx3LoadStores() &&
      (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
    WidenedVT = EVT::getVectorVT(*DAG.getContext(),
                                 WidenedVT.getVectorElementType(), 4);
    WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
                                    WidenedMemVT.getVectorElementType(), 4);
    MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
  }

  assert(VTList.NumVTs == 2);
  SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);

  auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
                                       WidenedMemVT, MMO);
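  // If the type was widened, extract the original lanes back out and re-merge
  // them with the chain result.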
  if (WidenedVT != VT) {
    auto Extract = DAG.getNode(
        ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
        DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
    NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
  }
  return NewOp;
}

SDValue SITargetLowering::handleD16VData(SDValue VData,
                                         SelectionDAG &DAG) const {
  EVT StoreVT = VData.getValueType();

  // No change for f16 and legal vector D16 types.
  if (!StoreVT.isVector())
    return VData;

  SDLoc DL(VData);
  assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");

  if (Subtarget->hasUnpackedD16VMem()) {
    // We need to unpack the packed data to store.
    EVT IntStoreVT = StoreVT.changeTypeToInteger();
    SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);

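    // e.g. a packed v2f16 is bitcast to v2i16, zero-extended to v2i32, and
    // the extension is unrolled so each element occupies a full dword.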
    EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                        StoreVT.getVectorNumElements());
    SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
    return DAG.UnrollVectorOp(ZExt.getNode());
  }

  assert(isTypeLegal(StoreVT));
  return VData;
}

SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));

    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      Op.getOperand(4), // src0
      Op.getOperand(5), // src1
      Op.getOperand(6), // src2
      Op.getOperand(7), // src3
      DAG.getTargetConstant(0, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    SDValue Src0 = Op.getOperand(4);
    SDValue Src1 = Op.getOperand(5);
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));

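    // A compressed export packs four 16-bit values into src0/src1, so src2
    // and src3 are unused.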
    SDValue Undef = DAG.getUNDEF(MVT::f32);
    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
      Undef, // src2
      Undef, // src3
      DAG.getTargetConstant(1, DL, MVT::i1), // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_init_exec: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
                       Op.getOperand(2));
  }
  case Intrinsic::amdgcn_init_exec_from_input: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
                       Op.getOperand(2), Op.getOperand(3));
  }
  case Intrinsic::amdgcn_s_barrier: {
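    // If the whole workgroup fits in a single wave, the barrier is a no-op;
    // replace it with WAVE_BARRIER, which emits no code but still blocks
    // scheduling across it.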
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
      const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
      unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
      if (WGSize <= ST.getWavefrontSize())
        return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
                                          Op.getOperand(0)), 0);
    }
    return SDValue();
  }
  case Intrinsic::amdgcn_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
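    // Pack the separate dfmt and nfmt arguments into the unified format
    // operand, with nfmt in bits 6:4.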
    unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
    unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
      IdxEn = Idx->getZExtValue() != 0;
    SDValue Ops[] = {
      Chain,
      VData,             // vdata
      Op.getOperand(3),  // rsrc
      Op.getOperand(4),  // vindex
      Op.getOperand(5),  // voffset
      Op.getOperand(6),  // soffset
      Op.getOperand(7),  // offset
      DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
      DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_struct_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
      Chain,
      VData,             // vdata
      Op.getOperand(3),  // rsrc
      Op.getOperand(4),  // vindex
      Offsets.first,     // voffset
      Op.getOperand(6),  // soffset
      Offsets.second,    // offset
      Op.getOperand(7),  // format
      Op.getOperand(8),  // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_raw_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
    SDValue Ops[] = {
      Chain,
      VData,             // vdata
      Op.getOperand(3),  // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,     // voffset
      Op.getOperand(5),  // soffset
      Offsets.second,    // offset
      Op.getOperand(6),  // format
      Op.getOperand(7),  // cachepolicy
      DAG.getConstant(0, DL, MVT::i1), // idxen
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_buffer_store:
  case Intrinsic::amdgcn_buffer_store_format: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
      IdxEn = Idx->getZExtValue() != 0;
    SDValue Ops[] = {
      Chain,
      VData,
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      SDValue(),        // voffset -- will be set by setBufferOffsets
      SDValue(),        // soffset -- will be set by setBufferOffsets
      SDValue(),        // offset -- will be set by setBufferOffsets
      DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };
    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
    unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
                   AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
    Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
    MemSDNode *M = cast<MemSDNode>(Op);

    // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
    EVT VDataType = VData.getValueType().getScalarType();
    if (VDataType == MVT::i8 || VDataType == MVT::i16)
      return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_raw_buffer_store:
  case Intrinsic::amdgcn_raw_buffer_store_format: {
    const bool IsFormat =
        IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;

    SDValue VData = Op.getOperand(2);
    EVT VDataVT = VData.getValueType();
    EVT EltType = VDataVT.getScalarType();
    bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);

6892 if (!isTypeLegal(VDataVT)) {
6893 VData =
6894 DAG.getNode(ISD::BITCAST, DL,
6895 getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
6896 }
6897
Tim Renouf4f703f52018-08-21 11:07:10 +00006898 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6899 SDValue Ops[] = {
6900 Chain,
6901 VData,
6902 Op.getOperand(3), // rsrc
6903 DAG.getConstant(0, DL, MVT::i32), // vindex
6904 Offsets.first, // voffset
6905 Op.getOperand(5), // soffset
6906 Offsets.second, // offset
6907 Op.getOperand(6), // cachepolicy
6908 DAG.getConstant(0, DL, MVT::i1), // idxen
6909 };
Matt Arsenault0e0a1c82019-08-05 14:57:59 +00006910 unsigned Opc =
6911 IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
Tim Renouf4f703f52018-08-21 11:07:10 +00006912 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6913 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006914
6915 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
Matt Arsenault0e0a1c82019-08-05 14:57:59 +00006916 if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
6917 return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006918
Tim Renouf4f703f52018-08-21 11:07:10 +00006919 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6920 M->getMemoryVT(), M->getMemOperand());
6921 }

  case Intrinsic::amdgcn_struct_buffer_store:
  case Intrinsic::amdgcn_struct_buffer_store_format: {
    const bool IsFormat =
        IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;

    SDValue VData = Op.getOperand(2);
    EVT VDataVT = VData.getValueType();
    EVT EltType = VDataVT.getScalarType();
    bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);

    if (IsD16)
      VData = handleD16VData(VData, DAG);

    if (!isTypeLegal(VDataVT)) {
      VData =
          DAG.getNode(ISD::BITCAST, DL,
                      getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
    }

    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
      Chain,
      VData,
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Offsets.first, // voffset
      Op.getOperand(6), // soffset
      Offsets.second, // offset
      Op.getOperand(7), // cachepolicy
      DAG.getConstant(1, DL, MVT::i1), // idxen
    };
    unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
                   AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
    Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
    MemSDNode *M = cast<MemSDNode>(Op);

    // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
    EVT VDataType = VData.getValueType().getScalarType();
    if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
      return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_buffer_atomic_fadd: {
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned IdxEn = 1;
    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
      IdxEn = Idx->getZExtValue() != 0;
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2), // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      SDValue(), // voffset -- will be set by setBufferOffsets
      SDValue(), // soffset -- will be set by setBufferOffsets
      SDValue(), // offset -- will be set by setBufferOffsets
      DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
    };
    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
    EVT VT = Op.getOperand(2).getValueType();

    auto *M = cast<MemSDNode>(Op);
    unsigned Opcode = VT.isVector() ? AMDGPUISD::BUFFER_ATOMIC_PK_FADD
                                    : AMDGPUISD::BUFFER_ATOMIC_FADD;

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }

  case Intrinsic::amdgcn_global_atomic_fadd: {
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2), // ptr
      Op.getOperand(3)  // vdata
    };
    EVT VT = Op.getOperand(3).getValueType();

    auto *M = cast<MemSDNode>(Op);
    unsigned Opcode = VT.isVector() ? AMDGPUISD::ATOMIC_PK_FADD
                                    : AMDGPUISD::ATOMIC_FADD;

    return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
                                   M->getMemOperand());
  }

  case Intrinsic::amdgcn_end_cf:
    return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
                                      Op->getOperand(2), Chain), 0);

  default: {
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
      return lowerImage(Op, ImageDimIntr, DAG);

    return Op;
  }
  }
}

// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
// offset (the offset that is included in bounds checking and swizzling, to be
// split between the instruction's voffset and immoffset fields) and soffset
// (the offset that is excluded from bounds checking and swizzling, to go in
// the instruction's soffset field). This function takes the first kind of
// offset and figures out how to split it between voffset and immoffset.
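// As an illustrative example (not part of the original comment): a combined
// offset of 4100 is split into an immoffset of 4 and a voffset of 4096, since
// the immoffset field can only encode values up to 4095.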
std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
    SDValue Offset, SelectionDAG &DAG) const {
  SDLoc DL(Offset);
  const unsigned MaxImm = 4095;
  SDValue N0 = Offset;
  ConstantSDNode *C1 = nullptr;

  if ((C1 = dyn_cast<ConstantSDNode>(N0)))
    N0 = SDValue();
  else if (DAG.isBaseWithConstantOffset(N0)) {
    C1 = cast<ConstantSDNode>(N0.getOperand(1));
    N0 = N0.getOperand(0);
  }

  if (C1) {
    unsigned ImmOffset = C1->getZExtValue();
    // If the immediate value is too big for the immoffset field, put the low
    // 12 bits into the immoffset field, so that the value that is copied/added
    // for the voffset field is a multiple of 4096, and it stands more chance
    // of being CSEd with the copy/add for another similar load/store.
    // However, do not do that rounding down to a multiple of 4096 if that is a
    // negative number, as it appears to be illegal to have a negative offset
    // in the vgpr, even if adding the immediate offset makes it positive.
    unsigned Overflow = ImmOffset & ~MaxImm;
    ImmOffset -= Overflow;
    if ((int32_t)Overflow < 0) {
      Overflow += ImmOffset;
      ImmOffset = 0;
    }
    C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
    if (Overflow) {
      auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
      if (!N0)
        N0 = OverflowVal;
      else {
        SDValue Ops[] = { N0, OverflowVal };
        N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
      }
    }
  }
  if (!N0)
    N0 = DAG.getConstant(0, DL, MVT::i32);
  if (!C1)
    C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
  return {N0, SDValue(C1, 0)};
}

// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
// three offsets (voffset, soffset and instoffset) into the SDValue[3] array
// pointed to by Offsets.
void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
                                        SelectionDAG &DAG, SDValue *Offsets,
                                        unsigned Align) const {
  SDLoc DL(CombinedOffset);
  if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
    uint32_t Imm = C->getZExtValue();
    uint32_t SOffset, ImmOffset;
    if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
      Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
      Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
      Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
      return;
    }
  }
  if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
    SDValue N0 = CombinedOffset.getOperand(0);
    SDValue N1 = CombinedOffset.getOperand(1);
    uint32_t SOffset, ImmOffset;
    int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
    if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
                                                Subtarget, Align)) {
      Offsets[0] = N0;
      Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
      Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
      return;
    }
  }
  Offsets[0] = CombinedOffset;
  Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
  Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
}

// Handle 8-bit and 16-bit buffer loads
SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
                                                     EVT LoadVT, SDLoc DL,
                                                     ArrayRef<SDValue> Ops,
                                                     MemSDNode *M) const {
  EVT IntVT = LoadVT.changeTypeToInteger();
  unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
                 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;

  SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
                                               Ops, IntVT,
                                               M->getMemOperand());
  SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL,
                                        LoadVT.getScalarType(), BufferLoad);
  return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL);
}

// Handle 8-bit and 16-bit buffer stores
SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
                                                      EVT VDataType, SDLoc DL,
                                                      SDValue Ops[],
                                                      MemSDNode *M) const {
  if (VDataType == MVT::f16)
    Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);

  SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
  Ops[1] = BufferStoreExt;
  unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
                 AMDGPUISD::BUFFER_STORE_SHORT;
  ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
  return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
                                 M->getMemOperand());
}

static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
                                 ISD::LoadExtType ExtType, SDValue Op,
                                 const SDLoc &SL, EVT VT) {
  if (VT.bitsLT(Op.getValueType()))
    return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);

  switch (ExtType) {
  case ISD::SEXTLOAD:
    return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
  case ISD::ZEXTLOAD:
    return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
  case ISD::EXTLOAD:
    return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
  case ISD::NON_EXTLOAD:
    return Op;
  }

  llvm_unreachable("invalid ext type");
}

SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  if (Ld->getAlignment() < 4 || Ld->isDivergent())
    return SDValue();

  // FIXME: Constant loads should all be marked invariant.
  unsigned AS = Ld->getAddressSpace();
  if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
      AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
      (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
    return SDValue();

  // Don't do this early, since it may interfere with adjacent load merging for
  // illegal types. We can avoid losing alignment information for exotic types
  // pre-legalize.
  EVT MemVT = Ld->getMemoryVT();
  if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
      MemVT.getSizeInBits() >= 32)
    return SDValue();

  SDLoc SL(Ld);

  assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
         "unexpected vector extload");

  // TODO: Drop only high part of range.
  SDValue Ptr = Ld->getBasePtr();
  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                MVT::i32, SL, Ld->getChain(), Ptr,
                                Ld->getOffset(),
                                Ld->getPointerInfo(), MVT::i32,
                                Ld->getAlignment(),
                                Ld->getMemOperand()->getFlags(),
                                Ld->getAAInfo(),
                                nullptr); // Drop ranges

  EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  if (MemVT.isFloatingPoint()) {
    assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
           "unexpected fp extload");
    TruncVT = MemVT.changeTypeToInteger();
  }

  SDValue Cvt = NewLoad;
  if (Ld->getExtensionType() == ISD::SEXTLOAD) {
    Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
                      DAG.getValueType(TruncVT));
  } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
             Ld->getExtensionType() == ISD::NON_EXTLOAD) {
    Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
  } else {
    assert(Ld->getExtensionType() == ISD::EXTLOAD);
  }

  EVT VT = Ld->getValueType(0);
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());

  DCI.AddToWorklist(Cvt.getNode());

  // We may need to handle exotic cases, such as i16->i64 extloads, so insert
  // the appropriate extension from the 32-bit load.
  Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
  DCI.AddToWorklist(Cvt.getNode());

  // Handle conversion back to floating point if necessary.
  Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);

  return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
}
7237
Tom Stellard81d871d2013-11-13 23:36:50 +00007238SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7239 SDLoc DL(Op);
7240 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00007241 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00007242 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00007243
Matt Arsenaulta1436412016-02-10 18:21:45 +00007244 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00007245 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
7246 return SDValue();
7247
Matt Arsenault6dfda962016-02-10 18:21:39 +00007248 // FIXME: Copied from PPC
7249 // First, load into 32 bits, then truncate to 1 bit.
7250
7251 SDValue Chain = Load->getChain();
7252 SDValue BasePtr = Load->getBasePtr();
7253 MachineMemOperand *MMO = Load->getMemOperand();
7254
Tom Stellard115a6152016-11-10 16:02:37 +00007255 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
7256
Matt Arsenault6dfda962016-02-10 18:21:39 +00007257 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00007258 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00007259
Tim Renouf361b5b22019-03-21 12:01:21 +00007260 if (!MemVT.isVector()) {
7261 SDValue Ops[] = {
7262 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
7263 NewLD.getValue(1)
7264 };
7265
7266 return DAG.getMergeValues(Ops, DL);
7267 }
7268
7269 SmallVector<SDValue, 3> Elts;
7270 for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
7271 SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
7272 DAG.getConstant(I, DL, MVT::i32));
7273
7274 Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
7275 }
7276
Matt Arsenault6dfda962016-02-10 18:21:39 +00007277 SDValue Ops[] = {
Tim Renouf361b5b22019-03-21 12:01:21 +00007278 DAG.getBuildVector(MemVT, DL, Elts),
Matt Arsenault6dfda962016-02-10 18:21:39 +00007279 NewLD.getValue(1)
7280 };
7281
7282 return DAG.getMergeValues(Ops, DL);
7283 }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          *Load->getMemOperand())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  unsigned Alignment = Load->getAlignment();
  unsigned AS = Load->getAddressSpace();
  if (Subtarget->hasLDSMisalignedBug() &&
      AS == AMDGPUAS::FLAT_ADDRESS &&
      Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
    return SplitVectorLoad(Op, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUAS::FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;

  unsigned NumElements = MemVT.getVectorNumElements();

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
      if (MemVT.isPow2VectorType())
        return SDValue();
      if (NumElements == 3)
        return WidenVectorLoad(Op, DAG);
      return SplitVectorLoad(Op, DAG);
    }
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::GLOBAL_ADDRESS) {
    if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
        !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
        Alignment >= 4 && NumElements < 32) {
      if (MemVT.isPow2VectorType())
        return SDValue();
      if (NumElements == 3)
        return WidenVectorLoad(Op, DAG);
      return SplitVectorLoad(Op, DAG);
    }
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }
  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::GLOBAL_ADDRESS ||
      AS == AMDGPUAS::FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorLoad(Op, DAG);
    // v3 loads not supported on SI.
    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
      return WidenVectorLoad(Op, DAG);
    // v3 and v4 loads are supported for private and global memory.
    return SDValue();
  }
  if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    // Depending on the setting of the private_element_size field in the
    // resource descriptor, we can only make private accesses up to a certain
    // size.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorLoad(Load, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    case 16:
      // Same as global/flat
      if (NumElements > 4)
        return SplitVectorLoad(Op, DAG);
      // v3 loads not supported on SI.
      if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
        return WidenVectorLoad(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    // Use ds_read_b128 if possible.
    if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
        MemVT.getStoreSize() == 16)
      return SDValue();

    if (NumElements > 2)
      return SplitVectorLoad(Op, DAG);

    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offsets is in bounds. Split vectorized
    // loads here to avoid emitting ds_read2_b32. We may re-combine the
    // load later in the SILoadStoreOptimizer.
    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
        NumElements == 2 && MemVT.getStoreSize() == 8 &&
        Load->getAlignment() < 8) {
      return SplitVectorLoad(Op, DAG);
    }
  }
  return SDValue();
}

SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.getSizeInBits() == 64);

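  // Explanatory note: the 64-bit select is lowered as two 32-bit selects by
  // bitcasting both inputs to v2i32, selecting the low and high halves
  // separately, and reassembling the result.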
  SDLoc DL(Op);
  SDValue Cond = Op.getOperand(0);

  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue One = DAG.getConstant(1, DL, MVT::i32);

  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));

  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);

  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);

  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);

  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);

  SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

// Catch division cases where we can use shortcuts with rcp and rsq
// instructions.
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();
  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();

  if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
    return SDValue();

  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
    if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
      if (CLHS->isExactlyValue(1.0)) {
        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation have a worst-case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use them as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // 1.0 / sqrt(x) -> rsq(x)

        // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
        // error seems really high at 2^29 ULP.
        if (RHS.getOpcode() == ISD::FSQRT)
          return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));

        // 1.0 / x -> rcp(x)
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        // -1.0 / x -> rcp (fneg x)
        SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
      }
    }
  }

  if (Unsafe) {
    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
  }

  return SDValue();
}
7485
Tom Stellard8485fa02016-12-07 02:42:15 +00007486static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7487 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7488 if (GlueChain->getNumValues() <= 1) {
7489 return DAG.getNode(Opcode, SL, VT, A, B);
7490 }
7491
7492 assert(GlueChain->getNumValues() == 3);
7493
7494 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7495 switch (Opcode) {
7496 default: llvm_unreachable("no chain equivalent for opcode");
7497 case ISD::FMUL:
7498 Opcode = AMDGPUISD::FMUL_W_CHAIN;
7499 break;
7500 }
7501
7502 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7503 GlueChain.getValue(2));
7504}
7505
7506static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7507 EVT VT, SDValue A, SDValue B, SDValue C,
7508 SDValue GlueChain) {
7509 if (GlueChain->getNumValues() <= 1) {
7510 return DAG.getNode(Opcode, SL, VT, A, B, C);
7511 }
7512
7513 assert(GlueChain->getNumValues() == 3);
7514
7515 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7516 switch (Opcode) {
7517 default: llvm_unreachable("no chain equivalent for opcode");
7518 case ISD::FMA:
7519 Opcode = AMDGPUISD::FMA_W_CHAIN;
7520 break;
7521 }
7522
7523 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7524 GlueChain.getValue(2));
7525}

SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue Src0 = Op.getOperand(0);
  SDValue Src1 = Op.getOperand(1);

  SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
  SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);

  SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
  SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);

  SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
  SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}

// Faster 2.5 ULP division that does not support denormals.
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

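  // Explanatory note: K0 below is 2^96 and K1 is 2^-32 as f32 bit patterns.
  // When |RHS| > 2^96, the denominator is pre-scaled by 2^-32 so that the rcp
  // input stays in range; the final FMUL by r3 compensates for the scaling.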
  const APFloat K0Val(BitsToFloat(0x6f800000));
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  const APFloat K1Val(BitsToFloat(0x2f800000));
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // TODO: Should this propagate fast-math-flags?
  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  // rcp does not support denormals.
  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}

SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);

  SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                          RHS, RHS, LHS);
  SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                        LHS, RHS, LHS);

  // Denominator is scaled to not be denormal, so using rcp is ok.
  SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
                                  DenominatorScaled);
  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
                                     DenominatorScaled);

  const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
                               (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                               (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

  const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);

  if (!Subtarget->hasFP32Denormals()) {
    SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
                                                      SL, MVT::i32);
    SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
                                       DAG.getEntryNode(),
                                       EnableDenormValue, BitField);
    SDValue Ops[3] = {
      NegDivScale0,
      EnableDenorm.getValue(0),
      EnableDenorm.getValue(1)
    };

    NegDivScale0 = DAG.getMergeValues(Ops, SL);
  }

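  // Explanatory note on the sequence below (see the DIV_FMAS/DIV_FIXUP docs
  // for the authoritative description): Fma0 = 1 - d*r is the error of the
  // initial rcp, Fma1 is the refined reciprocal, Mul is the initial quotient,
  // Fma2 and Fma4 are quotient residuals, and Fma3 is the refined quotient.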
  SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
                             ApproxRcp, One, NegDivScale0);

  SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
                             ApproxRcp, Fma0);

  SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
                           Fma1, Fma1);

  SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
                             NumeratorScaled, Mul);

  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);

  SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
                             NumeratorScaled, Fma3);

  if (!Subtarget->hasFP32Denormals()) {
    const SDValue DisableDenormValue =
        DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
    SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
                                        Fma4.getValue(1),
                                        DisableDenormValue,
                                        BitField,
                                        Fma4.getValue(2));

    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      DisableDenorm, DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             Fma4, Fma1, Fma3, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
}

SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getTarget().Options.UnsafeFPMath)
    return lowerFastUnsafeFDIV(Op, DAG);

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (!Subtarget->hasUsableDivScaleConditionOutput()) {
    // Workaround a hardware bug on SI where the condition output from div_scale
    // is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out which scale to use for div_fmas.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}

SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}

SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    return DAG.getTruncStore(Store->getChain(), DL,
                             DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
                             Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() == MVT::i32);

  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                          *Store->getMemOperand())) {
    return expandUnalignedStore(Store, DAG);
  }

  unsigned AS = Store->getAddressSpace();
  if (Subtarget->hasLDSMisalignedBug() &&
      AS == AMDGPUAS::FLAT_ADDRESS &&
      Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
    return SplitVectorStore(Op, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUAS::FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;

  unsigned NumElements = VT.getVectorNumElements();
  if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
      AS == AMDGPUAS::FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);
    // v3 stores not supported on SI.
    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
      return SplitVectorStore(Op, DAG);
    return SDValue();
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4 || NumElements == 3)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    // Use ds_write_b128 if possible.
    if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
        VT.getStoreSize() == 16 && NumElements != 3)
      return SDValue();

    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);

    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offsets is in bounds. Split vectorized
    // stores here to avoid emitting ds_write2_b32. We may re-combine the
    // store later in the SILoadStoreOptimizer.
    if (!Subtarget->hasUsableDSOffset() &&
        NumElements == 2 && VT.getStoreSize() == 8 &&
        Store->getAlignment() < 8) {
      return SplitVectorStore(Op, DAG);
    }

    return SDValue();
  } else {
    llvm_unreachable("unhandled address space");
  }
}

SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  SDValue TrigVal;

  // TODO: Should this propagate fast-math-flags?

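  // Explanatory note: the hardware sin/cos take their input scaled by
  // 1/(2*pi), i.e. in revolutions rather than radians. On subtargets where
  // the instructions only accept a reduced input range, the fractional part
  // is taken first to bring the operand into range.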
  SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);

  if (Subtarget->hasTrigReducedRange()) {
    SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
    TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
  } else {
    TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
  }

  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}
7856
Tom Stellard354a43c2016-04-01 18:27:37 +00007857SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7858 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7859 assert(AtomicNode->isCompareAndSwap());
7860 unsigned AS = AtomicNode->getAddressSpace();
7861
7862 // No custom lowering required for local address space
Matt Arsenault0da63502018-08-31 05:49:54 +00007863 if (!isFlatGlobalAddrSpace(AS))
Tom Stellard354a43c2016-04-01 18:27:37 +00007864 return Op;
7865
7866 // Non-local address space requires custom lowering for atomic compare
7867 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7868 SDLoc DL(Op);
7869 SDValue ChainIn = Op.getOperand(0);
7870 SDValue Addr = Op.getOperand(1);
7871 SDValue Old = Op.getOperand(2);
7872 SDValue New = Op.getOperand(3);
7873 EVT VT = Op.getValueType();
7874 MVT SimpleVT = VT.getSimpleVT();
7875 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7876
Ahmed Bougacha128f8732016-04-26 21:15:30 +00007877 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00007878 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00007879
7880 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7881 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00007882}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
      DCI.AddToWorklist(Cvt.getNode());
      return Cvt;
    }
  }

  return SDValue();
}

// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)

// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of new constant offset. This eliminates one of the uses,
// and may allow the remaining use to also be simplified.
//
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               EVT MemVT,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We only do this to handle cases where it's profitable when there are
  // multiple uses of the add, so defer to the standard combine.
  if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
      N0->hasOneUse())
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());

  AddrMode AM;
  AM.HasBaseReg = true;
  AM.BaseOffs = Offset.getSExtValue();
  if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);

  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
                          (N0.getOpcode() == ISD::OR ||
                           N0->getFlags().hasNoUnsignedWrap()));

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
}

SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SDValue Ptr = N->getBasePtr();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // TODO: We could also do this for multiplies.
  if (Ptr.getOpcode() == ISD::SHL) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
                                          N->getMemoryVT(), DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up a 64-bit bitwise operation with a constant into two 32-bit
// and/or/xor ops. This will typically happen anyway for a VALU 64-bit and.
// This exposes other 32-bit integer combine opportunities since most 64-bit
// operations are decomposed this way. TODO: We won't want this for SALU
// especially if it is an inline immediate.
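// For example (illustrative): (and x:i64, 0x00000000ffffffff) splits into an
// AND of the low half with -1 and of the high half with 0; both halves then
// fold away, avoiding a 64-bit immediate materialization.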
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}

// Returns true if argument is a boolean value which is not serialized into
// memory or an argument and does not require v_cndmask_b32 to be deserialized.
static bool isBoolSGPR(SDValue V) {
  if (V.getValueType() != MVT::i1)
    return false;
  switch (V.getOpcode()) {
  default: break;
  case ISD::SETCC:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case AMDGPUISD::FP_CLASS:
    return true;
  }
  return false;
}
8047
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00008048// If a constant has all zeroes or all ones within each byte return it.
8049// Otherwise return 0.
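// For example (illustrative): 0x00ff00ff and 0xff000000 are returned
// unchanged, while 0x00f000ff returns 0 because one of its bytes is only
// partially set.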
static uint32_t getConstantPermuteMask(uint32_t C) {
  // 0xff for any zero byte in the mask
  uint32_t ZeroByteMask = 0;
  if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
  if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
  if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
  if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
  uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
  if ((NonZeroByteMask & C) != NonZeroByteMask)
    return 0; // Partial bytes selected.
  return C;
}

// Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns the select mask as used by
// v_perm_b32, or ~0 if the match fails.
// Note byte select encoding:
// value 0-3 selects corresponding source byte;
// value 0xc selects zero;
// value 0xff selects 0xff.
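// An illustrative example: (shl x, 8) moves source bytes 2..0 of x into
// result bytes 3..1 and zeroes byte 0, so it yields the mask 0x0201000c
// below.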
static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
  assert(V.getValueSizeInBits() == 32);

  if (V.getNumOperands() != 2)
    return ~0;

  ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!N1)
    return ~0;

  uint32_t C = N1->getZExtValue();

  switch (V.getOpcode()) {
  default:
    break;
  case ISD::AND:
    if (uint32_t ConstMask = getConstantPermuteMask(C)) {
      return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
    }
    break;

  case ISD::OR:
    if (uint32_t ConstMask = getConstantPermuteMask(C)) {
      return (0x03020100 & ~ConstMask) | ConstMask;
    }
    break;

  case ISD::SHL:
    if (C % 8)
      return ~0;

    return uint32_t((0x030201000c0c0c0cull << C) >> 32);

  case ISD::SRL:
    if (C % 8)
      return ~0;

    return uint32_t(0x0c0c0c0c03020100ull >> C);
  }

  return ~0;
}
8112
Matt Arsenaultd0101a22015-01-06 23:00:46 +00008113SDValue SITargetLowering::performAndCombine(SDNode *N,
8114 DAGCombinerInfo &DCI) const {
8115 if (DCI.isBeforeLegalize())
8116 return SDValue();
8117
8118 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00008119 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00008120 SDValue LHS = N->getOperand(0);
8121 SDValue RHS = N->getOperand(1);
8122
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00008123
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00008124 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
8125 if (VT == MVT::i64 && CRHS) {
8126 if (SDValue Split
8127 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
8128 return Split;
8129 }
8130
8131 if (CRHS && VT == MVT::i32) {
8132 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
8133 // nb = number of trailing zeroes in mask
8134 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
8135 // given that we are selecting 8 or 16 bit fields starting at byte boundary.
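    // For example (illustrative): (and (srl x, 8), 0xff00) becomes
    // (shl (bfe_u32 x, 16, 8), 8): the BFE extracts bits 16-23 of x and the
    // shift puts them back at bits 8-15.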
    uint64_t Mask = CRHS->getZExtValue();
    unsigned Bits = countPopulation(Mask);
    if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
        (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
      if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
        unsigned Shift = CShift->getZExtValue();
        unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
        unsigned Offset = NB + Shift;
        if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
          SDLoc SL(N);
          SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                    LHS->getOperand(0),
                                    DAG.getConstant(Offset, SL, MVT::i32),
                                    DAG.getConstant(Bits, SL, MVT::i32));
          EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
          SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
                                    DAG.getValueType(NarrowVT));
          SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
                                    DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
          return Shl;
        }
      }
    }

    // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
    if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
        isa<ConstantSDNode>(LHS.getOperand(2))) {
      uint32_t Sel = getConstantPermuteMask(Mask);
      if (!Sel)
        return SDValue();

      // Select 0xc for all zero bytes
      Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
                         LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
    }
  }

  // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
  // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();

    SDValue X = LHS.getOperand(0);
    SDValue Y = RHS.getOperand(0);
    if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
      return SDValue();

    if (LCC == ISD::SETO) {
      if (X != LHS.getOperand(1))
        return SDValue();

      if (RCC == ISD::SETUNE) {
        const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
        if (!C1 || !C1->isInfinity() || C1->isNegative())
          return SDValue();

        const uint32_t Mask = SIInstrFlags::N_NORMAL |
                              SIInstrFlags::N_SUBNORMAL |
                              SIInstrFlags::N_ZERO |
                              SIInstrFlags::P_ZERO |
                              SIInstrFlags::P_SUBNORMAL |
                              SIInstrFlags::P_NORMAL;

        static_assert(((~(SIInstrFlags::S_NAN |
                          SIInstrFlags::Q_NAN |
                          SIInstrFlags::N_INFINITY |
                          SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
                      "mask not equal");

        SDLoc DL(N);
        return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                           X, DAG.getConstant(Mask, DL, MVT::i32));
      }
    }
  }

  if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
    std::swap(LHS, RHS);

  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
      RHS.hasOneUse()) {
    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
    // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
    const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
    if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
        (RHS.getOperand(0) == LHS.getOperand(0) &&
         LHS.getOperand(0) == LHS.getOperand(1))) {
      const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
      unsigned NewMask = LCC == ISD::SETO ?
        Mask->getZExtValue() & ~OrdMask :
        Mask->getZExtValue() & OrdMask;

      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
                         DAG.getConstant(NewMask, DL, MVT::i32));
    }
  }

  if (VT == MVT::i32 &&
      (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
    // and x, (sext cc from i1) => select cc, x, 0
    if (RHS.getOpcode() != ISD::SIGN_EXTEND)
      std::swap(LHS, RHS);
    if (isBoolSGPR(RHS.getOperand(0)))
      return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
                           LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
  }

  // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
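  // For example (illustrative): (and (or a, 0x00ffffff), (or b, 0xffffff00))
  // uses byte 3 of a and byte 0 of b and becomes (perm a, b, 0x07ffff00),
  // with the two middle bytes folded to the constant 0xff.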
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
      N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
    uint32_t LHSMask = getPermuteMask(DAG, LHS);
    uint32_t RHSMask = getPermuteMask(DAG, RHS);
    if (LHSMask != ~0u && RHSMask != ~0u) {
      // Canonicalize the expression in an attempt to have fewer unique masks
      // and therefore fewer registers used to hold the masks.
      if (LHSMask > RHSMask) {
        std::swap(LHSMask, RHSMask);
        std::swap(LHS, RHS);
      }

      // Select 0xc for each lane used from the source operand: zero bytes
      // have 0xc set in the mask, 0xff bytes have 0xff, and actual lanes are
      // in the 0-3 range.
      uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
      uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;

      // Check if we need to combine values from two sources within a byte.
      if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and the low word, keep it for SDWA.
          // TODO: teach SDWA to work with v_perm_b32 and remove the check.
          !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a selector in the 0-3 range or has
        // higher bits set: 0xff for 0xff, or 0x0c for zero. If 0x0c appears
        // in either mask the result byte is zero, so that byte must be forced
        // to 0x0c; otherwise the mask which is not 0xff wins, which is what
        // anding the two masks produces.
        uint32_t Mask = LHSMask & RHSMask;
        for (unsigned I = 0; I < 32; I += 8) {
          uint32_t ByteSel = 0xffu << I;
          if ((LHSMask & ByteSel) == (0x0cu << I) ||
              (RHSMask & ByteSel) == (0x0cu << I))
            Mask = (Mask & ~ByteSel) | (0x0cu << I);
        }

        // Add 4 to each active LHS lane. It will not affect any existing 0xff
        // or 0x0c.
        uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
        SDLoc DL(N);

        return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
                           LHS.getOperand(0), RHS.getOperand(0),
                           DAG.getConstant(Sel, DL, MVT::i32));
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performOrCombine(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  EVT VT = N->getValueType(0);
  if (VT == MVT::i1) {
    // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
    if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
        RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
      SDValue Src = LHS.getOperand(0);
      if (Src != RHS.getOperand(0))
        return SDValue();

      const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
      const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
      if (!CLHS || !CRHS)
        return SDValue();

      // Only 10 bits are used.
      static const uint32_t MaxMask = 0x3ff;

      uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                         Src, DAG.getConstant(NewMask, DL, MVT::i32));
    }

    return SDValue();
  }

  // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
  if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
      LHS.getOpcode() == AMDGPUISD::PERM &&
      isa<ConstantSDNode>(LHS.getOperand(2))) {
    uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
    if (!Sel)
      return SDValue();

    Sel |= LHS.getConstantOperandVal(2);
    SDLoc DL(N);
    return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
                       LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
  }

  // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
8347 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
8348 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
8349 uint32_t LHSMask = getPermuteMask(DAG, LHS);
8350 uint32_t RHSMask = getPermuteMask(DAG, RHS);
8351 if (LHSMask != ~0u && RHSMask != ~0u) {
8352 // Canonicalize the expression in an attempt to have fewer unique masks
8353 // and therefore fewer registers used to hold the masks.
8354 if (LHSMask > RHSMask) {
8355 std::swap(LHSMask, RHSMask);
8356 std::swap(LHS, RHS);
8357 }
8358
8359 // Select 0xc for each lane used from source operand. Zero has 0xc mask
8360 // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
8361 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8362 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
8363
8364 // Check of we need to combine values from two sources within a byte.
8365 if (!(LHSUsedLanes & RHSUsedLanes) &&
8366 // If we select high and lower word keep it for SDWA.
8367 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
8368 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
8369 // Kill zero bytes selected by other mask. Zero value is 0xc.
8370 LHSMask &= ~RHSUsedLanes;
8371 RHSMask &= ~LHSUsedLanes;
8372 // Add 4 to each active LHS lane
8373 LHSMask |= LHSUsedLanes & 0x04040404;
8374 // Combine masks
8375 uint32_t Sel = LHSMask | RHSMask;
8376 SDLoc DL(N);
8377
8378 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
8379 LHS.getOperand(0), RHS.getOperand(0),
8380 DAG.getConstant(Sel, DL, MVT::i32));
8381 }
8382 }
8383 }
8384
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00008385 if (VT != MVT::i64)
8386 return SDValue();
8387
8388 // TODO: This could be a generic combine with a predicate for extracting the
8389 // high half of an integer being free.
8390
8391 // (or i64:x, (zero_extend i32:y)) ->
8392 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
8393 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
8394 RHS.getOpcode() != ISD::ZERO_EXTEND)
8395 std::swap(LHS, RHS);
8396
8397 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
8398 SDValue ExtSrc = RHS.getOperand(0);
8399 EVT SrcVT = ExtSrc.getValueType();
8400 if (SrcVT == MVT::i32) {
8401 SDLoc SL(N);
8402 SDValue LowLHS, HiBits;
8403 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
8404 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
8405
8406 DCI.AddToWorklist(LowOr.getNode());
8407 DCI.AddToWorklist(HiBits.getNode());
8408
8409 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
8410 LowOr, HiBits);
8411 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00008412 }
8413 }
8414
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00008415 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
8416 if (CRHS) {
8417 if (SDValue Split
8418 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
8419 return Split;
8420 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00008421
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00008422 return SDValue();
8423}
Matt Arsenaultf2290332015-01-06 23:00:39 +00008424
SDValue SITargetLowering::performXorCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

// Instructions that will be lowered with a final instruction that zeros the
// high result bits.
// XXX - probably only need to list legal operations.
static bool fp16SrcZerosHighBits(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FCANONICALIZE:
  case ISD::FP_ROUND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::LDEXP:
    return true;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}

SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (!Subtarget->has16BitInsts() ||
      DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();

  SDValue Src = N->getOperand(0);
  if (Src.getValueType() != MVT::i16)
    return SDValue();

  // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
  // FIXME: It is not universally true that the high bits are zeroed on gfx9.
  if (Src.getOpcode() == ISD::BITCAST) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::f16 &&
        fp16SrcZerosHighBits(BCSrc.getOpcode()))
      return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
  }

  return SDValue();
}

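// Fold a sign extension of an unsigned buffer load into the corresponding
// signed load, e.g. (sext_inreg (buffer_load_ubyte ...), i8) ->
// (buffer_load_byte ...).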
SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
                                                        DAGCombinerInfo &DCI)
                                                        const {
  SDValue Src = N->getOperand(0);
  auto *VTSign = cast<VTSDNode>(N->getOperand(1));

  if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
        VTSign->getVT() == MVT::i8) ||
       (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
        VTSign->getVT() == MVT::i16)) &&
      Src.hasOneUse()) {
    auto *M = cast<MemSDNode>(Src);
    SDValue Ops[] = {
      Src.getOperand(0), // Chain
      Src.getOperand(1), // rsrc
      Src.getOperand(2), // vindex
      Src.getOperand(3), // voffset
      Src.getOperand(4), // soffset
      Src.getOperand(5), // offset
      Src.getOperand(6),
      Src.getOperand(7)
    };
    // replace with BUFFER_LOAD_BYTE/SHORT
    SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
                                         Src.getOperand(0).getValueType());
    unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
                   AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
    SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
                                                            ResList,
                                                            Ops, M->getMemoryVT(),
                                                            M->getMemOperand());
    return DCI.DAG.getMergeValues({BufferLoadSignExt,
                                   BufferLoadSignExt.getValue(1)}, SDLoc(N));
  }
  return SDValue();
}

SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  if (N->getOperand(0).isUndef())
    return DAG.getUNDEF(MVT::i1);

  return SDValue();
}

SDValue SITargetLowering::performRcpCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);

  if (N0.isUndef())
    return N0;

  if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
                         N0.getOpcode() == ISD::SINT_TO_FP)) {
    return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
                           N->getFlags());
  }

  return AMDGPUTargetLowering::performRcpCombine(N, DCI);
}

bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
                                       unsigned MaxDepth) const {
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::FCANONICALIZE)
    return true;

  if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    auto F = CFP->getValueAPF();
    if (F.isNaN() && F.isSignaling())
      return false;
    return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
  }

  // If source is a result of another standard FP operation it is already in
  // canonical form.
  if (MaxDepth == 0)
    return false;

  switch (Opcode) {
  // These will flush denorms if required.
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FSQRT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::TRIG_PREOP:
  case AMDGPUISD::DIV_SCALE:
  case AMDGPUISD::DIV_FMAS:
  case AMDGPUISD::DIV_FIXUP:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::LDEXP:
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return true;

  // It can/will be lowered or combined as a bit operation.
  // Need to check their input recursively to handle.
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FCOPYSIGN:
    return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);

  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FSINCOS:
    return Op.getValueType().getScalarType() != MVT::f16;

  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMIN3: {
    // FIXME: Shouldn't treat the generic operations differently based on
    // these. However, we aren't really required to flush the result from
    // minnum/maxnum.

    // sNaNs will be quieted, so we only need to worry about denormals.
    if (Subtarget->supportsMinMaxDenormModes() ||
        denormalsEnabledForType(Op.getValueType()))
      return true;

    // Flushing may be required.
    // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms, so for
    // such targets we need to check the inputs recursively.

    // FIXME: Does this apply with clamp? It's implemented with max.
    for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
      if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
        return false;
    }

    return true;
  }
  case ISD::SELECT: {
    return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
           isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
  }
  case ISD::BUILD_VECTOR: {
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      SDValue SrcOp = Op.getOperand(i);
      if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
        return false;
    }

    return true;
  }
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::EXTRACT_SUBVECTOR: {
    return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
  }
  case ISD::INSERT_VECTOR_ELT: {
    return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
           isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
  }
  case ISD::UNDEF:
    // Could be anything.
    return false;

  case ISD::BITCAST: {
    // Hack around the mess we make when legalizing extract_vector_elt
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType() == MVT::i16 &&
        Src.getOpcode() == ISD::TRUNCATE) {
      SDValue TruncSrc = Src.getOperand(0);
      if (TruncSrc.getValueType() == MVT::i32 &&
          TruncSrc.getOpcode() == ISD::BITCAST &&
          TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
        return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
      }
    }

    return false;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID
      = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    // TODO: Handle more intrinsics
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_cvt_pkrtz:
    case Intrinsic::amdgcn_cubeid:
    case Intrinsic::amdgcn_frexp_mant:
    case Intrinsic::amdgcn_fdot2:
      return true;
    default:
      break;
    }

    LLVM_FALLTHROUGH;
  }
  default:
    return denormalsEnabledForType(Op.getValueType()) &&
           DAG.isKnownNeverSNaN(Op);
  }

  llvm_unreachable("invalid operation");
}

// Constant fold canonicalize.
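// For example (illustrative): with f32 denormals disabled a denormal constant
// folds to +0.0, and a signaling NaN folds to the canonical quiet NaN
// (0x7fc00000 for f32).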
SDValue SITargetLowering::getCanonicalConstantFP(
  SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
  // Flush denormals to 0 if not enabled.
  if (C.isDenormal() && !denormalsEnabledForType(VT))
    return DAG.getConstantFP(0.0, SL, VT);

  if (C.isNaN()) {
    APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
    if (C.isSignaling()) {
      // Quiet a signaling NaN.
      // FIXME: Is this supposed to preserve payload bits?
      return DAG.getConstantFP(CanonicalQNaN, SL, VT);
    }

    // Make sure it is the canonical NaN bitpattern.
    //
    // TODO: Can we use -1 as the canonical NaN value since it's an inline
    // immediate?
    if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
      return DAG.getConstantFP(CanonicalQNaN, SL, VT);
  }

  // Already canonical.
  return DAG.getConstantFP(C, SL, VT);
}

static bool vectorEltWillFoldAway(SDValue Op) {
  return Op.isUndef() || isa<ConstantFPSDNode>(Op);
}

SDValue SITargetLowering::performFCanonicalizeCombine(
  SDNode *N,
  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fcanonicalize undef -> qnan
  if (N0.isUndef()) {
    APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
    return DAG.getConstantFP(QNaN, SDLoc(N), VT);
  }

  if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
    EVT VT = N->getValueType(0);
    return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
  }

  // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
  //                                                   (fcanonicalize k)
  //
  // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0

  // TODO: This could be better with wider vectors that will be split to v2f16,
  // and to consider uses since there aren't that many packed operations.
  if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
      isTypeLegal(MVT::v2f16)) {
    SDLoc SL(N);
    SDValue NewElts[2];
    SDValue Lo = N0.getOperand(0);
    SDValue Hi = N0.getOperand(1);
    EVT EltVT = Lo.getValueType();

    if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
      for (unsigned I = 0; I != 2; ++I) {
        SDValue Op = N0.getOperand(I);
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
          NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
                                              CFP->getValueAPF());
        } else if (Op.isUndef()) {
          // Handled below based on what the other operand is.
          NewElts[I] = Op;
        } else {
          NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
        }
      }

      // If one half is undef and the other is constant, prefer a splat vector
      // rather than the normal qNaN. If it's a register, prefer 0.0 since
      // that's cheaper to use and may be free with a packed operation.
      if (NewElts[0].isUndef()) {
        NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
          NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
      }

      if (NewElts[1].isUndef()) {
        NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
          NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
      }

      return DAG.getBuildVector(VT, SL, NewElts);
    }
  }

  unsigned SrcOpc = N0.getOpcode();

  // If it's free to do so, push canonicalizes further up the source, which may
  // find a canonical source.
  //
  // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
  // sNaNs.
  if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
    auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
    if (CRHS && N0.hasOneUse()) {
      SDLoc SL(N);
      SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
                                   N0.getOperand(0));
      SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
      DCI.AddToWorklist(Canon0.getNode());

      return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
    }
  }

  return isCanonicalized(DAG, N0) ? N0 : SDValue();
}

static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
  case ISD::FMAXNUM_IEEE:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
  case ISD::FMINNUM_IEEE:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}

SDValue SITargetLowering::performIntMed3ImmCombine(
  SelectionDAG &DAG, const SDLoc &SL,
  SDValue Op0, SDValue Op1, bool Signed) const {
  ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  if (Signed) {
    if (K0->getAPIntValue().sge(K1->getAPIntValue()))
      return SDValue();
  } else {
    if (K0->getAPIntValue().uge(K1->getAPIntValue()))
      return SDValue();
  }

  EVT VT = K0->getValueType(0);
  unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
  if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
    return DAG.getNode(Med3Opc, SL, VT,
                       Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
  }

  // If there isn't a 16-bit med3 operation, convert to 32-bit.
  MVT NVT = MVT::i32;
  unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
  SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
  SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);

  SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}

static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return C;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
    if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
      return C;
  }

  return nullptr;
}

SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
                                                  const SDLoc &SL,
                                                  SDValue Op0,
                                                  SDValue Op1) const {
  ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
  if (!K1)
    return SDValue();

  ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // Ordered >= (although NaN inputs should have folded away by now).
  APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
  if (Cmp == APFloat::cmpGreaterThan)
    return SDValue();

  const MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  // TODO: Check IEEE bit enabled?
  EVT VT = Op0.getValueType();
  if (Info->getMode().DX10Clamp) {
    // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
    // hardware fmed3 behavior converting to a min.
    // FIXME: Should this be allowing -0.0?
    if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
  }

  // med3 for f16 is only available on gfx9+, and not available for v2f16.
  if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
    // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
    // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
    // then give the other result, which is different from med3 with a NaN
    // input.
    SDValue Var = Op0.getOperand(0);
    if (!DAG.isKnownNeverSNaN(Var))
      return SDValue();

    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

    if ((!K0->hasOneUse() ||
         TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
        (!K1->hasOneUse() ||
         TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
      return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
                         Var, SDValue(K0, 0), SDValue(K1, 0));
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.

  if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
      !VT.isVector() &&
      (VT == MVT::i32 || VT == MVT::f32 ||
       ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
    // max(max(a, b), c) -> max3(a, b, c)
    // min(min(a, b), c) -> min3(a, b, c)
    if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0.getOperand(0),
                         Op0.getOperand(1),
                         Op1);
    }

    // Try commuted.
    // max(a, max(b, c)) -> max3(a, b, c)
    // min(a, min(b, c)) -> min3(a, b, c)
    if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0,
                         Op1.getOperand(0),
                         Op1.getOperand(1));
    }
  }

  // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
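  // For example (illustrative): min(max(x, 0), 255) becomes
  // (smed3 x, 0, 255), a single v_med3_i32.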
  if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
      return Med3;
  }

  if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
      return Med3;
  }

  // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
  if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
       (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
       (Opc == AMDGPUISD::FMIN_LEGACY &&
        Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
      (VT == MVT::f32 || VT == MVT::f64 ||
       (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
       (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
      Op0.hasOneUse()) {
    if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
      return Res;
  }

  return SDValue();
}

static bool isClampZeroToOne(SDValue A, SDValue B) {
  if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
    if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
      // FIXME: Should this be allowing -0.0?
      return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
             (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
    }
  }

  return false;
}

// FIXME: Should only worry about snans for version with chain.
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
  // NaNs. With a NaN input, the order of the operands may change the result.

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);

  if (isClampZeroToOne(Src0, Src1)) {
    // const_a, const_b, x -> clamp is safe in all cases including signaling
    // nans.
    // FIXME: Should this be allowing -0.0?
    return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
  }

  const MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
  // handling no dx10-clamp?
  if (Info->getMode().DX10Clamp) {
    // If NaN is clamped to 0, we are free to reorder the inputs.

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
      std::swap(Src1, Src2);

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isClampZeroToOne(Src1, Src2))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
  }

  return SDValue();
}

SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  if (Src0.isUndef() && Src1.isUndef())
    return DCI.DAG.getUNDEF(N->getValueType(0));
  return SDValue();
}

SDValue SITargetLowering::performExtractVectorEltCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDValue Vec = N->getOperand(0);
  SelectionDAG &DAG = DCI.DAG;

  EVT VecVT = Vec.getValueType();
  EVT EltVT = VecVT.getVectorElementType();

  if ((Vec.getOpcode() == ISD::FNEG ||
       Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Vec.getOperand(0), Idx);
    return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
  }

  // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
  // =>
  // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
  // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
  // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
  if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    unsigned Opc = Vec.getOpcode();

    switch(Opc) {
    default:
      break;
      // TODO: Support other binary operations.
    case ISD::FADD:
    case ISD::FSUB:
    case ISD::FMUL:
    case ISD::ADD:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::FMAXNUM:
    case ISD::FMINNUM:
    case ISD::FMAXNUM_IEEE:
    case ISD::FMINNUM_IEEE: {
      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                                 Vec.getOperand(0), Idx);
      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                                 Vec.getOperand(1), Idx);

      DCI.AddToWorklist(Elt0.getNode());
      DCI.AddToWorklist(Elt1.getNode());
      return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
    }
    }
  }

  unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

  // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access.
  // Sub-dword vectors of size 2 dword or less have better implementation.
  // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
  // instructions.
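  // For example (illustrative), for <4 x i32> this builds the chain
  //   select (idx == 3), v3,
  //     (select (idx == 2), v2, (select (idx == 1), v1, v0))
  // out of v_cndmask_b32s.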
  if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
      !isa<ConstantSDNode>(N->getOperand(1))) {
    SDLoc SL(N);
    SDValue Idx = N->getOperand(1);
    EVT IdxVT = Idx.getValueType();
    SDValue V;
    for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
      SDValue IC = DAG.getConstant(I, SL, IdxVT);
      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
      if (I == 0)
        V = Elt;
      else
        V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
    }
    return V;
  }

  if (!DCI.isBeforeLegalize())
    return SDValue();

  // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
  // elements. This exposes more load reduction opportunities by replacing
  // multiple small extract_vector_elements with a single 32-bit extract.
  auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (isa<MemSDNode>(Vec) &&
      EltSize <= 16 &&
      EltVT.isByteSized() &&
      VecSize > 32 &&
      VecSize % 32 == 0 &&
      Idx) {
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);

    unsigned BitIndex = Idx->getZExtValue() * EltSize;
    unsigned EltIdx = BitIndex / 32;
    unsigned LeftoverBitIdx = BitIndex % 32;
    SDLoc SL(N);

    SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
    DCI.AddToWorklist(Cast.getNode());

    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
                              DAG.getConstant(EltIdx, SL, MVT::i32));
    DCI.AddToWorklist(Elt.getNode());
    SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
                              DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
    DCI.AddToWorklist(Srl.getNode());

    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(),
                                Srl);
    DCI.AddToWorklist(Trunc.getNode());
    return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
  }

  return SDValue();
}

SDValue
SITargetLowering::performInsertVectorEltCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SDValue Vec = N->getOperand(0);
  SDValue Idx = N->getOperand(2);
  EVT VecVT = Vec.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

  // INSERT_VECTOR_ELT (<n x e>, var-idx)
  // => BUILD_VECTOR n x select (e, const-idx)
  // This eliminates the non-constant index and the subsequent movrel or
  // scratch access.
  // Sub-dword vectors of size 2 dword or less have better implementation.
  // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
  // instructions.
  if (isa<ConstantSDNode>(Idx) ||
      VecSize > 256 || (VecSize <= 64 && EltSize < 32))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  SDValue Ins = N->getOperand(1);
  EVT IdxVT = Idx.getValueType();

  SmallVector<SDValue, 16> Ops;
  for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
    SDValue IC = DAG.getConstant(I, SL, IdxVT);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
    SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
    Ops.push_back(V);
  }

  return DAG.getBuildVector(VecVT, SL, Ops);
}

unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does not
  // support denormals ever.
  if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
       (VT == MVT::f16 && !Subtarget->hasFP16Denormals() &&
        getSubtarget()->hasMadF16())) &&
      isOperationLegal(ISD::FMAD, VT))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
       (N0->getFlags().hasAllowContract() &&
        N1->getFlags().hasAllowContract())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}

// For a reassociatable opcode perform:
// op x, (op y, z) -> op (op x, z), y, if x and z are uniform
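// For example (illustrative): with x and z uniform and y divergent,
// add x, (add y, z) becomes add (add x, z), y, so the uniform half can be
// computed once on the SALU instead of per lane.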
SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
                                               SelectionDAG &DAG) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  if (!(Op0->isDivergent() ^ Op1->isDivergent()))
    return SDValue();

  if (Op0->isDivergent())
    std::swap(Op0, Op1);

  if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
    return SDValue();

  SDValue Op2 = Op1.getOperand(1);
  Op1 = Op1.getOperand(0);
  if (!(Op1->isDivergent() ^ Op2->isDivergent()))
    return SDValue();

  if (Op1->isDivergent())
    std::swap(Op1, Op2);

  // If either operand is constant this will conflict with
  // DAGCombiner::ReassociateOps().
  if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
      DAG.isConstantIntBuildVectorOrConstantInt(Op1))
    return SDValue();

  SDLoc SL(N);
  SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
  return DAG.getNode(Opc, SL, VT, Add1, Op2);
}

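// Build a MAD_I64_I32/MAD_U64_U32 node, i.e. a 32x32->64-bit multiply-add.
// Used by performAddCombine below, e.g. (illustrative)
// (add (mul i64:x, i64:y), z) with x and y known to fit in 32 bits becomes
// mad_u64_u32 on the truncated operands.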
Matt Arsenault4f6318f2017-11-06 17:04:37 +00009355static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
9356 EVT VT,
9357 SDValue N0, SDValue N1, SDValue N2,
9358 bool Signed) {
9359 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
9360 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
9361 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
9362 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
9363}
9364
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009365SDValue SITargetLowering::performAddCombine(SDNode *N,
9366 DAGCombinerInfo &DCI) const {
9367 SelectionDAG &DAG = DCI.DAG;
9368 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009369 SDLoc SL(N);
9370 SDValue LHS = N->getOperand(0);
9371 SDValue RHS = N->getOperand(1);
9372
Matt Arsenault4f6318f2017-11-06 17:04:37 +00009373 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
9374 && Subtarget->hasMad64_32() &&
9375 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
9376 VT.getScalarSizeInBits() <= 64) {
9377 if (LHS.getOpcode() != ISD::MUL)
9378 std::swap(LHS, RHS);
9379
9380 SDValue MulLHS = LHS.getOperand(0);
9381 SDValue MulRHS = LHS.getOperand(1);
9382 SDValue AddRHS = RHS;
9383
9384 // TODO: Maybe restrict if SGPR inputs.
9385 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
9386 numBitsUnsigned(MulRHS, DAG) <= 32) {
9387 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
9388 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
9389 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
9390 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
9391 }
9392
9393 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
9394 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
9395 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
9396 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
9397 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
9398 }
9399
9400 return SDValue();
9401 }
9402
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00009403 if (SDValue V = reassociateScalarOps(N, DAG)) {
9404 return V;
9405 }
9406
Farhana Aleen07e61232018-05-02 18:16:39 +00009407 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00009408 return SDValue();
9409
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009410 // add x, zext (setcc) => addcarry x, 0, setcc
9411 // add x, sext (setcc) => subcarry x, 0, setcc
9412 unsigned Opc = LHS.getOpcode();
9413 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009414 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009415 std::swap(RHS, LHS);
9416
9417 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009418 switch (Opc) {
9419 default: break;
9420 case ISD::ZERO_EXTEND:
9421 case ISD::SIGN_EXTEND:
9422 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009423 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00009424 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00009425 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009426 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
9427 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
9428 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
9429 return DAG.getNode(Opc, SL, VTList, Args);
9430 }
9431 case ISD::ADDCARRY: {
9432 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
9433 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
9434 if (!C || C->getZExtValue() != 0) break;
9435 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
9436 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
9437 }
9438 }
9439 return SDValue();
9440}
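// Illustrative example of the carry folding above: for an i32 add whose RHS
// is a zero-extended SGPR condition,
//   (add x, (zext cc)) --> (addcarry x, 0, cc)
// which can select to v_addc_u32 with cc as the carry-in instead of
// materializing the extension. The sign-extend form becomes a subcarry
// because sext(i1 true) is -1.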
9441
9442SDValue SITargetLowering::performSubCombine(SDNode *N,
9443 DAGCombinerInfo &DCI) const {
9444 SelectionDAG &DAG = DCI.DAG;
9445 EVT VT = N->getValueType(0);
9446
9447 if (VT != MVT::i32)
9448 return SDValue();
9449
9450 SDLoc SL(N);
9451 SDValue LHS = N->getOperand(0);
9452 SDValue RHS = N->getOperand(1);
9453
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009454 if (LHS.getOpcode() == ISD::SUBCARRY) {
9455 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
9456 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
Stanislav Mekhanoshin42e229e2019-02-21 02:58:00 +00009457 if (!C || !C->isNullValue())
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009458 return SDValue();
9459 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
9460 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
9461 }
9462 return SDValue();
9463}
9464
9465SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9466 DAGCombinerInfo &DCI) const {
9467
9468 if (N->getValueType(0) != MVT::i32)
9469 return SDValue();
9470
9471 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9472 if (!C || C->getZExtValue() != 0)
9473 return SDValue();
9474
9475 SelectionDAG &DAG = DCI.DAG;
9476 SDValue LHS = N->getOperand(0);
9477
9478 // addcarry (add x, y), 0, cc => addcarry x, y, cc
9479 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9480 unsigned LHSOpc = LHS.getOpcode();
9481 unsigned Opc = N->getOpcode();
9482 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9483 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9484 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9485 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009486 }
9487 return SDValue();
9488}
9489
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009490SDValue SITargetLowering::performFAddCombine(SDNode *N,
9491 DAGCombinerInfo &DCI) const {
9492 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9493 return SDValue();
9494
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009495 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00009496 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00009497
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009498 SDLoc SL(N);
9499 SDValue LHS = N->getOperand(0);
9500 SDValue RHS = N->getOperand(1);
9501
9502 // These should really be instruction patterns, but writing patterns with
9503 // source modifiers is a pain.
9504
9505 // fadd (fadd (a, a), b) -> mad 2.0, a, b
9506 if (LHS.getOpcode() == ISD::FADD) {
9507 SDValue A = LHS.getOperand(0);
9508 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009509 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009510 if (FusedOp != 0) {
9511 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009512 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00009513 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009514 }
9515 }
9516
9517 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9518 if (RHS.getOpcode() == ISD::FADD) {
9519 SDValue A = RHS.getOperand(0);
9520 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009521 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009522 if (FusedOp != 0) {
9523 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009524 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00009525 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009526 }
9527 }
9528
9529 return SDValue();
9530}
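// Worked example (illustrative): with fp32 denormals disabled,
//   fadd (fadd a, a), b
// becomes a single fused op such as v_mad_f32 2.0, a, b (or the FMA form,
// depending on what getFusedOpcode chooses for the subtarget), replacing two
// VALU adds with one instruction.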
9531
9532SDValue SITargetLowering::performFSubCombine(SDNode *N,
9533 DAGCombinerInfo &DCI) const {
9534 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9535 return SDValue();
9536
9537 SelectionDAG &DAG = DCI.DAG;
9538 SDLoc SL(N);
9539 EVT VT = N->getValueType(0);
9540 assert(!VT.isVector());
9541
9542 // Try to get the fneg to fold into the source modifier. This undoes generic
9543 // DAG combines and folds them into the mad.
9544 //
9545 // Only do this if we are not trying to support denormals. v_mad_f32 does
9546 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00009547 SDValue LHS = N->getOperand(0);
9548 SDValue RHS = N->getOperand(1);
9549 if (LHS.getOpcode() == ISD::FADD) {
9550 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9551 SDValue A = LHS.getOperand(0);
9552 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009553 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009554 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009555 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9556 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9557
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009558 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009559 }
9560 }
Matt Arsenault770ec862016-12-22 03:55:35 +00009561 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009562
Matt Arsenault770ec862016-12-22 03:55:35 +00009563 if (RHS.getOpcode() == ISD::FADD) {
9564 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009565
Matt Arsenault770ec862016-12-22 03:55:35 +00009566 SDValue A = RHS.getOperand(0);
9567 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009568 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009569 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009570 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009571 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009572 }
9573 }
9574 }
9575
9576 return SDValue();
9577}
9578
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009579SDValue SITargetLowering::performFMACombine(SDNode *N,
9580 DAGCombinerInfo &DCI) const {
9581 SelectionDAG &DAG = DCI.DAG;
9582 EVT VT = N->getValueType(0);
9583 SDLoc SL(N);
9584
Stanislav Mekhanoshin0e858b02019-02-09 00:34:21 +00009585 if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009586 return SDValue();
9587
9588 // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9589 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
9590 SDValue Op1 = N->getOperand(0);
9591 SDValue Op2 = N->getOperand(1);
9592 SDValue FMA = N->getOperand(2);
9593
9594 if (FMA.getOpcode() != ISD::FMA ||
9595 Op1.getOpcode() != ISD::FP_EXTEND ||
9596 Op2.getOpcode() != ISD::FP_EXTEND)
9597 return SDValue();
9598
9599 // fdot2_f32_f16 always flushes fp32 denormal operands and outputs to zero,
9600 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
9601 // is sufficient to allow generating fdot2.
9602 const TargetOptions &Options = DAG.getTarget().Options;
9603 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9604 (N->getFlags().hasAllowContract() &&
9605 FMA->getFlags().hasAllowContract())) {
9606 Op1 = Op1.getOperand(0);
9607 Op2 = Op2.getOperand(0);
9608 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9609 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9610 return SDValue();
9611
9612 SDValue Vec1 = Op1.getOperand(0);
9613 SDValue Idx1 = Op1.getOperand(1);
9614 SDValue Vec2 = Op2.getOperand(0);
9615
9616 SDValue FMAOp1 = FMA.getOperand(0);
9617 SDValue FMAOp2 = FMA.getOperand(1);
9618 SDValue FMAAcc = FMA.getOperand(2);
9619
9620 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9621 FMAOp2.getOpcode() != ISD::FP_EXTEND)
9622 return SDValue();
9623
9624 FMAOp1 = FMAOp1.getOperand(0);
9625 FMAOp2 = FMAOp2.getOperand(0);
9626 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9627 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9628 return SDValue();
9629
9630 SDValue Vec3 = FMAOp1.getOperand(0);
9631 SDValue Vec4 = FMAOp2.getOperand(0);
9632 SDValue Idx2 = FMAOp1.getOperand(1);
9633
9634 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9635 // Idx1 and Idx2 cannot be the same.
9636 Idx1 == Idx2)
9637 return SDValue();
9638
9639 if (Vec1 == Vec2 || Vec3 == Vec4)
9640 return SDValue();
9641
9642 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9643 return SDValue();
9644
9645 if ((Vec1 == Vec3 && Vec2 == Vec4) ||
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00009646 (Vec1 == Vec4 && Vec2 == Vec3)) {
9647 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9648 DAG.getTargetConstant(0, SL, MVT::i1));
9649 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009650 }
9651 return SDValue();
9652}
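// Rough source-level view of the fdot2 pattern above (a, b and acc are
// hypothetical): with a and b of type half2,
//   float r = (float)a.x * (float)b.x + ((float)a.y * (float)b.y + acc);
// matches the FMA chain and, when contraction is allowed, can become a
// single v_dot2_f32_f16.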
9653
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009654SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9655 DAGCombinerInfo &DCI) const {
9656 SelectionDAG &DAG = DCI.DAG;
9657 SDLoc SL(N);
9658
9659 SDValue LHS = N->getOperand(0);
9660 SDValue RHS = N->getOperand(1);
9661 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00009662 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9663
9664 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9665 if (!CRHS) {
9666 CRHS = dyn_cast<ConstantSDNode>(LHS);
9667 if (CRHS) {
9668 std::swap(LHS, RHS);
9669 CC = getSetCCSwappedOperands(CC);
9670 }
9671 }
9672
Stanislav Mekhanoshin3b117942018-06-16 03:46:59 +00009673 if (CRHS) {
9674 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9675 isBoolSGPR(LHS.getOperand(0))) {
9676 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9677 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9678 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
9679 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
9680 if ((CRHS->isAllOnesValue() &&
9681 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9682 (CRHS->isNullValue() &&
9683 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9684 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9685 DAG.getConstant(-1, SL, MVT::i1));
9686 if ((CRHS->isAllOnesValue() &&
9687 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9688 (CRHS->isNullValue() &&
9689 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9690 return LHS.getOperand(0);
9691 }
9692
9693 uint64_t CRHSVal = CRHS->getZExtValue();
9694 if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9695 LHS.getOpcode() == ISD::SELECT &&
9696 isa<ConstantSDNode>(LHS.getOperand(1)) &&
9697 isa<ConstantSDNode>(LHS.getOperand(2)) &&
9698 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9699 isBoolSGPR(LHS.getOperand(0))) {
9700 // Given CT != FT:
9701 // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9702 // setcc (select cc, CT, CF), CF, ne => cc
9703 // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9704 // setcc (select cc, CT, CF), CT, eq => cc
9705 uint64_t CT = LHS.getConstantOperandVal(1);
9706 uint64_t CF = LHS.getConstantOperandVal(2);
9707
9708 if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9709 (CT == CRHSVal && CC == ISD::SETNE))
9710 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9711 DAG.getConstant(-1, SL, MVT::i1));
9712 if ((CF == CRHSVal && CC == ISD::SETNE) ||
9713 (CT == CRHSVal && CC == ISD::SETEQ))
9714 return LHS.getOperand(0);
9715 }
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00009716 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009717
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00009718 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9719 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009720 return SDValue();
9721
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009722 // Match isinf/isfinite pattern
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009723 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009724 // (fcmp one (fabs x), inf) -> (fp_class x,
9725 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9726 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009727 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9728 if (!CRHS)
9729 return SDValue();
9730
9731 const APFloat &APF = CRHS->getValueAPF();
9732 if (APF.isInfinity() && !APF.isNegative()) {
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009733 const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9734 SIInstrFlags::N_INFINITY;
9735 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9736 SIInstrFlags::P_ZERO |
9737 SIInstrFlags::N_NORMAL |
9738 SIInstrFlags::P_NORMAL |
9739 SIInstrFlags::N_SUBNORMAL |
9740 SIInstrFlags::P_SUBNORMAL;
9741 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009742 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9743 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009744 }
9745 }
9746
9747 return SDValue();
9748}
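// Illustrative example of the isinf fold above: for f32,
//   (fcmp oeq (fabs x), +inf) --> (fp_class x, P_INFINITY | N_INFINITY)
// which selects to a single v_cmp_class_f32 instead of a compare against a
// constant-materialized infinity.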
9749
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009750SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9751 DAGCombinerInfo &DCI) const {
9752 SelectionDAG &DAG = DCI.DAG;
9753 SDLoc SL(N);
9754 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9755
9756 SDValue Src = N->getOperand(0);
9757 SDValue Srl = N->getOperand(0);
9758 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9759 Srl = Srl.getOperand(0);
9760
9761 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9762 if (Srl.getOpcode() == ISD::SRL) {
9763 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9764 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9765 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
9766
9767 if (const ConstantSDNode *C =
9768 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9769 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9770 EVT(MVT::i32));
9771
9772 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9773 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9774 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9775 MVT::f32, Srl);
9776 }
9777 }
9778 }
9779
9780 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9781
Craig Topperd0af7e82017-04-28 05:31:46 +00009782 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009783 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9784 !DCI.isBeforeLegalizeOps());
9785 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Stanislav Mekhanoshined0d6c62019-01-09 02:24:22 +00009786 if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009787 DCI.CommitTargetLoweringOpt(TLO);
9788 }
9789
9790 return SDValue();
9791}
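// Note (illustrative): besides the srl fold above, the SimplifyDemandedBits
// call can strip redundant masking, e.g.
//   cvt_f32_ubyte1 (and x, 0xffff) --> cvt_f32_ubyte1 x
// since only bits [15:8] of the source are demanded by ubyte1.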
9792
Tom Stellard1b95fed2018-05-24 05:28:34 +00009793SDValue SITargetLowering::performClampCombine(SDNode *N,
9794 DAGCombinerInfo &DCI) const {
9795 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9796 if (!CSrc)
9797 return SDValue();
9798
Matt Arsenault055e4dc2019-03-29 19:14:54 +00009799 const MachineFunction &MF = DCI.DAG.getMachineFunction();
Tom Stellard1b95fed2018-05-24 05:28:34 +00009800 const APFloat &F = CSrc->getValueAPF();
9801 APFloat Zero = APFloat::getZero(F.getSemantics());
9802 APFloat::cmpResult Cmp0 = F.compare(Zero);
9803 if (Cmp0 == APFloat::cmpLessThan ||
Matt Arsenault055e4dc2019-03-29 19:14:54 +00009804 (Cmp0 == APFloat::cmpUnordered &&
9805 MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
Tom Stellard1b95fed2018-05-24 05:28:34 +00009806 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9807 }
9808
9809 APFloat One(F.getSemantics(), "1.0");
9810 APFloat::cmpResult Cmp1 = F.compare(One);
9811 if (Cmp1 == APFloat::cmpGreaterThan)
9812 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9813
9814 return SDValue(CSrc, 0);
9815}
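// Constant-folding behavior of the combine above (illustrative):
//   clamp(-0.5) -> 0.0, clamp(1.5) -> 1.0, clamp(0.25) -> 0.25 (unchanged),
// and clamp(NaN) -> 0.0 only when DX10Clamp is set; otherwise the NaN source
// is returned as-is.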
9816
9817
Tom Stellard75aadc22012-12-11 21:25:42 +00009818SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9819 DAGCombinerInfo &DCI) const {
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00009820 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9821 return SDValue();
Tom Stellard75aadc22012-12-11 21:25:42 +00009822 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00009823 default:
9824 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009825 case ISD::ADD:
9826 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009827 case ISD::SUB:
9828 return performSubCombine(N, DCI);
9829 case ISD::ADDCARRY:
9830 case ISD::SUBCARRY:
9831 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009832 case ISD::FADD:
9833 return performFAddCombine(N, DCI);
9834 case ISD::FSUB:
9835 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009836 case ISD::SETCC:
9837 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00009838 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00009839 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00009840 case ISD::FMAXNUM_IEEE:
9841 case ISD::FMINNUM_IEEE:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00009842 case ISD::SMAX:
9843 case ISD::SMIN:
9844 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00009845 case ISD::UMIN:
9846 case AMDGPUISD::FMIN_LEGACY:
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00009847 case AMDGPUISD::FMAX_LEGACY:
9848 return performMinMaxCombine(N, DCI);
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009849 case ISD::FMA:
9850 return performFMACombine(N, DCI);
Matt Arsenault90083d32018-06-07 09:54:49 +00009851 case ISD::LOAD: {
9852 if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
9853 return Widened;
9854 LLVM_FALLTHROUGH;
9855 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009856 case ISD::STORE:
9857 case ISD::ATOMIC_LOAD:
9858 case ISD::ATOMIC_STORE:
9859 case ISD::ATOMIC_CMP_SWAP:
9860 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9861 case ISD::ATOMIC_SWAP:
9862 case ISD::ATOMIC_LOAD_ADD:
9863 case ISD::ATOMIC_LOAD_SUB:
9864 case ISD::ATOMIC_LOAD_AND:
9865 case ISD::ATOMIC_LOAD_OR:
9866 case ISD::ATOMIC_LOAD_XOR:
9867 case ISD::ATOMIC_LOAD_NAND:
9868 case ISD::ATOMIC_LOAD_MIN:
9869 case ISD::ATOMIC_LOAD_MAX:
9870 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00009871 case ISD::ATOMIC_LOAD_UMAX:
Matt Arsenaulta5840c32019-01-22 18:36:06 +00009872 case ISD::ATOMIC_LOAD_FADD:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00009873 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00009874 case AMDGPUISD::ATOMIC_DEC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00009875 case AMDGPUISD::ATOMIC_LOAD_FMIN:
Matt Arsenaulta5840c32019-01-22 18:36:06 +00009876 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009877 if (DCI.isBeforeLegalize())
9878 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009879 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00009880 case ISD::AND:
9881 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00009882 case ISD::OR:
9883 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00009884 case ISD::XOR:
9885 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00009886 case ISD::ZERO_EXTEND:
9887 return performZeroExtendCombine(N, DCI);
Ryan Taylor00e063a2019-03-19 16:07:00 +00009888 case ISD::SIGN_EXTEND_INREG:
9889 return performSignExtendInRegCombine(N , DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00009890 case AMDGPUISD::FP_CLASS:
9891 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00009892 case ISD::FCANONICALIZE:
9893 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009894 case AMDGPUISD::RCP:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00009895 return performRcpCombine(N, DCI);
9896 case AMDGPUISD::FRACT:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009897 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00009898 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009899 case AMDGPUISD::RSQ_LEGACY:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00009900 case AMDGPUISD::RCP_IFLAG:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009901 case AMDGPUISD::RSQ_CLAMP:
9902 case AMDGPUISD::LDEXP: {
9903 SDValue Src = N->getOperand(0);
9904 if (Src.isUndef())
9905 return Src;
9906 break;
9907 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009908 case ISD::SINT_TO_FP:
9909 case ISD::UINT_TO_FP:
9910 return performUCharToFloatCombine(N, DCI);
9911 case AMDGPUISD::CVT_F32_UBYTE0:
9912 case AMDGPUISD::CVT_F32_UBYTE1:
9913 case AMDGPUISD::CVT_F32_UBYTE2:
9914 case AMDGPUISD::CVT_F32_UBYTE3:
9915 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00009916 case AMDGPUISD::FMED3:
9917 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00009918 case AMDGPUISD::CVT_PKRTZ_F16_F32:
9919 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00009920 case AMDGPUISD::CLAMP:
9921 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00009922 case ISD::SCALAR_TO_VECTOR: {
9923 SelectionDAG &DAG = DCI.DAG;
9924 EVT VT = N->getValueType(0);
9925
9926 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9927 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9928 SDLoc SL(N);
9929 SDValue Src = N->getOperand(0);
9930 EVT EltVT = Src.getValueType();
9931 if (EltVT == MVT::f16)
9932 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9933
9934 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9935 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9936 }
9937
9938 break;
9939 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00009940 case ISD::EXTRACT_VECTOR_ELT:
9941 return performExtractVectorEltCombine(N, DCI);
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00009942 case ISD::INSERT_VECTOR_ELT:
9943 return performInsertVectorEltCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009944 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00009945 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00009946}
Christian Konigd910b7d2013-02-26 17:52:16 +00009947
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009948/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00009949static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009950 switch (Idx) {
9951 default: return 0;
9952 case AMDGPU::sub0: return 0;
9953 case AMDGPU::sub1: return 1;
9954 case AMDGPU::sub2: return 2;
9955 case AMDGPU::sub3: return 3;
David Stuttardf77079f2019-01-14 11:55:24 +00009956 case AMDGPU::sub4: return 4; // Possible with TFE/LWE
Christian Konig8e06e2a2013-04-10 08:39:08 +00009957 }
9958}
9959
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009960/// Adjust the writemask of MIMG instructions
Matt Arsenault68f05052017-12-04 22:18:27 +00009961SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9962 SelectionDAG &DAG) const {
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009963 unsigned Opcode = Node->getMachineOpcode();
9964
9965 // Subtract 1 because the vdata output is not a MachineSDNode operand.
9966 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9967 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9968 return Node; // not implemented for D16
9969
David Stuttardf77079f2019-01-14 11:55:24 +00009970 SDNode *Users[5] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00009971 unsigned Lane = 0;
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009972 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009973 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00009974 unsigned NewDmask = 0;
David Stuttardf77079f2019-01-14 11:55:24 +00009975 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9976 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
9977 bool UsesTFC = (Node->getConstantOperandVal(TFEIdx) ||
9978 Node->getConstantOperandVal(LWEIdx)) ? 1 : 0;
9979 unsigned TFCLane = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00009980 bool HasChain = Node->getNumValues() > 1;
9981
9982 if (OldDmask == 0) {
9983 // These are folded out, but on the off chance it happens, don't assert.
9984 return Node;
9985 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009986
David Stuttardf77079f2019-01-14 11:55:24 +00009987 unsigned OldBitsSet = countPopulation(OldDmask);
9988 // Work out which is the TFE/LWE lane if that is enabled.
9989 if (UsesTFC) {
9990 TFCLane = OldBitsSet;
9991 }
9992
Christian Konig8e06e2a2013-04-10 08:39:08 +00009993 // Try to figure out the used register components
9994 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9995 I != E; ++I) {
9996
Matt Arsenault93e65ea2017-02-22 21:16:41 +00009997 // Don't look at users of the chain.
9998 if (I.getUse().getResNo() != 0)
9999 continue;
10000
Christian Konig8e06e2a2013-04-10 08:39:08 +000010001 // Abort if we can't understand the usage
10002 if (!I->isMachineOpcode() ||
10003 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +000010004 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +000010005
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +000010006 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +000010007 // Note that subregs are packed, i.e. Lane==0 is the first bit set
10008 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
10009 // set, etc.
Christian Konig8b1ed282013-04-10 08:39:16 +000010010 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +000010011
David Stuttardf77079f2019-01-14 11:55:24 +000010012 // Check if the use is for the TFE/LWE generated result at VGPRn+1.
10013 if (UsesTFC && Lane == TFCLane) {
10014 Users[Lane] = *I;
10015 } else {
10016 // Set which texture component corresponds to the lane.
10017 unsigned Comp;
10018 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
10019 Comp = countTrailingZeros(Dmask);
10020 Dmask &= ~(1 << Comp);
10021 }
10022
10023 // Abort if we have more than one user per component.
10024 if (Users[Lane])
10025 return Node;
10026
10027 Users[Lane] = *I;
10028 NewDmask |= 1 << Comp;
Tom Stellard54774e52013-10-23 02:53:47 +000010029 }
Christian Konig8e06e2a2013-04-10 08:39:08 +000010030 }
10031
David Stuttardf77079f2019-01-14 11:55:24 +000010032 // Don't allow 0 dmask, as hardware assumes one channel enabled.
10033 bool NoChannels = !NewDmask;
10034 if (NoChannels) {
David Stuttardfc2a7472019-03-20 09:29:55 +000010035 if (!UsesTFC) {
10036 // No uses of the result and not using TFC, so there is nothing to do.
10037 return Node;
10038 }
David Stuttardf77079f2019-01-14 11:55:24 +000010039 // If the original dmask has only one channel, there is nothing to do.
10040 if (OldBitsSet == 1)
10041 return Node;
10042 // Use an arbitrary dmask; the instruction requires at least one channel.
10043 NewDmask = 1;
10044 }
Tom Stellard54774e52013-10-23 02:53:47 +000010045 // Abort if there's no change
10046 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +000010047 return Node;
10048
10049 unsigned BitsSet = countPopulation(NewDmask);
10050
David Stuttardf77079f2019-01-14 11:55:24 +000010051 // Check for TFE or LWE - increase the number of channels by one to account
10052 // for the extra return value.
10053 // This will need adjustment for D16 if this is also included in
10054 // adjustWritemask (this function), but at present D16 is excluded.
10055 unsigned NewChannels = BitsSet + UsesTFC;
10056
10057 int NewOpcode =
10058 AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
Matt Arsenault68f05052017-12-04 22:18:27 +000010059 assert(NewOpcode != -1 &&
10060 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
10061 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +000010062
10063 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +000010064 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +000010065 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010066 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +000010067 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +000010068
Matt Arsenault68f05052017-12-04 22:18:27 +000010069 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
10070
David Stuttardf77079f2019-01-14 11:55:24 +000010071 MVT ResultVT = NewChannels == 1 ?
10072 SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
10073 NewChannels == 5 ? 8 : NewChannels);
Matt Arsenault856777d2017-12-08 20:00:57 +000010074 SDVTList NewVTList = HasChain ?
10075 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
10076
Matt Arsenault68f05052017-12-04 22:18:27 +000010077
10078 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
10079 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +000010080
Matt Arsenault856777d2017-12-08 20:00:57 +000010081 if (HasChain) {
10082 // Update chain.
Chandler Carruth66654b72018-08-14 23:30:32 +000010083 DAG.setNodeMemRefs(NewNode, Node->memoperands());
Matt Arsenault856777d2017-12-08 20:00:57 +000010084 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
10085 }
Matt Arsenault68f05052017-12-04 22:18:27 +000010086
David Stuttardf77079f2019-01-14 11:55:24 +000010087 if (NewChannels == 1) {
Matt Arsenault68f05052017-12-04 22:18:27 +000010088 assert(Node->hasNUsesOfValue(1, 0));
10089 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
10090 SDLoc(Node), Users[Lane]->getValueType(0),
10091 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +000010092 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +000010093 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +000010094 }
10095
Christian Konig8e06e2a2013-04-10 08:39:08 +000010096 // Update the users of the node with the new indices
David Stuttardf77079f2019-01-14 11:55:24 +000010097 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +000010098 SDNode *User = Users[i];
David Stuttardf77079f2019-01-14 11:55:24 +000010099 if (!User) {
10100 // Handle the special case of NoChannels. We set NewDmask to 1 above, but
10101 // Users[0] is still nullptr because channel 0 doesn't really have a use.
10102 if (i || !NoChannels)
10103 continue;
10104 } else {
10105 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
10106 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
10107 }
Christian Konig8e06e2a2013-04-10 08:39:08 +000010108
10109 switch (Idx) {
10110 default: break;
10111 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
10112 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
10113 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
David Stuttardf77079f2019-01-14 11:55:24 +000010114 case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
Christian Konig8e06e2a2013-04-10 08:39:08 +000010115 }
10116 }
Matt Arsenault68f05052017-12-04 22:18:27 +000010117
10118 DAG.RemoveDeadNode(Node);
10119 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +000010120}
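// Illustrative effect of the writemask adjustment (hypothetical shader): if
// an image sample is selected with dmask = 0xf but only the x and z channels
// are ever extracted, the dmask shrinks to 0x5, the result class drops from
// four VGPRs to two, and the two extract_subreg users are remapped to sub0
// and sub1.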
10121
Tom Stellardc98ee202015-07-16 19:40:07 +000010122static bool isFrameIndexOp(SDValue Op) {
10123 if (Op.getOpcode() == ISD::AssertZext)
10124 Op = Op.getOperand(0);
10125
10126 return isa<FrameIndexSDNode>(Op);
10127}
10128
Adrian Prantl5f8f34e42018-05-01 15:54:18 +000010129/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +000010130/// with frame index operands.
10131 /// LLVM assumes that inputs to these instructions are registers.
Matt Arsenault0d0d6c22017-04-12 21:58:23 +000010132SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
10133 SelectionDAG &DAG) const {
10134 if (Node->getOpcode() == ISD::CopyToReg) {
10135 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
10136 SDValue SrcVal = Node->getOperand(2);
10137
10138 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
10139 // to try understanding copies to physical registers.
10140 if (SrcVal.getValueType() == MVT::i1 &&
Daniel Sanders2bea69b2019-08-01 23:27:28 +000010141 Register::isPhysicalRegister(DestReg->getReg())) {
Matt Arsenault0d0d6c22017-04-12 21:58:23 +000010142 SDLoc SL(Node);
10143 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10144 SDValue VReg = DAG.getRegister(
10145 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
10146
10147 SDNode *Glued = Node->getGluedNode();
10148 SDValue ToVReg
10149 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
10150 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
10151 SDValue ToResultReg
10152 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
10153 VReg, ToVReg.getValue(1));
10154 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
10155 DAG.RemoveDeadNode(Node);
10156 return ToResultReg.getNode();
10157 }
10158 }
Tom Stellard8dd392e2014-10-09 18:09:15 +000010159
10160 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +000010161 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +000010162 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +000010163 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +000010164 continue;
10165 }
10166
Tom Stellard3457a842014-10-09 19:06:00 +000010167 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +000010168 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +000010169 Node->getOperand(i).getValueType(),
10170 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +000010171 }
10172
Mark Searles4e3d6162017-10-16 23:38:53 +000010173 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +000010174}
10175
Adrian Prantl5f8f34e42018-05-01 15:54:18 +000010176/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +000010177/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +000010178SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
10179 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +000010180 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +000010181 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +000010182
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +000010183 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Nicolai Haehnlef2674312018-06-21 13:36:01 +000010184 !TII->isGather4(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +000010185 return adjustWritemask(Node, DAG);
10186 }
Christian Konig8e06e2a2013-04-10 08:39:08 +000010187
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +000010188 if (Opcode == AMDGPU::INSERT_SUBREG ||
10189 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +000010190 legalizeTargetIndependentNode(Node, DAG);
10191 return Node;
10192 }
Matt Arsenault206f8262017-08-01 20:49:41 +000010193
10194 switch (Opcode) {
10195 case AMDGPU::V_DIV_SCALE_F32:
10196 case AMDGPU::V_DIV_SCALE_F64: {
10197 // Satisfy the operand register constraint when one of the inputs is
10198 // undefined. Ordinarily each undef value will have its own implicit_def of
10199 // a vreg, so force these to use a single register.
10200 SDValue Src0 = Node->getOperand(0);
10201 SDValue Src1 = Node->getOperand(1);
10202 SDValue Src2 = Node->getOperand(2);
10203
10204 if ((Src0.isMachineOpcode() &&
10205 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
10206 (Src0 == Src1 || Src0 == Src2))
10207 break;
10208
10209 MVT VT = Src0.getValueType().getSimpleVT();
Alexander Timofeevba447ba2019-05-26 20:33:26 +000010210 const TargetRegisterClass *RC =
10211 getRegClassFor(VT, Src0.getNode()->isDivergent());
Matt Arsenault206f8262017-08-01 20:49:41 +000010212
10213 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
10214 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
10215
10216 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
10217 UndefReg, Src0, SDValue());
10218
10219 // src0 must be the same register as src1 or src2, even if the value is
10220 // undefined, so make sure we don't violate this constraint.
10221 if (Src0.isMachineOpcode() &&
10222 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
10223 if (Src1.isMachineOpcode() &&
10224 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10225 Src0 = Src1;
10226 else if (Src2.isMachineOpcode() &&
10227 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
10228 Src0 = Src2;
10229 else {
10230 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
10231 Src0 = UndefReg;
10232 Src1 = UndefReg;
10233 }
10234 } else
10235 break;
10236
10237 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
10238 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
10239 Ops.push_back(Node->getOperand(I));
10240
10241 Ops.push_back(ImpDef.getValue(1));
10242 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10243 }
Stanislav Mekhanoshin5f581c92019-06-12 17:52:51 +000010244 case AMDGPU::V_PERMLANE16_B32:
10245 case AMDGPU::V_PERMLANEX16_B32: {
10246 ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
10247 ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
10248 if (!FI->getZExtValue() && !BC->getZExtValue())
10249 break;
10250 SDValue VDstIn = Node->getOperand(6);
10251 if (VDstIn.isMachineOpcode()
10252 && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
10253 break;
10254 MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
10255 SDLoc(Node), MVT::i32);
10256 SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
10257 SDValue(BC, 0), Node->getOperand(3),
10258 Node->getOperand(4), Node->getOperand(5),
10259 SDValue(ImpDef, 0), Node->getOperand(7) };
10260 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
10261 }
Matt Arsenault206f8262017-08-01 20:49:41 +000010262 default:
10263 break;
10264 }
10265
Tom Stellard654d6692015-01-08 15:08:17 +000010266 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +000010267}
Christian Konig8b1ed282013-04-10 08:39:16 +000010268
Adrian Prantl5f8f34e42018-05-01 15:54:18 +000010269/// Assign the register class depending on the number of
Christian Konig8b1ed282013-04-10 08:39:16 +000010270/// bits set in the writemask
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010271void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +000010272 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +000010273 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +000010274
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010275 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +000010276
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010277 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +000010278 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010279 TII->legalizeOperandsVOP3(MRI, MI);
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +000010280
10281 // Prefer VGPRs over AGPRs in MAI instructions where possible.
10282 // This saves a chain-copy of registers and better balances register
10283 // use between vgpr and agpr, as agpr tuples tend to be big.
10284 if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) {
10285 unsigned Opc = MI.getOpcode();
10286 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
10287 for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
10288 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
10289 if (I == -1)
10290 break;
10291 MachineOperand &Op = MI.getOperand(I);
10292 if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID &&
10293 OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) ||
Daniel Sanders2bea69b2019-08-01 23:27:28 +000010294 !Register::isVirtualRegister(Op.getReg()) ||
Stanislav Mekhanoshine67cc382019-07-11 21:19:33 +000010295 !TRI->isAGPR(MRI, Op.getReg()))
10296 continue;
10297 auto *Src = MRI.getUniqueVRegDef(Op.getReg());
10298 if (!Src || !Src->isCopy() ||
10299 !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg()))
10300 continue;
10301 auto *RC = TRI->getRegClassForReg(MRI, Op.getReg());
10302 auto *NewRC = TRI->getEquivalentVGPRClass(RC);
10303 // All uses of agpr64 and agpr32 can also accept vgpr except for
10304 // v_accvgpr_read, but we do not produce agpr reads during selection,
10305 // so no use checks are needed.
10306 MRI.setRegClass(Op.getReg(), NewRC);
10307 }
10308 }
10309
Matt Arsenault6005fcb2015-10-21 21:51:02 +000010310 return;
10311 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +000010312
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +000010313 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010314 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +000010315 if (NoRetAtomicOp != -1) {
10316 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010317 MI.setDesc(TII->get(NoRetAtomicOp));
10318 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +000010319 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +000010320 }
10321
Tom Stellard354a43c2016-04-01 18:27:37 +000010322 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
10323 // instruction, because the return type of these instructions is a vec2 of
10324 // the memory type, so it can be tied to the input operand.
10325 // This means these instructions always have a use, so we need to add a
10326 // special case to check if the atomic has only one extract_subreg use,
10327 // which itself has no uses.
10328 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +000010329 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +000010330 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
10331 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010332 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +000010333
10334 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010335 MI.setDesc(TII->get(NoRetAtomicOp));
10336 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +000010337
10338 // If we only remove the def operand from the atomic instruction, the
10339 // extract_subreg will be left with a use of a vreg without a def.
10340 // So we need to insert an implicit_def to avoid machine verifier
10341 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +000010342 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +000010343 TII->get(AMDGPU::IMPLICIT_DEF), Def);
10344 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +000010345 return;
10346 }
Christian Konig8b1ed282013-04-10 08:39:16 +000010347}
Tom Stellard0518ff82013-06-03 17:39:58 +000010348
Benjamin Kramerbdc49562016-06-12 15:39:02 +000010349static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
10350 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010351 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +000010352 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
10353}
10354
10355MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +000010356 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +000010357 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +000010358 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +000010359
Matt Arsenault2d6fdb82015-09-25 17:08:42 +000010360 // Build the half of the subregister with the constants before building the
10361 // full 128-bit register. If we are building multiple resource descriptors,
10362 // this will allow CSEing of the 2-component register.
10363 const SDValue Ops0[] = {
10364 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
10365 buildSMovImm32(DAG, DL, 0),
10366 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
10367 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
10368 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
10369 };
Matt Arsenault485defe2014-11-05 19:01:17 +000010370
Matt Arsenault2d6fdb82015-09-25 17:08:42 +000010371 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
10372 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +000010373
Matt Arsenault2d6fdb82015-09-25 17:08:42 +000010374 // Combine the constants and the pointer.
10375 const SDValue Ops1[] = {
10376 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
10377 Ptr,
10378 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
10379 SubRegHi,
10380 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
10381 };
Matt Arsenault485defe2014-11-05 19:01:17 +000010382
Matt Arsenault2d6fdb82015-09-25 17:08:42 +000010383 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +000010384}
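// Resulting descriptor layout (illustrative): for an addr64 MUBUF access the
// 128-bit rsrc built above is
//   { Ptr[31:0], Ptr[63:32], 0, getDefaultRsrcDataFormat() >> 32 }
// i.e. words 0-1 hold the base pointer and words 2-3 hold the constant half,
// with the 2-component constant pair CSE'd across descriptors.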
10385
Adrian Prantl5f8f34e42018-05-01 15:54:18 +000010386/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +000010387/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
10388/// of the resource descriptor) to create an offset, which is added to
10389/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +000010390MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
10391 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010392 uint64_t RsrcDword2And3) const {
10393 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
10394 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
10395 if (RsrcDword1) {
10396 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010397 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
10398 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010399 }
10400
10401 SDValue DataLo = buildSMovImm32(DAG, DL,
10402 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
10403 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
10404
10405 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010406 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010407 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010408 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010409 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010410 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010411 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010412 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010413 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +000010414 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +000010415 };
10416
10417 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
10418}
10419
Tom Stellardd7e6f132015-04-08 01:09:26 +000010420//===----------------------------------------------------------------------===//
10421// SI Inline Assembly Support
10422//===----------------------------------------------------------------------===//
10423
10424std::pair<unsigned, const TargetRegisterClass *>
10425SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +000010426 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +000010427 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010428 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010429 if (Constraint.size() == 1) {
10430 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010431 default:
10432 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010433 case 's':
10434 case 'r':
10435 switch (VT.getSizeInBits()) {
10436 default:
10437 return std::make_pair(0U, nullptr);
10438 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +000010439 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010440 RC = &AMDGPU::SReg_32_XM0RegClass;
10441 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010442 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010443 RC = &AMDGPU::SGPR_64RegClass;
10444 break;
Tim Renouf361b5b22019-03-21 12:01:21 +000010445 case 96:
10446 RC = &AMDGPU::SReg_96RegClass;
10447 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010448 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010449 RC = &AMDGPU::SReg_128RegClass;
10450 break;
Tim Renouf033f99a2019-03-22 10:11:21 +000010451 case 160:
10452 RC = &AMDGPU::SReg_160RegClass;
10453 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010454 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010455 RC = &AMDGPU::SReg_256RegClass;
10456 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +000010457 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010458 RC = &AMDGPU::SReg_512RegClass;
10459 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010460 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010461 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010462 case 'v':
10463 switch (VT.getSizeInBits()) {
10464 default:
10465 return std::make_pair(0U, nullptr);
10466 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +000010467 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010468 RC = &AMDGPU::VGPR_32RegClass;
10469 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010470 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010471 RC = &AMDGPU::VReg_64RegClass;
10472 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010473 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010474 RC = &AMDGPU::VReg_96RegClass;
10475 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010476 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010477 RC = &AMDGPU::VReg_128RegClass;
10478 break;
Tim Renouf033f99a2019-03-22 10:11:21 +000010479 case 160:
10480 RC = &AMDGPU::VReg_160RegClass;
10481 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010482 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010483 RC = &AMDGPU::VReg_256RegClass;
10484 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010485 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010486 RC = &AMDGPU::VReg_512RegClass;
10487 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010488 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010489 break;
Stanislav Mekhanoshin50d7f4642019-07-09 21:43:09 +000010490 case 'a':
Stanislav Mekhanoshin450afce2019-07-30 19:29:33 +000010491 if (!Subtarget->hasMAIInsts())
10492 break;
Stanislav Mekhanoshin50d7f4642019-07-09 21:43:09 +000010493 switch (VT.getSizeInBits()) {
10494 default:
10495 return std::make_pair(0U, nullptr);
10496 case 32:
10497 case 16:
10498 RC = &AMDGPU::AGPR_32RegClass;
10499 break;
10500 case 64:
10501 RC = &AMDGPU::AReg_64RegClass;
10502 break;
10503 case 128:
10504 RC = &AMDGPU::AReg_128RegClass;
10505 break;
10506 case 512:
10507 RC = &AMDGPU::AReg_512RegClass;
10508 break;
10509 case 1024:
10510 RC = &AMDGPU::AReg_1024RegClass;
10511 // v32 types are not legal but we support them here.
10512 return std::make_pair(0U, RC);
10513 }
10514 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +000010515 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +000010516 // We actually support i128, i16 and f16 as inline parameters
10517 // even if they are not reported as legal
10518 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10519 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10520 return std::make_pair(0U, RC);
Tom Stellardd7e6f132015-04-08 01:09:26 +000010521 }
10522
10523 if (Constraint.size() > 1) {
Tom Stellardd7e6f132015-04-08 01:09:26 +000010524 if (Constraint[1] == 'v') {
10525 RC = &AMDGPU::VGPR_32RegClass;
10526 } else if (Constraint[1] == 's') {
10527 RC = &AMDGPU::SGPR_32RegClass;
Stanislav Mekhanoshin50d7f4642019-07-09 21:43:09 +000010528 } else if (Constraint[1] == 'a') {
10529 RC = &AMDGPU::AGPR_32RegClass;
Tom Stellardd7e6f132015-04-08 01:09:26 +000010530 }
10531
10532 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +000010533 uint32_t Idx;
10534 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10535 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +000010536 return std::make_pair(RC->getRegister(Idx), RC);
10537 }
10538 }
10539 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10540}
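// Usage sketch (hypothetical source): these constraints come from inline
// assembly, e.g.
//   int Out;
//   __asm__("v_mov_b32 %0, %1" : "=v"(Out) : "s"(In));
// requests a VGPR for the output and an SGPR for the input; "a" similarly
// selects an AGPR on subtargets with MAI instructions.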
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010541
10542SITargetLowering::ConstraintType
10543SITargetLowering::getConstraintType(StringRef Constraint) const {
10544 if (Constraint.size() == 1) {
10545 switch (Constraint[0]) {
10546 default: break;
10547 case 's':
10548 case 'v':
Stanislav Mekhanoshin50d7f4642019-07-09 21:43:09 +000010549 case 'a':
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010550 return C_RegisterClass;
10551 }
10552 }
10553 return TargetLowering::getConstraintType(Constraint);
10554}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                             Info->getStackPtrOffsetReg()));
  if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());

  // We need to worry about replacing the default register with itself in case
  // of MIR testcases missing the MFI.
  if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
    MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());

  if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
    MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());

  if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
    MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                       Info->getScratchWaveOffsetReg());
  }

  Info->limitOccupancy(MF);

  if (ST.isWave32() && !MF.empty()) {
    // Add a VCC_HI def because many instructions are marked as implicitly
    // using VCC, while in wave32 mode we may only define VCC_LO. If nothing
    // defines VCC_HI we may end up with a use of undef.

    const SIInstrInfo *TII = ST.getInstrInfo();
    DebugLoc DL;

    MachineBasicBlock &MBB = MF.front();
    MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
    BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);

    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        TII->fixImplicitOperands(MI);
      }
    }
  }

  TargetLoweringBase::finalizeLowering(MF);
}
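
// Note: as the replaceRegWith calls above suggest, SP_REG, FP_REG,
// PRIVATE_RSRC_REG and SCRATCH_WAVE_OFFSET_REG act as placeholders during
// instruction selection; here they are rewritten to the concrete registers
// chosen for this function once its frame usage is known.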

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  // Set the high bits to zero based on the maximum allowed scratch size per
  // wave. We can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}
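
// Illustration (with an assumed bound, not the subtarget's actual value): if
// at most 2^N bytes of scratch are addressable per wave, every frame index
// fits in the low N bits, so bits [31:N] of the 32-bit address are known
// zero and the MUBUF address calculation can never carry into the sign bit.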

unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
  const unsigned CacheLineAlign = 6; // log2(64)

  // Pre-GFX10 targets did not benefit from loop alignment.
  if (!ML || DisableLoopAlignment ||
      (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
      getSubtarget()->hasInstFwdPrefetchBug())
    return PrefAlign;

  // On GFX10 the I$ consists of 4 cache lines of 64 bytes each. By default
  // the prefetcher keeps one cache line behind the PC and reads two ahead;
  // S_INST_PREFETCH can switch it to keep two lines behind and read one
  // ahead. Therefore we can benefit from aligning loop headers if the loop
  // fits in 192 bytes:
  //  - If the loop fits in 64 bytes it spans no more than two cache lines
  //    even when unaligned, and needs no alignment.
  //  - Else, if the loop is at most 128 bytes, aligning the header suffices
  //    and the prefetch mode need not change.
  //  - Else, if the loop is at most 192 bytes, we additionally need two
  //    lines kept behind the PC.
  // (A worked example follows this function.)

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  const MachineBasicBlock *Header = ML->getHeader();
  if (Header->getAlignment() != PrefAlign)
    return Header->getAlignment(); // Already processed.

  unsigned LoopSize = 0;
  for (const MachineBasicBlock *MBB : ML->blocks()) {
    // If an inner loop block is aligned, assume on average half of the
    // alignment size is added as nops.
    if (MBB != Header)
      LoopSize += (1 << MBB->getAlignment()) / 2;

    for (const MachineInstr &MI : *MBB) {
      LoopSize += TII->getInstSizeInBytes(MI);
      if (LoopSize > 192)
        return PrefAlign;
    }
  }

  if (LoopSize <= 64)
    return PrefAlign;

  if (LoopSize <= 128)
    return CacheLineAlign;

  // If any of the parent loops is surrounded by prefetch instructions, do
  // not insert new ones for the inner loop, which would reset the parent's
  // settings.
  for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
    if (MachineBasicBlock *Exit = P->getExitBlock()) {
      auto I = Exit->getFirstNonDebugInstr();
      if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
        return CacheLineAlign;
    }
  }

  MachineBasicBlock *Pre = ML->getLoopPreheader();
  MachineBasicBlock *Exit = ML->getExitBlock();

  if (Pre && Exit) {
    BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
            TII->get(AMDGPU::S_INST_PREFETCH))
      .addImm(1); // prefetch 2 lines behind PC

    BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
            TII->get(AMDGPU::S_INST_PREFETCH))
      .addImm(2); // prefetch 1 line behind PC
  }

  return CacheLineAlign;
}
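
// Worked example for the thresholds above (loop sizes assumed for
// illustration): a 100-byte loop can touch three 64-byte lines when its
// header is unaligned but at most two when aligned, so CacheLineAlign is
// returned and the prefetcher stays in its default mode; a 160-byte loop
// spans three lines even when aligned, so S_INST_PREFETCH is toggled around
// it to keep two lines behind the PC; a 200-byte loop exceeds the 192-byte
// budget, alignment no longer guarantees residency, and PrefAlign is kept.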

LLVM_ATTRIBUTE_UNUSED
static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
  assert(N->getOpcode() == ISD::CopyFromReg);
  do {
    // Follow the chain until we find an INLINEASM node.
    N = N->getOperand(0).getNode();
    if (N->getOpcode() == ISD::INLINEASM ||
        N->getOpcode() == ISD::INLINEASM_BR)
      return true;
  } while (N->getOpcode() == ISD::CopyFromReg);
  return false;
}

bool SITargetLowering::isSDNodeSourceOfDivergence(
    const SDNode *N, FunctionLoweringInfo *FLI,
    LegacyDivergenceAnalysis *KDA) const {
  switch (N->getOpcode()) {
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
    const MachineFunction *MF = FLI->MF;
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
    unsigned Reg = R->getReg();
    if (Register::isPhysicalRegister(Reg))
      return !TRI.isSGPRReg(MRI, Reg);

    if (MRI.isLiveIn(Reg)) {
      // Workitem IDs (workitem.id.x/y/z) arrive in VGPRs, and any VGPR
      // formal argument is considered divergent.
      if (!TRI.isSGPRReg(MRI, Reg))
        return true;
      // Formal arguments of non-entry functions are conservatively
      // considered divergent.
      else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
        return true;
      return false;
    }
    const Value *V = FLI->getValueFromVirtualReg(Reg);
    if (V)
      return KDA->isDivergent(V);
    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
    return !TRI.isSGPRReg(MRI, Reg);
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    unsigned AS = L->getAddressSpace();
    // A flat load may access private memory.
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have been
  // lowered to AMDGPUISD nodes, so we need to check those as well.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}
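
// For example (illustrative, not exhaustive): a CopyFromReg of a live-in
// SGPR, such as a kernel argument pointer, is uniform across the wave, while
// a load through a flat pointer is treated as divergent above because it may
// address per-lane private memory.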

bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
  switch (VT.getScalarType().getSimpleVT().SimpleTy) {
  case MVT::f32:
    return Subtarget->hasFP32Denormals();
  case MVT::f64:
    return Subtarget->hasFP64Denormals();
  case MVT::f16:
    return Subtarget->hasFP16Denormals();
  default:
    return false;
  }
}
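
// Note: callers use this to decide whether an operation may observe or
// produce denormal values, e.g. when checking whether a value is already
// canonicalized; for unhandled (non-FP) scalar types the conservative answer
// is false.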

bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                    const SelectionDAG &DAG,
                                                    bool SNaN,
                                                    unsigned Depth) const {
  if (Op.getOpcode() == AMDGPUISD::CLAMP) {
    const MachineFunction &MF = DAG.getMachineFunction();
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

    if (Info->getMode().DX10Clamp)
      return true; // Clamped to 0.
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }

  return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
                                                            SNaN, Depth);
}
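
// Rationale for the CLAMP case above: in DX10 clamp mode the clamp modifier
// maps NaN inputs to 0.0, so the result of AMDGPUISD::CLAMP can never be a
// NaN; otherwise NaN propagates through the clamp and we must recurse into
// the operand.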

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  switch (RMW->getOperation()) {
  case AtomicRMWInst::FAdd: {
    Type *Ty = RMW->getType();

    // We don't have a way to support 16-bit atomics now, so just leave them
    // as-is.
    if (Ty->isHalfTy())
      return AtomicExpansionKind::None;

    if (!Ty->isFloatTy())
      return AtomicExpansionKind::CmpXChg;

    // TODO: We do have these for flat; older targets also had them for
    // buffers.
    unsigned AS = RMW->getPointerAddressSpace();
    return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
      AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
  }
  default:
    break;
  }

  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}
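
// Illustrative IR (an assumed example, not from this file): an LDS float add
//   %old = atomicrmw fadd float addrspace(3)* %p, float %v seq_cst
// can be selected directly when the subtarget has LDS FP atomics, while the
// same operation on another address space returns CmpXChg above, telling
// AtomicExpandPass to rewrite it as a load + fadd + cmpxchg retry loop.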