//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
  "amdgpu-frame-index-zero-bits",
  cl::desc("High bits of frame index assumed to be zero"),
  cl::init(5),
  cl::ReallyHidden);

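// Descriptive note (not in the original source): the helper below returns the
// first register in the SGPR_32 class that the calling-convention state has
// not yet allocated, and aborts via llvm_unreachable if every SGPR is taken.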
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v32i32}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime; on VI it is s_memrealtime.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3)
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case, when denormals are enabled, where this
// would be OK to use, but we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

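// Illustrative note on the calling-convention hooks below (a summary derived
// from the code, not present in the original source): for non-kernel calling
// conventions, a v3f32 argument ends up in three f32 registers, a v2i64
// argument in four i32 registers, and, on subtargets with 16-bit
// instructions, a v3f16 argument in two v2f16 registers.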
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    if (Size == 16 && Subtarget->has16BitInsts())
      return (VT.getVectorNumElements() + 1) / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

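// Worked example for the helper below (illustrative, not from the original
// source): a struct type { <3 x float>, i32 } has a 32-bit element type and
// three vector elements; one extra element is added to cover the trailing
// i32, and the count is rounded up to the next power of two, yielding
// MVT::v4f32.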
static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type currently expected.
  assert(Ty->isStructTy() && "Expected struct type");

  Type *ElementType = nullptr;
  unsigned NumElts;
  if (Ty->getContainedType(0)->isVectorTy()) {
    VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
    ElementType = VecComponent->getElementType();
    NumElts = VecComponent->getNumElements();
  } else {
    ElementType = Ty->getContainedType(0);
    NumElts = 1;
  }

  assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) &&
         "Expected int32 type");

  // Calculate the size of the memVT type from the aggregate
  unsigned Pow2Elts = 0;
  unsigned ElementSize;
  switch (ElementType->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
    break;
  case Type::HalfTyID:
    ElementSize = 16;
    break;
  case Type::FloatTyID:
    ElementSize = 32;
    break;
  }
  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);

  return MVT::getVectorVT(MVT::getVT(ElementType, false),
                          Pow2Elts);
}

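// Descriptive summary of the DS/atomic intrinsic cases handled below (derived
// from the code, not a statement from the original author): operand 0 is
// treated as the pointer operand, and the access is conservatively marked
// volatile unless the volatile argument (operand 4) is a constant zero.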
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType(), true);
      if (Info.memVT == MVT::Other) {
        // Some intrinsics return an aggregate type - special case to work out
        // the correct memVT
        Info.memVT = memVTFromAggregate(CI.getType());
      }
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    if (!Vol || !Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }

  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

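// A rough illustration of the flat addressing checks below (not from the
// original source): without flat instruction offsets only a bare register
// address (BaseOffs == 0, Scale == 0) is accepted, while with GFX9-style
// offsets an addressing mode such as "r + 4095" fits the 12-bit unsigned
// range and "r + 4096" does not.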
bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume we will use FLAT for all global memory accesses
    // on VI.
    // FIXME: This assumption is currently wrong. On VI we still use
    // MUBUF instructions for the r + i addressing mode. As currently
    // implemented, the MUBUF instructions only work on buffers < 4GB.
    // It may be possible to support > 4GB buffers with MUBUF instructions,
    // by setting the stride value in the resource descriptor which would
    // increase the size limit to (stride * 4GB). However, this is risky,
    // because it has never been validated.
    return isLegalFlatAddressingMode(AM);
  }

  return isLegalMUBUFAddressingMode(AM);
}

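// Sketch of the MUBUF legality rules below (an informal summary, not original
// commentary): the immediate offset must fit in 12 unsigned bits, Scale
// values of 0 and 1 are accepted, Scale == 2 is accepted only without a base
// register (so 2 * r can be treated as r + r), and any larger scale is
// rejected.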
bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // Or 2 * r + i is allowed as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

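// Address-space dispatch sketched from the hook below (summary only): global
// uses the global/MUBUF rules, constant additionally checks the per-generation
// SMRD offset encodings, private uses MUBUF, local/region uses the 16-bit DS
// offset field, and flat/unknown falls back to the flat rules.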
Mehdi Amini0cdec1e2015-07-09 02:09:40 +00001026bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1027 const AddrMode &AM, Type *Ty,
Jonas Paulsson024e3192017-07-21 11:59:37 +00001028 unsigned AS, Instruction *I) const {
Matt Arsenault5015a892014-08-15 17:17:07 +00001029 // No global is ever allowed as a base.
1030 if (AM.BaseGV)
1031 return false;
1032
Matt Arsenault0da63502018-08-31 05:49:54 +00001033 if (AS == AMDGPUAS::GLOBAL_ADDRESS)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001034 return isLegalGlobalAddressingMode(AM);
Matt Arsenault5015a892014-08-15 17:17:07 +00001035
Matt Arsenault0da63502018-08-31 05:49:54 +00001036 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
1037 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001038 // If the offset isn't a multiple of 4, it probably isn't going to be
1039 // correctly aligned.
Matt Arsenault3cc1e002016-08-13 01:43:51 +00001040 // FIXME: Can we get the real alignment here?
Matt Arsenault711b3902015-08-07 20:18:34 +00001041 if (AM.BaseOffs % 4 != 0)
1042 return isLegalMUBUFAddressingMode(AM);
1043
1044 // There are no SMRD extloads, so if we have to do a small type access we
1045 // will use a MUBUF load.
1046 // FIXME?: We also need to do this if unaligned, but we don't know the
1047 // alignment here.
Stanislav Mekhanoshin57d341c2018-05-15 22:07:51 +00001048 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001049 return isLegalGlobalAddressingMode(AM);
Matt Arsenault711b3902015-08-07 20:18:34 +00001050
Tom Stellard5bfbae52018-07-11 20:59:01 +00001051 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001052 // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;

  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
             AS == AMDGPUAS::REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
             AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  } else {
    llvm_unreachable("unhandled address space");
  }
}

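// For example, four adjacent 32-bit global stores may be merged into a single
// 128-bit store, while LDS (local) stores are only merged up to 64 bits and
// private stores up to the subtarget's maximum private element size.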
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
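    // e.g. a 4-byte aligned (but 8-byte misaligned) 8-byte load can be done as
    // one ds_read2_b32 of the two adjacent dwords rather than two separate
    // loads.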
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    bool AlignedBy4 = Align >= 4;
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
1161 // If we have an uniform constant load, it still requires using a slow
1162 // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword value must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

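  // For example, a 16-byte or larger memcpy / memset with a destination that
  // is at least 4-byte aligned is widened to v4i32 accesses below instead of
  // the 32-bit default.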
  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

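// Casts between the flat, global, and constant address spaces are free: they
// all refer to the same underlying 64-bit address, so no conversion code is
// needed.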
bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is no-op
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *InputPtrReg;
  const TargetRegisterClass *RC;

  std::tie(InputPtrReg, RC)
    = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
    MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);

  return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
}

SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
                                            const SDLoc &SL) const {
  uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
                                               FIRST_IMPLICIT);
  return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
}

SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, unsigned Align, bool Signed,
  const ISD::InputArg *Arg) const {
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  // Try to avoid using an extload by loading earlier than the argument address,
  // and extracting the relevant bits. The load should hopefully be merged with
  // the previous argument.
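  // For example, an i16 argument at byte offset 2 is loaded as the i32 at
  // offset 0; OffsetDiff is then 2, so the argument is recovered with a
  // 16-bit right shift followed by a truncate.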
  if (MemVT.getStoreSize() < 4 && Align < 4) {
    // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
    int64_t AlignDownOffset = alignDown(Offset, 4);
    int64_t OffsetDiff = Offset - AlignDownOffset;

    EVT IntVT = MemVT.changeTypeToInteger();

    // TODO: If we passed in the base kernel offset we could have a better
    // alignment than 4, but we don't really need it.
    SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
    SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
                               MachineMemOperand::MODereferenceable |
                               MachineMemOperand::MOInvariant);

    SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
    SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);

    SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
    ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
    ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);

    return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
  }

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
    ExtType, SL, VA.getLocVT(), Chain, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
    MemVT);
  return ArgValue;
}

SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
                                            const SIMachineFunctionInfo &MFI,
                                            EVT VT,
                                            AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
  const ArgDescriptor *Reg;
  const TargetRegisterClass *RC;

  std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
  return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
}

static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg *Arg = &Ins[I];

    assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
           "vector type argument should have been split");

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS &&
        !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {

      bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);

      // Inconveniently only the first part of the split is marked as isSplit,
      // so skip to the end. We only want to increment PSInputNum once for the
      // entire split argument.
      if (Arg->Flags.isSplit()) {
        while (!Arg->Flags.isSplitEnd()) {
          assert(!Arg->VT.isVector() &&
                 "unexpected vector split in ps argument type");
          if (!SkipArg)
            Splits.push_back(*Arg);
          Arg = &Ins[++I];
        }
      }

      if (SkipArg) {
        // We can safely skip PS inputs.
        Skipped.set(Arg->getOrigArgIndex());
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg->Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    Splits.push_back(*Arg);
  }
}

// Allocate special inputs passed in VGPRs.
static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                           MachineFunction &MF,
                                           const SIRegisterInfo &TRI,
                                           SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = AMDGPU::VGPR0;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = AMDGPU::VGPR1;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = AMDGPU::VGPR2;
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);

    CCInfo.AllocateReg(Reg);
    Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
  }
}

// Try to allocate a VGPR at the end of the argument list, or, if no argument
// VGPRs are left, allocate a stack slot.
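// For example, once the first 32 argument VGPRs are all allocated, the value
// is passed in a 4-byte stack slot instead.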
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
  ArrayRef<MCPhysReg> ArgVGPRs
    = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
  if (RegIdx == ArgVGPRs.size()) {
    // Spill to stack required.
    int64_t Offset = CCInfo.AllocateStack(4, 4);

    return ArgDescriptor::createStack(Offset);
  }

  unsigned Reg = ArgVGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
                                             const TargetRegisterClass *RC,
                                             unsigned NumArgRegs) {
  ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
  unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
  if (RegIdx == ArgSGPRs.size())
    report_fatal_error("ran out of SGPRs for arguments");

  unsigned Reg = ArgSGPRs[RegIdx];
  Reg = CCInfo.AllocateReg(Reg);
  assert(Reg != AMDGPU::NoRegister);

  MachineFunction &MF = CCInfo.getMachineFunction();
  MF.addLiveIn(Reg, RC);
  return ArgDescriptor::createRegister(Reg);
}

static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
}

static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
  return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
}

static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX())
    Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDY())
    Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));

  if (Info.hasWorkItemIDZ())
    Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
}

static void allocateSpecialInputSGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  auto &ArgInfo = Info.getArgInfo();

  // TODO: Unify handling with private memory pointers.

  if (Info.hasDispatchPtr())
    ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasQueuePtr())
    ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);

  if (Info.hasKernargSegmentPtr())
    ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);

  if (Info.hasDispatchID())
    ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);

  // flat_scratch_init is not applicable for non-kernel functions.

  if (Info.hasWorkGroupIDX())
    ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDY())
    ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);

  if (Info.hasWorkGroupIDZ())
    ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);

  if (Info.hasImplicitArgPtr())
    ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if we
  // should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  // For now assume stack access is needed in any callee functions, so we need
  // the scratch registers to pass in.
  bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.isAmdHsaOrMesa(MF.getFunction())) {
    if (RequiresStackAccess) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = Info.getPreloadedReg(
        AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      if (MFI.hasCalls()) {
        // If we have calls, we need to keep the frame register in a register
        // that won't be clobbered by a call, so ensure it is copied somewhere.

        // This is not a problem for the scratch wave offset, because the same
        // registers are reserved in all functions.

        // FIXME: Nothing is really ensuring this is a call preserved register,
        // it's just selected from the end so it happens to be.
        unsigned ReservedOffsetReg
          = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
        Info.setScratchWaveOffsetReg(ReservedOffsetReg);
      } else {
        unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg(
          AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
        Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
      }
    } else {
      unsigned ReservedBufferReg
        = TRI.reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info.setScratchRSrcReg(ReservedBufferReg);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info.setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects && !MFI.hasCalls()) {
      unsigned ScratchWaveOffsetReg = Info.getPreloadedReg(
        AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }
}

bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  return !Info->isEntryFunction();
}

void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {

}

void SITargetLowering::insertCopiesSplitCSR(
  MachineBasicBlock *Entry,
  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (AMDGPU::SReg_64RegClass.contains(*I))
      RC = &AMDGPU::SGPR_64RegClass;
    else if (AMDGPU::SReg_32RegClass.contains(*I))
      RC = &AMDGPU::SGPR_32RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
      .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
  }
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &Fn = MF.getFunction();
  FunctionType *FType = MF.getFunction().getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    DiagnosticInfoUnsupported NoGraphicsHSA(
        Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsShader = AMDGPU::isShader(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (!IsEntryFunc) {
    // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
    // this when allocating argument fixed offsets.
    CCInfo.AllocateStack(4, 4);
  }

  if (IsShader) {
    processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here, and the user should take
    // responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
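    // For example, if a pixel shader reads none of its interpolated inputs,
    // input 0 is force-enabled below (and VGPR0/VGPR1 reserved) so that the
    // PSInputEna field is not left empty.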
    if (CallConv == CallingConv::AMDGPU_PS) {
      if ((Info->getPSInputAddr() & 0x7F) == 0 ||
          ((Info->getPSInputAddr() & 0xF) == 0 &&
           Info->isPSInputAllocated(11))) {
        CCInfo.AllocateReg(AMDGPU::VGPR0);
        CCInfo.AllocateReg(AMDGPU::VGPR1);
        Info->markPSInputAllocated(0);
        Info->markPSInputEnabled(0);
      }
      if (Subtarget->isAmdPalOS()) {
        // For isAmdPalOS, the user does not enable some bits after compilation
        // based on run-time states; the register values being generated here are
        // the final ones set in hardware. Therefore we need to apply the
        // workaround to PSInputAddr and PSInputEnable together. (The case where
        // a bit is set in PSInputAddr but not PSInputEnable is where the
        // frontend set up an input arg for a particular interpolation mode, but
        // nothing uses that input arg. Really we should have an earlier pass
        // that removes such an arg.)
        unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
        if ((PsInputBits & 0x7F) == 0 ||
            ((PsInputBits & 0xF) == 0 &&
             (PsInputBits >> 11 & 1)))
          Info->markPSInputEnabled(
              countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
      }
    }

    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  // FIXME: This is the minimum kernel argument alignment. We should improve
  // this to the maximum alignment of the arguments.
  //
  // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
  // kern arg offset.
  const unsigned KernelArgBaseAlign = 16;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = VA.getLocMemOffset();
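      // e.g. an argument at byte offset 36 gets MinAlign(16, 36) == 4 as its
      // conservative known alignment below.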
      unsigned Align = MinAlign(KernelArgBaseAlign, Offset);

      SDValue Arg = lowerKernargMemParameter(
        DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    EVT ValVT = VA.getValVT();

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) {
      // The return object should be reasonably addressable.

      // FIXME: This helps when the return is a real sret. If it is an
      // automatically inserted sret (i.e. CanLowerReturn returns false), an
      // extra copy is inserted in SelectionDAGBuilder which obscures this.
      unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits;
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
        DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
    }

    // If this is an 8 or 16-bit value, it is really passed promoted
    // to 32 bits. Insert an assert[sz]ext to capture this, then
    // truncate to the right size.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    InVals.push_back(Val);
  }

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  auto &ArgUsageInfo =
    DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
  ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  Info->setBytesInStackArgArea(StackArgSize);

  return Chains.empty() ? Chain :
    DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
  CallingConv::ID CallConv,
  MachineFunction &MF, bool IsVarArg,
  const SmallVectorImpl<ISD::OutputArg> &Outs,
  LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.empty());
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;
  SmallVector<ISD::OutputArg, 48> Splits;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    // FIXME: Should be able to use a vreg here, but need a way to prevent it
2134 // from being allcoated to a CSR.
2135
2136 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2137 MVT::i64);
2138
2139 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2140 Flag = Chain.getValue(1);
2141
2142 RetOps.push_back(PhysReturnAddrReg);
2143 }
2144
Marek Olsak8a0f3352016-01-13 17:23:04 +00002145 // Copy the result values into the output registers.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002146 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2147 ++I, ++RealRVLocIdx) {
2148 CCValAssign &VA = RVLocs[I];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002149 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002150 // TODO: Partially return in registers if return values don't fit.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002151 SDValue Arg = OutVals[RealRVLocIdx];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002152
2153 // Copied from other backends.
2154 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002155 case CCValAssign::Full:
2156 break;
2157 case CCValAssign::BCvt:
2158 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2159 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002160 case CCValAssign::SExt:
2161 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2162 break;
2163 case CCValAssign::ZExt:
2164 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2165 break;
2166 case CCValAssign::AExt:
2167 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2168 break;
2169 default:
2170 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002171 }
2172
2173 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2174 Flag = Chain.getValue(1);
2175 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2176 }
2177
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002178 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002179 if (!Info->isEntryFunction()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002180 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002181 const MCPhysReg *I =
2182 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2183 if (I) {
2184 for (; *I; ++I) {
2185 if (AMDGPU::SReg_64RegClass.contains(*I))
2186 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2187 else if (AMDGPU::SReg_32RegClass.contains(*I))
2188 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2189 else
2190 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2191 }
2192 }
2193 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002194
Marek Olsak8a0f3352016-01-13 17:23:04 +00002195 // Update chain and glue.
2196 RetOps[0] = Chain;
2197 if (Flag.getNode())
2198 RetOps.push_back(Flag);
2199
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002200 unsigned Opc = AMDGPUISD::ENDPGM;
2201 if (!IsWaveEnd)
2202 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002203 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002204}
2205
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002206SDValue SITargetLowering::LowerCallResult(
2207 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2208 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2209 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2210 SDValue ThisVal) const {
2211 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2212
2213 // Assign locations to each value returned by this call.
2214 SmallVector<CCValAssign, 16> RVLocs;
2215 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2216 *DAG.getContext());
2217 CCInfo.AnalyzeCallResult(Ins, RetCC);
2218
2219 // Copy all of the result registers out of their specified physreg.
2220 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2221 CCValAssign VA = RVLocs[i];
2222 SDValue Val;
2223
2224 if (VA.isRegLoc()) {
2225 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2226 Chain = Val.getValue(1);
2227 InFlag = Val.getValue(2);
2228 } else if (VA.isMemLoc()) {
2229 report_fatal_error("TODO: return values in memory");
2230 } else
2231 llvm_unreachable("unknown argument location type");
2232
2233 switch (VA.getLocInfo()) {
2234 case CCValAssign::Full:
2235 break;
2236 case CCValAssign::BCvt:
2237 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2238 break;
2239 case CCValAssign::ZExt:
2240 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2241 DAG.getValueType(VA.getValVT()));
2242 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2243 break;
2244 case CCValAssign::SExt:
2245 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2246 DAG.getValueType(VA.getValVT()));
2247 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2248 break;
2249 case CCValAssign::AExt:
2250 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2251 break;
2252 default:
2253 llvm_unreachable("Unknown loc info!");
2254 }
2255
2256 InVals.push_back(Val);
2257 }
2258
2259 return Chain;
2260}
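// A minimal worked example of the promotion handling above (a sketch; PhysReg
// stands in for whatever register the calling convention assigned): an i16
// result returned zero-extended in a 32-bit register is copied out and
// narrowed back as
//
//   SDValue Val = DAG.getCopyFromReg(Chain, DL, PhysReg, MVT::i32, InFlag);
//   Val = DAG.getNode(ISD::AssertZext, DL, MVT::i32, Val,
//                     DAG.getValueType(MVT::i16));
//   Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
//
// so later combines know the high bits were already zero before the truncate.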
2261
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002262// Add code to pass the special inputs required by the features in use, separate
2263// from the explicit user arguments present in the IR.
2264void SITargetLowering::passSpecialInputs(
2265 CallLoweringInfo &CLI,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002266 CCState &CCInfo,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002267 const SIMachineFunctionInfo &Info,
2268 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2269 SmallVectorImpl<SDValue> &MemOpChains,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002270 SDValue Chain) const {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002271 // If we don't have a call site, this was a call inserted by
2272 // legalization. These can never use special inputs.
2273 if (!CLI.CS)
2274 return;
2275
2276 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002277 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002278
2279 SelectionDAG &DAG = CLI.DAG;
2280 const SDLoc &DL = CLI.DL;
2281
Tom Stellardc5a154d2018-06-28 23:47:12 +00002282 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002283
2284 auto &ArgUsageInfo =
2285 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2286 const AMDGPUFunctionArgInfo &CalleeArgInfo
2287 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2288
2289 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2290
2291 // TODO: Unify with private memory register handling. This is complicated by
2292 // the fact that at least in kernels, the input argument is not necessarily
2293 // in the same location as the input.
2294 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2295 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2296 AMDGPUFunctionArgInfo::QUEUE_PTR,
2297 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2298 AMDGPUFunctionArgInfo::DISPATCH_ID,
2299 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2300 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2301 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2302 AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2303 AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
Matt Arsenault817c2532017-08-03 23:12:44 +00002304 AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2305 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002306 };
2307
2308 for (auto InputID : InputRegs) {
2309 const ArgDescriptor *OutgoingArg;
2310 const TargetRegisterClass *ArgRC;
2311
2312 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2313 if (!OutgoingArg)
2314 continue;
2315
2316 const ArgDescriptor *IncomingArg;
2317 const TargetRegisterClass *IncomingArgRC;
2318 std::tie(IncomingArg, IncomingArgRC)
2319 = CallerArgInfo.getPreloadedValue(InputID);
2320 assert(IncomingArgRC == ArgRC);
2321
2322 // All special arguments are ints for now.
2323 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
Matt Arsenault817c2532017-08-03 23:12:44 +00002324 SDValue InputReg;
2325
2326 if (IncomingArg) {
2327 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2328 } else {
2329 // The implicit arg ptr is special because it doesn't have a corresponding
2330 // input for kernels, and is computed from the kernarg segment pointer.
2331 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2332 InputReg = getImplicitArgPtr(DAG, DL);
2333 }
2334
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002335 if (OutgoingArg->isRegister()) {
2336 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2337 } else {
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002338 unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2339 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2340 SpecialArgOffset);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002341 MemOpChains.push_back(ArgStore);
2342 }
2343 }
2344}
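// Rough illustration of the forwarding above, assuming the callee needs
// WORKGROUP_ID_X: when the callee's argument info maps it to a register, the
// caller's incoming copy is re-materialized with loadInputValue and queued in
// RegsToPass for that register; when the callee expects it on the stack, the
// same value is written with storeStackInputValue at the offset reserved via
// CCInfo.AllocateStack.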
2345
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002346static bool canGuaranteeTCO(CallingConv::ID CC) {
2347 return CC == CallingConv::Fast;
2348}
2349
2350/// Return true if we might ever do TCO for calls with this calling convention.
2351static bool mayTailCallThisCC(CallingConv::ID CC) {
2352 switch (CC) {
2353 case CallingConv::C:
2354 return true;
2355 default:
2356 return canGuaranteeTCO(CC);
2357 }
2358}
2359
2360bool SITargetLowering::isEligibleForTailCallOptimization(
2361 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2362 const SmallVectorImpl<ISD::OutputArg> &Outs,
2363 const SmallVectorImpl<SDValue> &OutVals,
2364 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2365 if (!mayTailCallThisCC(CalleeCC))
2366 return false;
2367
2368 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002369 const Function &CallerF = MF.getFunction();
2370 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002371 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2372 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2373
2374  // Kernels aren't callable, and don't have a live-in return address, so it
2375 // doesn't make sense to do a tail call with entry functions.
2376 if (!CallerPreserved)
2377 return false;
2378
2379 bool CCMatch = CallerCC == CalleeCC;
2380
2381 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2382 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2383 return true;
2384 return false;
2385 }
2386
2387 // TODO: Can we handle var args?
2388 if (IsVarArg)
2389 return false;
2390
Matthias Braunf1caa282017-12-15 22:22:58 +00002391 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002392 if (Arg.hasByValAttr())
2393 return false;
2394 }
2395
2396 LLVMContext &Ctx = *DAG.getContext();
2397
2398 // Check that the call results are passed in the same way.
2399 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2400 CCAssignFnForCall(CalleeCC, IsVarArg),
2401 CCAssignFnForCall(CallerCC, IsVarArg)))
2402 return false;
2403
2404 // The callee has to preserve all registers the caller needs to preserve.
2405 if (!CCMatch) {
2406 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2407 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2408 return false;
2409 }
2410
2411 // Nothing more to check if the callee is taking no arguments.
2412 if (Outs.empty())
2413 return true;
2414
2415 SmallVector<CCValAssign, 16> ArgLocs;
2416 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2417
2418 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2419
2420 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2421 // If the stack arguments for this call do not fit into our own save area then
2422 // the call cannot be made tail.
2423 // TODO: Is this really necessary?
2424 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2425 return false;
2426
2427 const MachineRegisterInfo &MRI = MF.getRegInfo();
2428 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2429}
2430
2431bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2432 if (!CI->isTailCall())
2433 return false;
2434
2435 const Function *ParentFn = CI->getParent()->getParent();
2436 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2437 return false;
2438
2439 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2440 return (Attr.getValueAsString() != "true");
2441}
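// Usage note (a sketch of the intended behaviour, not taken from this file):
// if the calling function carries the attribute "disable-tail-calls"="true",
// this hook returns false for every call site in it, so none of its calls are
// considered for tail-call emission even if they are marked `tail` in the IR.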
2442
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002443// The wave scratch offset register is used as the global base pointer.
2444SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2445 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002446 SelectionDAG &DAG = CLI.DAG;
2447 const SDLoc &DL = CLI.DL;
2448 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2449 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2450 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2451 SDValue Chain = CLI.Chain;
2452 SDValue Callee = CLI.Callee;
2453 bool &IsTailCall = CLI.IsTailCall;
2454 CallingConv::ID CallConv = CLI.CallConv;
2455 bool IsVarArg = CLI.IsVarArg;
2456 bool IsSibCall = false;
2457 bool IsThisReturn = false;
2458 MachineFunction &MF = DAG.getMachineFunction();
2459
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002460 if (IsVarArg) {
2461 return lowerUnhandledCall(CLI, InVals,
2462 "unsupported call to variadic function ");
2463 }
2464
Matt Arsenault935f3b72018-08-08 16:58:39 +00002465 if (!CLI.CS.getInstruction())
2466 report_fatal_error("unsupported libcall legalization");
2467
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002468 if (!CLI.CS.getCalledFunction()) {
2469 return lowerUnhandledCall(CLI, InVals,
2470 "unsupported indirect call to function ");
2471 }
2472
2473 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2474 return lowerUnhandledCall(CLI, InVals,
2475 "unsupported required tail call to function ");
2476 }
2477
Matt Arsenault1fb90132018-06-28 10:18:36 +00002478 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2479 // Note the issue is with the CC of the calling function, not of the call
2480 // itself.
2481 return lowerUnhandledCall(CLI, InVals,
2482 "unsupported call from graphics shader of function ");
2483 }
2484
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002486 if (IsTailCall) {
2487 IsTailCall = isEligibleForTailCallOptimization(
2488 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2489 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2490 report_fatal_error("failed to perform tail call elimination on a call "
2491 "site marked musttail");
2492 }
2493
2494 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2495
2496 // A sibling call is one where we're under the usual C ABI and not planning
2497 // to change that but can still do a tail call:
2498 if (!TailCallOpt && IsTailCall)
2499 IsSibCall = true;
2500
2501 if (IsTailCall)
2502 ++NumTailCalls;
2503 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002504
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002505 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2506
2507 // Analyze operands of the call, assigning locations to each operand.
2508 SmallVector<CCValAssign, 16> ArgLocs;
2509 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2510 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002511
2512 // The first 4 bytes are reserved for the callee's emergency stack slot.
2513 CCInfo.AllocateStack(4, 4);
2514
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002515 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2516
2517 // Get a count of how many bytes are to be pushed on the stack.
2518 unsigned NumBytes = CCInfo.getNextStackOffset();
2519
2520 if (IsSibCall) {
2521 // Since we're not changing the ABI to make this a tail call, the memory
2522 // operands are already available in the caller's incoming argument space.
2523 NumBytes = 0;
2524 }
2525
2526 // FPDiff is the byte offset of the call's argument area from the callee's.
2527 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2528 // by this amount for a tail call. In a sibling call it must be 0 because the
2529 // caller will deallocate the entire stack and the callee still expects its
2530 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002531 int32_t FPDiff = 0;
2532 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002533 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2534
Matt Arsenault6efd0822017-09-14 17:14:57 +00002535 SDValue CallerSavedFP;
2536
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002537 // Adjust the stack pointer for the new arguments...
2538 // These operations are automatically eliminated by the prolog/epilog pass
2539 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002540 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002541
2542 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2543
2544 // In the HSA case, this should be an identity copy.
2545 SDValue ScratchRSrcReg
2546 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2547 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
2548
2549    // TODO: Don't hardcode these registers; get them from the callee function.
2550 SDValue ScratchWaveOffsetReg
2551 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2552 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault6efd0822017-09-14 17:14:57 +00002553
2554 if (!Info->isEntryFunction()) {
2555      // Avoid clobbering this function's FP value. In the current convention the
2556      // callee will overwrite it, so save/restore it around the call site.
2557 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2558 Info->getFrameOffsetReg(), MVT::i32);
2559 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002560 }
2561
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002562 SmallVector<SDValue, 8> MemOpChains;
2563 MVT PtrVT = MVT::i32;
2564
2565 // Walk the register/memloc assignments, inserting copies/loads.
2566 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2567 ++i, ++realArgIdx) {
2568 CCValAssign &VA = ArgLocs[i];
2569 SDValue Arg = OutVals[realArgIdx];
2570
2571 // Promote the value if needed.
2572 switch (VA.getLocInfo()) {
2573 case CCValAssign::Full:
2574 break;
2575 case CCValAssign::BCvt:
2576 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2577 break;
2578 case CCValAssign::ZExt:
2579 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2580 break;
2581 case CCValAssign::SExt:
2582 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2583 break;
2584 case CCValAssign::AExt:
2585 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2586 break;
2587 case CCValAssign::FPExt:
2588 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2589 break;
2590 default:
2591 llvm_unreachable("Unknown loc info!");
2592 }
2593
2594 if (VA.isRegLoc()) {
2595 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2596 } else {
2597 assert(VA.isMemLoc());
2598
2599 SDValue DstAddr;
2600 MachinePointerInfo DstInfo;
2601
2602 unsigned LocMemOffset = VA.getLocMemOffset();
2603 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002604
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002605 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002606 unsigned Align = 0;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002607
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002608 if (IsTailCall) {
2609 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2610 unsigned OpSize = Flags.isByVal() ?
2611 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002612
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002613        // FIXME: We could use better than the minimum required byval alignment.
2614 Align = Flags.isByVal() ? Flags.getByValAlign() :
2615 MinAlign(Subtarget->getStackAlignment(), Offset);
2616
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002617 Offset = Offset + FPDiff;
2618 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2619
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002620 DstAddr = DAG.getFrameIndex(FI, PtrVT);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002621 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2622
2623 // Make sure any stack arguments overlapping with where we're storing
2624 // are loaded before this eventual operation. Otherwise they'll be
2625 // clobbered.
2626
2627 // FIXME: Why is this really necessary? This seems to just result in a
2628 // lot of code to copy the stack and write them back to the same
2629 // locations, which are supposed to be immutable?
2630 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2631 } else {
2632 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002633 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002634 Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002635 }
2636
2637 if (Outs[i].Flags.isByVal()) {
2638 SDValue SizeNode =
2639 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2640 SDValue Cpy = DAG.getMemcpy(
2641 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2642 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002643 /*isTailCall = */ false, DstInfo,
2644 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
Matt Arsenault0da63502018-08-31 05:49:54 +00002645 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002646
2647 MemOpChains.push_back(Cpy);
2648 } else {
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002649 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002650 MemOpChains.push_back(Store);
2651 }
2652 }
2653 }
2654
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002655 // Copy special input registers after user input arguments.
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002656 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002657
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002658 if (!MemOpChains.empty())
2659 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2660
2661 // Build a sequence of copy-to-reg nodes chained together with token chain
2662 // and flag operands which copy the outgoing args into the appropriate regs.
2663 SDValue InFlag;
2664 for (auto &RegToPass : RegsToPass) {
2665 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2666 RegToPass.second, InFlag);
2667 InFlag = Chain.getValue(1);
2668 }
2669
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002670
2671 SDValue PhysReturnAddrReg;
2672 if (IsTailCall) {
2673 // Since the return is being combined with the call, we need to pass on the
2674 // return address.
2675
2676 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2677 SDValue ReturnAddrReg = CreateLiveInRegister(
2678 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2679
2680 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2681 MVT::i64);
2682 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2683 InFlag = Chain.getValue(1);
2684 }
2685
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002686  // We don't usually want to end the call-sequence here because we would tidy
2687  // the frame up *after* the call. However, in the ABI-changing tail-call case
2688  // we've carefully laid out the parameters so that when SP is reset they'll be
2689  // in the correct location.
2690 if (IsTailCall && !IsSibCall) {
2691 Chain = DAG.getCALLSEQ_END(Chain,
2692 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2693 DAG.getTargetConstant(0, DL, MVT::i32),
2694 InFlag, DL);
2695 InFlag = Chain.getValue(1);
2696 }
2697
2698 std::vector<SDValue> Ops;
2699 Ops.push_back(Chain);
2700 Ops.push_back(Callee);
2701
2702 if (IsTailCall) {
2703 // Each tail call may have to adjust the stack by a different amount, so
2704 // this information must travel along with the operation for eventual
2705 // consumption by emitEpilogue.
2706 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002707
2708 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002709 }
2710
2711 // Add argument registers to the end of the list so that they are known live
2712 // into the call.
2713 for (auto &RegToPass : RegsToPass) {
2714 Ops.push_back(DAG.getRegister(RegToPass.first,
2715 RegToPass.second.getValueType()));
2716 }
2717
2718 // Add a register mask operand representing the call-preserved registers.
2719
Tom Stellardc5a154d2018-06-28 23:47:12 +00002720 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002721 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2722 assert(Mask && "Missing call preserved mask for calling convention");
2723 Ops.push_back(DAG.getRegisterMask(Mask));
2724
2725 if (InFlag.getNode())
2726 Ops.push_back(InFlag);
2727
2728 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2729
2730  // If we're doing a tail call, use a TC_RETURN here rather than an
2731 // actual call instruction.
2732 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002733 MFI.setHasTailCall();
2734 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002735 }
2736
2737 // Returns a chain and a flag for retval copy to use.
2738 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2739 Chain = Call.getValue(0);
2740 InFlag = Call.getValue(1);
2741
Matt Arsenault6efd0822017-09-14 17:14:57 +00002742 if (CallerSavedFP) {
2743 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2744 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2745 InFlag = Chain.getValue(1);
2746 }
2747
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002748 uint64_t CalleePopBytes = NumBytes;
2749 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002750 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2751 InFlag, DL);
2752 if (!Ins.empty())
2753 InFlag = Chain.getValue(1);
2754
2755 // Handle result values, copying them out of physregs into vregs that we
2756 // return.
2757 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2758 InVals, IsThisReturn,
2759 IsThisReturn ? OutVals[0] : SDValue());
2760}
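// High-level shape of the DAG produced above for a normal (non-tail) call,
// written as an informal sketch rather than a guarantee of node order:
//
//   CALLSEQ_START
//     copies of the scratch rsrc / wave offset (and the saved FP if needed)
//     stores or register copies for each user argument
//     copies for the special inputs added by passSpecialInputs
//     glued CopyToReg chain feeding AMDGPUISD::CALL
//   CALLSEQ_END
//     LowerCallResult copies the returned values back out of physregs
//
// Tail calls instead terminate in AMDGPUISD::TC_RETURN and skip the
// result-copy step entirely.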
2761
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002762unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2763 SelectionDAG &DAG) const {
2764 unsigned Reg = StringSwitch<unsigned>(RegName)
2765 .Case("m0", AMDGPU::M0)
2766 .Case("exec", AMDGPU::EXEC)
2767 .Case("exec_lo", AMDGPU::EXEC_LO)
2768 .Case("exec_hi", AMDGPU::EXEC_HI)
2769 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2770 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2771 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2772 .Default(AMDGPU::NoRegister);
2773
2774 if (Reg == AMDGPU::NoRegister) {
2775 report_fatal_error(Twine("invalid register name \""
2776 + StringRef(RegName) + "\"."));
2777
2778 }
2779
Tom Stellard5bfbae52018-07-11 20:59:01 +00002780 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002781 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
2782 report_fatal_error(Twine("invalid register \""
2783 + StringRef(RegName) + "\" for subtarget."));
2784 }
2785
2786 switch (Reg) {
2787 case AMDGPU::M0:
2788 case AMDGPU::EXEC_LO:
2789 case AMDGPU::EXEC_HI:
2790 case AMDGPU::FLAT_SCR_LO:
2791 case AMDGPU::FLAT_SCR_HI:
2792 if (VT.getSizeInBits() == 32)
2793 return Reg;
2794 break;
2795 case AMDGPU::EXEC:
2796 case AMDGPU::FLAT_SCR:
2797 if (VT.getSizeInBits() == 64)
2798 return Reg;
2799 break;
2800 default:
2801 llvm_unreachable("missing register type checking");
2802 }
2803
2804 report_fatal_error(Twine("invalid type for register \""
2805 + StringRef(RegName) + "\"."));
2806}
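// Example of the name resolution above (a sketch): requesting "exec_lo" with a
// 32-bit type yields AMDGPU::EXEC_LO, requesting "exec" with a 64-bit type
// yields AMDGPU::EXEC, and a mismatch between the name and the requested size
// falls through to the "invalid type for register" fatal error.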
2807
Matt Arsenault786724a2016-07-12 21:41:32 +00002808// If kill is not the last instruction, split the block so kill is always a
2809// proper terminator.
2810MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2811 MachineBasicBlock *BB) const {
2812 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2813
2814 MachineBasicBlock::iterator SplitPoint(&MI);
2815 ++SplitPoint;
2816
2817 if (SplitPoint == BB->end()) {
2818 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002819 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002820 return BB;
2821 }
2822
2823 MachineFunction *MF = BB->getParent();
2824 MachineBasicBlock *SplitBB
2825 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2826
Matt Arsenault786724a2016-07-12 21:41:32 +00002827 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2828 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2829
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002830 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002831 BB->addSuccessor(SplitBB);
2832
Marek Olsakce76ea02017-10-24 10:27:13 +00002833 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002834 return SplitBB;
2835}
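// Before/after sketch of the split above (illustrative only): everything after
// the kill is moved into a new SplitBB that inherits BB's successors, BB falls
// through to SplitBB, and the kill pseudo itself is rewritten to its
// terminator form via getKillTerminatorFromPseudo.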
2836
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002837// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2838// wavefront. If the value is uniform and just happens to be in a VGPR, this
2839// will only do one iteration. In the worst case, this will loop 64 times.
2840//
2841// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002842static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2843 const SIInstrInfo *TII,
2844 MachineRegisterInfo &MRI,
2845 MachineBasicBlock &OrigBB,
2846 MachineBasicBlock &LoopBB,
2847 const DebugLoc &DL,
2848 const MachineOperand &IdxReg,
2849 unsigned InitReg,
2850 unsigned ResultReg,
2851 unsigned PhiReg,
2852 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002853 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002854 bool UseGPRIdxMode,
2855 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002856 MachineBasicBlock::iterator I = LoopBB.begin();
2857
2858 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2859 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2860 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2861 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2862
2863 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2864 .addReg(InitReg)
2865 .addMBB(&OrigBB)
2866 .addReg(ResultReg)
2867 .addMBB(&LoopBB);
2868
2869 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2870 .addReg(InitSaveExecReg)
2871 .addMBB(&OrigBB)
2872 .addReg(NewExec)
2873 .addMBB(&LoopBB);
2874
2875 // Read the next variant <- also loop target.
2876 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2877 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2878
2879 // Compare the just read M0 value to all possible Idx values.
2880 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2881 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002882 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002883
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002884  // Update EXEC; S_AND_SAVEEXEC_B64 saves the original EXEC value into NewExec.
2885 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2886 .addReg(CondReg, RegState::Kill);
2887
2888 MRI.setSimpleHint(NewExec, CondReg);
2889
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002890 if (UseGPRIdxMode) {
2891 unsigned IdxReg;
2892 if (Offset == 0) {
2893 IdxReg = CurrentIdxReg;
2894 } else {
2895 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2896 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2897 .addReg(CurrentIdxReg, RegState::Kill)
2898 .addImm(Offset);
2899 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002900 unsigned IdxMode = IsIndirectSrc ?
2901 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
2902 MachineInstr *SetOn =
2903 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2904 .addReg(IdxReg, RegState::Kill)
2905 .addImm(IdxMode);
2906 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002907 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002908    // Move the just-read index into M0.
2909 if (Offset == 0) {
2910 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2911 .addReg(CurrentIdxReg, RegState::Kill);
2912 } else {
2913 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2914 .addReg(CurrentIdxReg, RegState::Kill)
2915 .addImm(Offset);
2916 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002917 }
2918
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002919 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002920 MachineInstr *InsertPt =
2921 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002922 .addReg(AMDGPU::EXEC)
2923 .addReg(NewExec);
2924
2925 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2926 // s_cbranch_scc0?
2927
2928 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
2929 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
2930 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002931
2932 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002933}
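// Scalarized pseudocode for one iteration of the waterfall loop built above
// (a sketch of the intent, not the emitted MIR):
//
//   CurrentIdx = v_readfirstlane(IdxReg)   // pick one lane's index value
//   Cond       = (IdxReg == CurrentIdx)    // every lane using that value
//   NewExec    = EXEC; EXEC &= Cond        // s_and_saveexec_b64
//   M0 (or the GPR index mode) = CurrentIdx + Offset
//   ... the caller inserts the indirect move at the returned iterator ...
//   EXEC ^= NewExec                        // retire the lanes just handled
//   loop while EXEC != 0                   // s_cbranch_execnz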
2934
2935// This has slightly sub-optimal register allocation when the source vector is
2936// killed by the read. The register allocator does not understand that the kill
2937// is per-workitem, so the vector is kept alive for the whole loop and we end up
2938// not re-using a subregister from it, using one more VGPR than necessary. This
2939// extra VGPR was avoided when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002940static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
2941 MachineBasicBlock &MBB,
2942 MachineInstr &MI,
2943 unsigned InitResultReg,
2944 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002945 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002946 bool UseGPRIdxMode,
2947 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002948 MachineFunction *MF = MBB.getParent();
2949 MachineRegisterInfo &MRI = MF->getRegInfo();
2950 const DebugLoc &DL = MI.getDebugLoc();
2951 MachineBasicBlock::iterator I(&MI);
2952
2953 unsigned DstReg = MI.getOperand(0).getReg();
Matt Arsenault301162c2017-11-15 21:51:43 +00002954 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
2955 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002956
2957 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
2958
2959 // Save the EXEC mask
2960 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
2961 .addReg(AMDGPU::EXEC);
2962
2963 // To insert the loop we need to split the block. Move everything after this
2964 // point to a new block, and insert a new empty block between the two.
2965 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
2966 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
2967 MachineFunction::iterator MBBI(MBB);
2968 ++MBBI;
2969
2970 MF->insert(MBBI, LoopBB);
2971 MF->insert(MBBI, RemainderBB);
2972
2973 LoopBB->addSuccessor(LoopBB);
2974 LoopBB->addSuccessor(RemainderBB);
2975
2976 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002977 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002978 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
2979
2980 MBB.addSuccessor(LoopBB);
2981
2982 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
2983
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002984 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
2985 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002986 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002987
2988 MachineBasicBlock::iterator First = RemainderBB->begin();
2989 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
2990 .addReg(SaveExec);
2991
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002992 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002993}
2994
2995// Returns subreg index, offset
2996static std::pair<unsigned, int>
2997computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
2998 const TargetRegisterClass *SuperRC,
2999 unsigned VecReg,
3000 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003001 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003002
3003 // Skip out of bounds offsets, or else we would end up using an undefined
3004 // register.
3005 if (Offset >= NumElts || Offset < 0)
3006 return std::make_pair(AMDGPU::sub0, Offset);
3007
3008 return std::make_pair(AMDGPU::sub0 + Offset, 0);
3009}
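// Worked example of the mapping above, assuming a 128-bit (4 x 32-bit)
// super-register class; the local names are illustrative:
//
//   unsigned SubIdx; int Off;
//   std::tie(SubIdx, Off) =
//       computeIndirectRegAndOffset(TRI, &AMDGPU::VReg_128RegClass, VecReg, 2);
//   // SubIdx == AMDGPU::sub2, Off == 0   (in bounds: folded into the subreg)
//   std::tie(SubIdx, Off) =
//       computeIndirectRegAndOffset(TRI, &AMDGPU::VReg_128RegClass, VecReg, 5);
//   // SubIdx == AMDGPU::sub0, Off == 5   (out of bounds: left to the loop path)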
3010
3011// Return true if the index is an SGPR and was set.
3012static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3013 MachineRegisterInfo &MRI,
3014 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003015 int Offset,
3016 bool UseGPRIdxMode,
3017 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003018 MachineBasicBlock *MBB = MI.getParent();
3019 const DebugLoc &DL = MI.getDebugLoc();
3020 MachineBasicBlock::iterator I(&MI);
3021
3022 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3023 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3024
3025 assert(Idx->getReg() != AMDGPU::NoRegister);
3026
3027 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3028 return false;
3029
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003030 if (UseGPRIdxMode) {
3031 unsigned IdxMode = IsIndirectSrc ?
3032 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
3033 if (Offset == 0) {
3034 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00003035 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3036 .add(*Idx)
3037 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003038
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003039 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003040 } else {
3041 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3042 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00003043 .add(*Idx)
3044 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003045 MachineInstr *SetOn =
3046 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3047 .addReg(Tmp, RegState::Kill)
3048 .addImm(IdxMode);
3049
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003050 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003051 }
3052
3053 return true;
3054 }
3055
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003056 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003057 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3058 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003059 } else {
3060 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003061 .add(*Idx)
3062 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003063 }
3064
3065 return true;
3066}
3067
3068// Control flow needs to be inserted if indexing with a VGPR.
3069static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3070 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003071 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003072 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003073 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3074 MachineFunction *MF = MBB.getParent();
3075 MachineRegisterInfo &MRI = MF->getRegInfo();
3076
3077 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003078 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003079 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3080
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003081 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003082
3083 unsigned SubReg;
3084 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003085 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003086
Marek Olsake22fdb92017-03-21 17:00:32 +00003087 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003088
3089 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003090 MachineBasicBlock::iterator I(&MI);
3091 const DebugLoc &DL = MI.getDebugLoc();
3092
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003093 if (UseGPRIdxMode) {
3094 // TODO: Look at the uses to avoid the copy. This may require rescheduling
3095 // to avoid interfering with other uses, so probably requires a new
3096 // optimization pass.
3097 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003098 .addReg(SrcReg, RegState::Undef, SubReg)
3099 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003100 .addReg(AMDGPU::M0, RegState::Implicit);
3101 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3102 } else {
3103 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003104 .addReg(SrcReg, RegState::Undef, SubReg)
3105 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003106 }
3107
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003108 MI.eraseFromParent();
3109
3110 return &MBB;
3111 }
3112
3113 const DebugLoc &DL = MI.getDebugLoc();
3114 MachineBasicBlock::iterator I(&MI);
3115
3116 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3117 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3118
3119 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3120
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003121 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3122 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003123 MachineBasicBlock *LoopBB = InsPt->getParent();
3124
3125 if (UseGPRIdxMode) {
3126 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003127 .addReg(SrcReg, RegState::Undef, SubReg)
3128 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003129 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003130 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003131 } else {
3132 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003133 .addReg(SrcReg, RegState::Undef, SubReg)
3134 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003135 }
3136
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003137 MI.eraseFromParent();
3138
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003139 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003140}
3141
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003142static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3143 const TargetRegisterClass *VecRC) {
3144 switch (TRI.getRegSizeInBits(*VecRC)) {
3145 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003146 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003147 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003148 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003149 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003150 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003151 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003152 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003153 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003154 return AMDGPU::V_MOVRELD_B32_V16;
3155 default:
3156 llvm_unreachable("unsupported size for MOVRELD pseudos");
3157 }
3158}
3159
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003160static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3161 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003162 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003163 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003164 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3165 MachineFunction *MF = MBB.getParent();
3166 MachineRegisterInfo &MRI = MF->getRegInfo();
3167
3168 unsigned Dst = MI.getOperand(0).getReg();
3169 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3170 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3171 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3172 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3173 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3174
3175 // This can be an immediate, but will be folded later.
3176 assert(Val->getReg());
3177
3178 unsigned SubReg;
3179 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3180 SrcVec->getReg(),
3181 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00003182 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003183
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003184 if (Idx->getReg() == AMDGPU::NoRegister) {
3185 MachineBasicBlock::iterator I(&MI);
3186 const DebugLoc &DL = MI.getDebugLoc();
3187
3188 assert(Offset == 0);
3189
3190 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00003191 .add(*SrcVec)
3192 .add(*Val)
3193 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003194
3195 MI.eraseFromParent();
3196 return &MBB;
3197 }
3198
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003199 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003200 MachineBasicBlock::iterator I(&MI);
3201 const DebugLoc &DL = MI.getDebugLoc();
3202
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003203 if (UseGPRIdxMode) {
3204 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003205 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3206 .add(*Val)
3207 .addReg(Dst, RegState::ImplicitDefine)
3208 .addReg(SrcVec->getReg(), RegState::Implicit)
3209 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003210
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003211 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3212 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003213 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003214
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003215 BuildMI(MBB, I, DL, MovRelDesc)
3216 .addReg(Dst, RegState::Define)
3217 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00003218 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003219 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003220 }
3221
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003222 MI.eraseFromParent();
3223 return &MBB;
3224 }
3225
3226 if (Val->isReg())
3227 MRI.clearKillFlags(Val->getReg());
3228
3229 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003230
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003231 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3232
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003233 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003234 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003235 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003236
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003237 if (UseGPRIdxMode) {
3238 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003239 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3240 .add(*Val) // src0
3241 .addReg(Dst, RegState::ImplicitDefine)
3242 .addReg(PhiReg, RegState::Implicit)
3243 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003244 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003245 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003246 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003247
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003248 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3249 .addReg(Dst, RegState::Define)
3250 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003251 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003252 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003253 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003254
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003255 MI.eraseFromParent();
3256
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003257 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003258}
3259
Matt Arsenault786724a2016-07-12 21:41:32 +00003260MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3261 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00003262
3263 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3264 MachineFunction *MF = BB->getParent();
3265 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3266
3267 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00003268 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3269 report_fatal_error("missing mem operand from MIMG instruction");
3270 }
Tom Stellard244891d2016-12-20 15:52:17 +00003271 // Add a memoperand for mimg instructions so that they aren't assumed to
3272  // be ordered memory instructions.
3273
Tom Stellard244891d2016-12-20 15:52:17 +00003274 return BB;
3275 }
3276
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003277 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003278 case AMDGPU::S_ADD_U64_PSEUDO:
3279 case AMDGPU::S_SUB_U64_PSEUDO: {
3280 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3281 const DebugLoc &DL = MI.getDebugLoc();
3282
3283 MachineOperand &Dest = MI.getOperand(0);
3284 MachineOperand &Src0 = MI.getOperand(1);
3285 MachineOperand &Src1 = MI.getOperand(2);
3286
3287 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3288 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3289
3290 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3291 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3292 &AMDGPU::SReg_32_XM0RegClass);
3293 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3294 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3295 &AMDGPU::SReg_32_XM0RegClass);
3296
3297 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3298 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3299 &AMDGPU::SReg_32_XM0RegClass);
3300 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3301 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3302 &AMDGPU::SReg_32_XM0RegClass);
3303
3304 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3305
3306 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3307 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3308 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3309 .add(Src0Sub0)
3310 .add(Src1Sub0);
3311 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3312 .add(Src0Sub1)
3313 .add(Src1Sub1);
3314 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3315 .addReg(DestSub0)
3316 .addImm(AMDGPU::sub0)
3317 .addReg(DestSub1)
3318 .addImm(AMDGPU::sub1);
3319 MI.eraseFromParent();
3320 return BB;
3321 }
3322 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003323 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003324 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003325 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003326 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003327 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003328 }
Marek Olsak2d825902017-04-28 20:21:58 +00003329 case AMDGPU::SI_INIT_EXEC:
3330 // This should be before all vector instructions.
3331 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3332 AMDGPU::EXEC)
3333 .addImm(MI.getOperand(0).getImm());
3334 MI.eraseFromParent();
3335 return BB;
3336
3337 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3338 // Extract the thread count from an SGPR input and set EXEC accordingly.
3339 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3340 //
3341 // S_BFE_U32 count, input, {shift, 7}
3342 // S_BFM_B64 exec, count, 0
3343 // S_CMP_EQ_U32 count, 64
3344 // S_CMOV_B64 exec, -1
3345 MachineInstr *FirstMI = &*BB->begin();
3346 MachineRegisterInfo &MRI = MF->getRegInfo();
3347 unsigned InputReg = MI.getOperand(0).getReg();
3348 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3349 bool Found = false;
3350
3351 // Move the COPY of the input reg to the beginning, so that we can use it.
3352 for (auto I = BB->begin(); I != &MI; I++) {
3353 if (I->getOpcode() != TargetOpcode::COPY ||
3354 I->getOperand(0).getReg() != InputReg)
3355 continue;
3356
3357 if (I == FirstMI) {
3358 FirstMI = &*++BB->begin();
3359 } else {
3360 I->removeFromParent();
3361 BB->insert(FirstMI, &*I);
3362 }
3363 Found = true;
3364 break;
3365 }
3366 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003367 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003368
3369 // This should be before all vector instructions.
3370 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3371 .addReg(InputReg)
3372 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3373 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3374 AMDGPU::EXEC)
3375 .addReg(CountReg)
3376 .addImm(0);
3377 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3378 .addReg(CountReg, RegState::Kill)
3379 .addImm(64);
3380 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3381 AMDGPU::EXEC)
3382 .addImm(-1);
3383 MI.eraseFromParent();
3384 return BB;
3385 }
3386
Changpeng Fang01f60622016-03-15 17:28:44 +00003387 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003388 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003389 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003390 .add(MI.getOperand(0))
3391 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003392 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003393 return BB;
3394 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003395 case AMDGPU::SI_INDIRECT_SRC_V1:
3396 case AMDGPU::SI_INDIRECT_SRC_V2:
3397 case AMDGPU::SI_INDIRECT_SRC_V4:
3398 case AMDGPU::SI_INDIRECT_SRC_V8:
3399 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003400 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003401 case AMDGPU::SI_INDIRECT_DST_V1:
3402 case AMDGPU::SI_INDIRECT_DST_V2:
3403 case AMDGPU::SI_INDIRECT_DST_V4:
3404 case AMDGPU::SI_INDIRECT_DST_V8:
3405 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003406 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003407 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3408 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003409 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003410 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3411 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003412
3413 unsigned Dst = MI.getOperand(0).getReg();
3414 unsigned Src0 = MI.getOperand(1).getReg();
3415 unsigned Src1 = MI.getOperand(2).getReg();
3416 const DebugLoc &DL = MI.getDebugLoc();
3417 unsigned SrcCond = MI.getOperand(3).getReg();
3418
3419 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3420 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003421 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenault22e41792016-08-27 01:00:37 +00003422
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003423 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3424 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003425 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3426 .addReg(Src0, 0, AMDGPU::sub0)
3427 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003428 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003429 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3430 .addReg(Src0, 0, AMDGPU::sub1)
3431 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003432 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003433
3434 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3435 .addReg(DstLo)
3436 .addImm(AMDGPU::sub0)
3437 .addReg(DstHi)
3438 .addImm(AMDGPU::sub1);
3439 MI.eraseFromParent();
3440 return BB;
3441 }
Matt Arsenault327188a2016-12-15 21:57:11 +00003442 case AMDGPU::SI_BR_UNDEF: {
3443 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3444 const DebugLoc &DL = MI.getDebugLoc();
3445 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003446 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003447 Br->getOperand(1).setIsUndef(true); // read undef SCC
3448 MI.eraseFromParent();
3449 return BB;
3450 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003451 case AMDGPU::ADJCALLSTACKUP:
3452 case AMDGPU::ADJCALLSTACKDOWN: {
3453 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3454 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003455
3456    // Add an implicit use of the frame offset reg to prevent the restore copy
3457    // inserted after the call from being reordered after stack operations in
3458    // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003459 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003460 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3461 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003462 return BB;
3463 }
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003464 case AMDGPU::SI_CALL_ISEL:
3465 case AMDGPU::SI_TCRETURN_ISEL: {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003466 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3467 const DebugLoc &DL = MI.getDebugLoc();
3468 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003469
3470 MachineRegisterInfo &MRI = MF->getRegInfo();
3471 unsigned GlobalAddrReg = MI.getOperand(0).getReg();
3472 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg);
3473 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET);
3474
3475 const GlobalValue *G = PCRel->getOperand(1).getGlobal();
3476
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003477 MachineInstrBuilder MIB;
3478 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
3479 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg)
3480 .add(MI.getOperand(0))
3481 .addGlobalAddress(G);
3482 } else {
3483 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN))
3484 .add(MI.getOperand(0))
3485 .addGlobalAddress(G);
3486
3487 // There is an additional imm operand for tcreturn, but it should be in the
3488 // right place already.
3489 }
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003490
3491 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003492 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003493
Chandler Carruthc73c0302018-08-16 21:30:05 +00003494 MIB.cloneMemRefs(MI);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003495 MI.eraseFromParent();
3496 return BB;
3497 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003498 default:
3499 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003500 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003501}
3502
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003503bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3504 return isTypeLegal(VT.getScalarType());
3505}
3506
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003507bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3508 // This currently forces unfolding various combinations of fsub into fma with
3509 // free fneg'd operands. As long as we have fast FMA (controlled by
3510 // isFMAFasterThanFMulAndFAdd), we should perform these.
3511
3512 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3513 // most of these combines appear to be cycle neutral but save on instruction
3514 // count / code size.
3515 return true;
3516}
3517
Mehdi Amini44ede332015-07-09 02:09:04 +00003518EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3519 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003520 if (!VT.isVector()) {
3521 return MVT::i1;
3522 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003523 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003524}
3525
Matt Arsenault94163282016-12-22 16:36:25 +00003526MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3527 // TODO: Should i16 be used always if legal? For now it would force VALU
3528 // shifts.
3529 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003530}
3531
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003532// Answering this is somewhat tricky and depends on the specific device, which
3533// can have different rates for fma and for f64 operations in general.
3534//
3535// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3536// regardless of which device (although the number of cycles differs between
3537// devices), so it is always profitable for f64.
3538//
3539// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3540// only on full rate devices. Normally, we should prefer selecting v_mad_f32
3541// which we can always do even without fused FP ops since it returns the same
3542// result as the separate operations and since it is always full
3543// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3544// however does not support denormals, so we do report fma as faster if we have
3545// a fast fma device and require denormals.
3546//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003547bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3548 VT = VT.getScalarType();
3549
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003550 switch (VT.getSimpleVT().SimpleTy) {
Matt Arsenault0084adc2018-04-30 19:08:16 +00003551 case MVT::f32: {
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003552 // This is as fast on some subtargets. However, we always have full rate f32
3553 // mad available, which returns the same result as the separate operations,
Matt Arsenault8d630032015-02-20 22:10:41 +00003554 // so we should prefer it over fma. We can't use mad if we want to support
3555 // denormals, so only report fma as faster in those cases.
Matt Arsenault0084adc2018-04-30 19:08:16 +00003556 if (Subtarget->hasFP32Denormals())
3557 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3558
3559 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3560 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3561 }
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003562 case MVT::f64:
3563 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003564 case MVT::f16:
3565 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003566 default:
3567 break;
3568 }
3569
3570 return false;
3571}
3572
Tom Stellard75aadc22012-12-11 21:25:42 +00003573//===----------------------------------------------------------------------===//
3574// Custom DAG Lowering Operations
3575//===----------------------------------------------------------------------===//
3576
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003577// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3578// wider vector type is legal.
3579SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3580 SelectionDAG &DAG) const {
3581 unsigned Opc = Op.getOpcode();
3582 EVT VT = Op.getValueType();
3583 assert(VT == MVT::v4f16);
3584
3585 SDValue Lo, Hi;
3586 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3587
3588 SDLoc SL(Op);
3589 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3590 Op->getFlags());
3591 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3592 Op->getFlags());
3593
3594 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3595}
3596
3597// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3598// wider vector type is legal.
3599SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3600 SelectionDAG &DAG) const {
3601 unsigned Opc = Op.getOpcode();
3602 EVT VT = Op.getValueType();
3603 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3604
3605 SDValue Lo0, Hi0;
3606 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3607 SDValue Lo1, Hi1;
3608 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3609
3610 SDLoc SL(Op);
3611
3612 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3613 Op->getFlags());
3614 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3615 Op->getFlags());
3616
3617 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3618}
3619
Tom Stellard75aadc22012-12-11 21:25:42 +00003620SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3621 switch (Op.getOpcode()) {
3622 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003623 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003624 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003625 SDValue Result = LowerLOAD(Op, DAG);
3626 assert((!Result.getNode() ||
3627 Result.getNode()->getNumValues() == 2) &&
3628 "Load should return a value and a chain");
3629 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003630 }
Tom Stellardaf775432013-10-23 00:44:32 +00003631
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003632 case ISD::FSIN:
3633 case ISD::FCOS:
3634 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003635 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003636 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003637 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003638 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003639 case ISD::GlobalAddress: {
3640 MachineFunction &MF = DAG.getMachineFunction();
3641 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3642 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003643 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003644 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003645 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003646 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003647 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003648 case ISD::INSERT_VECTOR_ELT:
3649 return lowerINSERT_VECTOR_ELT(Op, DAG);
3650 case ISD::EXTRACT_VECTOR_ELT:
3651 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Matt Arsenault67a98152018-05-16 11:47:30 +00003652 case ISD::BUILD_VECTOR:
3653 return lowerBUILD_VECTOR(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003654 case ISD::FP_ROUND:
3655 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003656 case ISD::TRAP:
Matt Arsenault3e025382017-04-24 17:49:13 +00003657 return lowerTRAP(Op, DAG);
Tony Tye43259df2018-05-16 16:19:34 +00003658 case ISD::DEBUGTRAP:
3659 return lowerDEBUGTRAP(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003660 case ISD::FABS:
3661 case ISD::FNEG:
Matt Arsenault36cdcfa2018-08-02 13:43:42 +00003662 case ISD::FCANONICALIZE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003663 return splitUnaryVectorOp(Op, DAG);
Matt Arsenault687ec752018-10-22 16:27:27 +00003664 case ISD::FMINNUM:
3665 case ISD::FMAXNUM:
3666 return lowerFMINNUM_FMAXNUM(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003667 case ISD::SHL:
3668 case ISD::SRA:
3669 case ISD::SRL:
3670 case ISD::ADD:
3671 case ISD::SUB:
3672 case ISD::MUL:
3673 case ISD::SMIN:
3674 case ISD::SMAX:
3675 case ISD::UMIN:
3676 case ISD::UMAX:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003677 case ISD::FADD:
3678 case ISD::FMUL:
Matt Arsenault687ec752018-10-22 16:27:27 +00003679 case ISD::FMINNUM_IEEE:
3680 case ISD::FMAXNUM_IEEE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003681 return splitBinaryVectorOp(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003682 }
3683 return SDValue();
3684}
3685
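// Convert the result of a d16 load back to the requested vector type: on
// unpacked targets truncate each dword element to i16 and bitcast to
// v2f16/v4f16, otherwise just bitcast the packed result. Scalar results are
// returned unchanged.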
Matt Arsenault1349a042018-05-22 06:32:10 +00003686static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3687 const SDLoc &DL,
3688 SelectionDAG &DAG, bool Unpacked) {
3689 if (!LoadVT.isVector())
3690 return Result;
3691
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003692 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3693 // Truncate to v2i16/v4i16.
3694 EVT IntLoadVT = LoadVT.changeTypeToInteger();
Matt Arsenault1349a042018-05-22 06:32:10 +00003695
3696 // Work around the legalizer not scalarizing truncate after vector op
3697 // legalization by not creating an intermediate vector trunc.
3698 SmallVector<SDValue, 4> Elts;
3699 DAG.ExtractVectorElements(Result, Elts);
3700 for (SDValue &Elt : Elts)
3701 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3702
3703 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3704
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003705 // Bitcast to original type (v2f16/v4f16).
Matt Arsenault1349a042018-05-22 06:32:10 +00003706 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003707 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003708
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003709 // Cast back to the original packed type.
3710 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3711}
3712
Matt Arsenault1349a042018-05-22 06:32:10 +00003713SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3714 MemSDNode *M,
3715 SelectionDAG &DAG,
Tim Renouf366a49d2018-08-02 23:33:01 +00003716 ArrayRef<SDValue> Ops,
Matt Arsenault1349a042018-05-22 06:32:10 +00003717 bool IsIntrinsic) const {
3718 SDLoc DL(M);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003719
3720 bool Unpacked = Subtarget->hasUnpackedD16VMem();
Matt Arsenault1349a042018-05-22 06:32:10 +00003721 EVT LoadVT = M->getValueType(0);
3722
Matt Arsenault1349a042018-05-22 06:32:10 +00003723 EVT EquivLoadVT = LoadVT;
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003724 if (Unpacked && LoadVT.isVector()) {
3725 EquivLoadVT = LoadVT.isVector() ?
3726 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3727 LoadVT.getVectorNumElements()) : LoadVT;
Matt Arsenault1349a042018-05-22 06:32:10 +00003728 }
3729
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003730 // Change from v4f16/v2f16 to EquivLoadVT.
3731 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3732
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003733 SDValue Load
3734 = DAG.getMemIntrinsicNode(
3735 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3736 VTList, Ops, M->getMemoryVT(),
3737 M->getMemOperand());
3738 if (!Unpacked) // Just adjusted the opcode.
3739 return Load;
Changpeng Fang4737e892018-01-18 22:08:53 +00003740
Matt Arsenault1349a042018-05-22 06:32:10 +00003741 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
Changpeng Fang4737e892018-01-18 22:08:53 +00003742
Matt Arsenault1349a042018-05-22 06:32:10 +00003743 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003744}
3745
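// Lower an integer compare intrinsic: validate the predicate operand
// (operand 3), promote illegal i16 operands to i32, and emit an
// AMDGPUISD::SETCC node with the corresponding condition code.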
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003746static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
3747 SDNode *N, SelectionDAG &DAG) {
3748 EVT VT = N->getValueType(0);
3749 const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
3750 if (!CD)
3751 return DAG.getUNDEF(VT);
3752
3753 int CondCode = CD->getSExtValue();
3754 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
3755 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
3756 return DAG.getUNDEF(VT);
3757
3758 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
3759
3760
3761 SDValue LHS = N->getOperand(1);
3762 SDValue RHS = N->getOperand(2);
3763
3764 SDLoc DL(N);
3765
3766 EVT CmpVT = LHS.getValueType();
3767 if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
3768 unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
3769 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3770 LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
3771 RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
3772 }
3773
3774 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
3775
3776 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
3777 DAG.getCondCode(CCOpcode));
3778}
3779
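// Lower a floating-point compare intrinsic: validate the predicate operand,
// extend illegal f16 operands to f32, and emit an AMDGPUISD::SETCC node with
// the corresponding condition code.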
3780static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
3781 SDNode *N, SelectionDAG &DAG) {
3782 EVT VT = N->getValueType(0);
3783 const auto *CD = dyn_cast<ConstantSDNode>(N->getOperand(3));
3784 if (!CD)
3785 return DAG.getUNDEF(VT);
3786
3787 int CondCode = CD->getSExtValue();
3788 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
3789 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
3790 return DAG.getUNDEF(VT);
3791 }
3792
3793 SDValue Src0 = N->getOperand(1);
3794 SDValue Src1 = N->getOperand(2);
3795 EVT CmpVT = Src0.getValueType();
3796 SDLoc SL(N);
3797
3798 if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
3799 Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3800 Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3801 }
3802
3803 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
3804 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
3805 return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
3806 Src1, DAG.getCondCode(CCOpcode));
3807}
3808
Matt Arsenault3aef8092017-01-23 23:09:58 +00003809void SITargetLowering::ReplaceNodeResults(SDNode *N,
3810 SmallVectorImpl<SDValue> &Results,
3811 SelectionDAG &DAG) const {
3812 switch (N->getOpcode()) {
3813 case ISD::INSERT_VECTOR_ELT: {
3814 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3815 Results.push_back(Res);
3816 return;
3817 }
3818 case ISD::EXTRACT_VECTOR_ELT: {
3819 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3820 Results.push_back(Res);
3821 return;
3822 }
Matt Arsenault1f17c662017-02-22 00:27:34 +00003823 case ISD::INTRINSIC_WO_CHAIN: {
3824 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Marek Olsak13e47412018-01-31 20:18:04 +00003825 switch (IID) {
3826 case Intrinsic::amdgcn_cvt_pkrtz: {
Matt Arsenault1f17c662017-02-22 00:27:34 +00003827 SDValue Src0 = N->getOperand(1);
3828 SDValue Src1 = N->getOperand(2);
3829 SDLoc SL(N);
3830 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3831 Src0, Src1);
Matt Arsenault1f17c662017-02-22 00:27:34 +00003832 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3833 return;
3834 }
Marek Olsak13e47412018-01-31 20:18:04 +00003835 case Intrinsic::amdgcn_cvt_pknorm_i16:
3836 case Intrinsic::amdgcn_cvt_pknorm_u16:
3837 case Intrinsic::amdgcn_cvt_pk_i16:
3838 case Intrinsic::amdgcn_cvt_pk_u16: {
3839 SDValue Src0 = N->getOperand(1);
3840 SDValue Src1 = N->getOperand(2);
3841 SDLoc SL(N);
3842 unsigned Opcode;
3843
3844 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3845 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3846 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3847 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3848 else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3849 Opcode = AMDGPUISD::CVT_PK_I16_I32;
3850 else
3851 Opcode = AMDGPUISD::CVT_PK_U16_U32;
3852
Matt Arsenault709374d2018-08-01 20:13:58 +00003853 EVT VT = N->getValueType(0);
3854 if (isTypeLegal(VT))
3855 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3856 else {
3857 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3858 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3859 }
Marek Olsak13e47412018-01-31 20:18:04 +00003860 return;
3861 }
3862 }
Simon Pilgrimd362d272017-07-08 19:50:03 +00003863 break;
Matt Arsenault1f17c662017-02-22 00:27:34 +00003864 }
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003865 case ISD::INTRINSIC_W_CHAIN: {
Matt Arsenault1349a042018-05-22 06:32:10 +00003866 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003867 Results.push_back(Res);
Matt Arsenault1349a042018-05-22 06:32:10 +00003868 Results.push_back(Res.getValue(1));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003869 return;
3870 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003871
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003872 break;
3873 }
Matt Arsenault4a486232017-04-19 20:53:07 +00003874 case ISD::SELECT: {
3875 SDLoc SL(N);
3876 EVT VT = N->getValueType(0);
3877 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3878 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3879 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3880
3881 EVT SelectVT = NewVT;
3882 if (NewVT.bitsLT(MVT::i32)) {
3883 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3884 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3885 SelectVT = MVT::i32;
3886 }
3887
3888 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3889 N->getOperand(0), LHS, RHS);
3890
3891 if (NewVT != SelectVT)
3892 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3893 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3894 return;
3895 }
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003896 case ISD::FNEG: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003897 if (N->getValueType(0) != MVT::v2f16)
3898 break;
3899
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003900 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003901 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3902
3903 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3904 BC,
3905 DAG.getConstant(0x80008000, SL, MVT::i32));
3906 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3907 return;
3908 }
3909 case ISD::FABS: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003910 if (N->getValueType(0) != MVT::v2f16)
3911 break;
3912
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003913 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003914 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3915
3916 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3917 BC,
3918 DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3919 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3920 return;
3921 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00003922 default:
3923 break;
3924 }
3925}
3926
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00003927/// Helper function for LowerBRCOND
Tom Stellardf8794352012-12-19 22:10:31 +00003928static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00003929
Tom Stellardf8794352012-12-19 22:10:31 +00003930 SDNode *Parent = Value.getNode();
3931 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
3932 I != E; ++I) {
3933
3934 if (I.getUse().get() != Value)
3935 continue;
3936
3937 if (I->getOpcode() == Opcode)
3938 return *I;
3939 }
Craig Topper062a2ba2014-04-25 05:30:21 +00003940 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00003941}
3942
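// Map a control flow intrinsic (if/else/loop) to the corresponding AMDGPUISD
// opcode, or return 0 if the node is not a control flow intrinsic.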
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003943unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
Matt Arsenault6408c912016-09-16 22:11:18 +00003944 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
3945 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003946 case Intrinsic::amdgcn_if:
3947 return AMDGPUISD::IF;
3948 case Intrinsic::amdgcn_else:
3949 return AMDGPUISD::ELSE;
3950 case Intrinsic::amdgcn_loop:
3951 return AMDGPUISD::LOOP;
3952 case Intrinsic::amdgcn_end_cf:
3953 llvm_unreachable("should not occur");
Matt Arsenault6408c912016-09-16 22:11:18 +00003954 default:
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003955 return 0;
Matt Arsenault6408c912016-09-16 22:11:18 +00003956 }
Tom Stellardbc4497b2016-02-12 23:45:29 +00003957 }
Matt Arsenault6408c912016-09-16 22:11:18 +00003958
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00003959 // break, if_break, else_break are all only used as inputs to loop, not
3960 // directly as branch conditions.
3961 return 0;
Tom Stellardbc4497b2016-02-12 23:45:29 +00003962}
3963
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003964void SITargetLowering::createDebuggerPrologueStackObjects(
3965 MachineFunction &MF) const {
3966 // Create stack objects that are used for emitting debugger prologue.
3967 //
3968 // Debugger prologue writes work group IDs and work item IDs to scratch memory
3969 // at a fixed location in the following format:
3970 // offset 0: work group ID x
3971 // offset 4: work group ID y
3972 // offset 8: work group ID z
3973 // offset 16: work item ID x
3974 // offset 20: work item ID y
3975 // offset 24: work item ID z
3976 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3977 int ObjectIdx = 0;
3978
3979 // For each dimension:
3980 for (unsigned i = 0; i < 3; ++i) {
3981 // Create fixed stack object for work group ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003982 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003983 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3984 // Create fixed stack object for work item ID.
Matthias Braun941a7052016-07-28 18:40:00 +00003985 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
Konstantin Zhuravlyovf2f3d142016-06-25 03:11:28 +00003986 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3987 }
3988}
3989
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003990bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3991 const Triple &TT = getTargetMachine().getTargetTriple();
Matt Arsenault0da63502018-08-31 05:49:54 +00003992 return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
3993 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00003994 AMDGPU::shouldEmitConstantsToTextSection(TT);
3995}
3996
3997bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
Matt Arsenault0da63502018-08-31 05:49:54 +00003998 return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
3999 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4000 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004001 !shouldEmitFixup(GV) &&
4002 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4003}
4004
4005bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4006 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4007}
4008
Tom Stellardf8794352012-12-19 22:10:31 +00004009/// This transforms the control flow intrinsics to get the branch destination as
4010/// the last parameter; it also switches the branch target with BR if the need arises.
4011SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4012 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00004013 SDLoc DL(BRCOND);
Tom Stellardf8794352012-12-19 22:10:31 +00004014
4015 SDNode *Intr = BRCOND.getOperand(1).getNode();
4016 SDValue Target = BRCOND.getOperand(2);
Craig Topper062a2ba2014-04-25 05:30:21 +00004017 SDNode *BR = nullptr;
Tom Stellardbc4497b2016-02-12 23:45:29 +00004018 SDNode *SetCC = nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00004019
4020 if (Intr->getOpcode() == ISD::SETCC) {
4021 // As long as we negate the condition everything is fine
Tom Stellardbc4497b2016-02-12 23:45:29 +00004022 SetCC = Intr;
Tom Stellardf8794352012-12-19 22:10:31 +00004023 Intr = SetCC->getOperand(0).getNode();
4024
4025 } else {
4026 // Get the target from BR if we don't negate the condition
4027 BR = findUser(BRCOND, ISD::BR);
4028 Target = BR->getOperand(1);
4029 }
4030
Matt Arsenault6408c912016-09-16 22:11:18 +00004031 // FIXME: This changes the types of the intrinsics instead of introducing new
4032 // nodes with the correct types.
4033 // e.g. llvm.amdgcn.loop
4034
4035 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4036 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4037
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004038 unsigned CFNode = isCFIntrinsic(Intr);
4039 if (CFNode == 0) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00004040 // This is a uniform branch so we don't need to legalize.
4041 return BRCOND;
4042 }
4043
Matt Arsenault6408c912016-09-16 22:11:18 +00004044 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4045 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4046
Tom Stellardbc4497b2016-02-12 23:45:29 +00004047 assert(!SetCC ||
4048 (SetCC->getConstantOperandVal(1) == 1 &&
Tom Stellardbc4497b2016-02-12 23:45:29 +00004049 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4050 ISD::SETNE));
Tom Stellardf8794352012-12-19 22:10:31 +00004051
Tom Stellardf8794352012-12-19 22:10:31 +00004052 // operands of the new intrinsic call
4053 SmallVector<SDValue, 4> Ops;
Matt Arsenault6408c912016-09-16 22:11:18 +00004054 if (HaveChain)
4055 Ops.push_back(BRCOND.getOperand(0));
4056
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004057 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Tom Stellardf8794352012-12-19 22:10:31 +00004058 Ops.push_back(Target);
4059
Matt Arsenault6408c912016-09-16 22:11:18 +00004060 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4061
Tom Stellardf8794352012-12-19 22:10:31 +00004062 // build the new intrinsic call
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004063 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004064
Matt Arsenault6408c912016-09-16 22:11:18 +00004065 if (!HaveChain) {
4066 SDValue Ops[] = {
4067 SDValue(Result, 0),
4068 BRCOND.getOperand(0)
4069 };
4070
4071 Result = DAG.getMergeValues(Ops, DL).getNode();
4072 }
4073
Tom Stellardf8794352012-12-19 22:10:31 +00004074 if (BR) {
4075 // Give the branch instruction our target
4076 SDValue Ops[] = {
4077 BR->getOperand(0),
4078 BRCOND.getOperand(2)
4079 };
Chandler Carruth356665a2014-08-01 22:09:43 +00004080 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4081 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4082 BR = NewBR.getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004083 }
4084
4085 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4086
4087 // Copy the intrinsic results to registers
4088 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4089 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4090 if (!CopyToReg)
4091 continue;
4092
4093 Chain = DAG.getCopyToReg(
4094 Chain, DL,
4095 CopyToReg->getOperand(1),
4096 SDValue(Result, i - 1),
4097 SDValue());
4098
4099 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4100 }
4101
4102 // Remove the old intrinsic from the chain
4103 DAG.ReplaceAllUsesOfValueWith(
4104 SDValue(Intr, Intr->getNumValues() - 1),
4105 Intr->getOperand(0));
4106
4107 return Chain;
Tom Stellard75aadc22012-12-11 21:25:42 +00004108}
4109
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004110SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4111 SDValue Op,
4112 const SDLoc &DL,
4113 EVT VT) const {
4114 return Op.getValueType().bitsLE(VT) ?
4115 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4116 DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4117}
4118
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004119SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004120 assert(Op.getValueType() == MVT::f16 &&
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004121 "Do not know how to custom lower FP_ROUND for non-f16 type");
4122
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004123 SDValue Src = Op.getOperand(0);
4124 EVT SrcVT = Src.getValueType();
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004125 if (SrcVT != MVT::f64)
4126 return Op;
4127
4128 SDLoc DL(Op);
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004129
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004130 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4131 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
Mandeep Singh Grang5e1697e2017-06-06 05:08:36 +00004132 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004133}
4134
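// Lower fminnum/fmaxnum. In IEEE mode the generic expansion is used; otherwise
// v4f16 operations are split into v2f16 halves and other types are left as-is.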
Matt Arsenault687ec752018-10-22 16:27:27 +00004135SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4136 SelectionDAG &DAG) const {
4137 EVT VT = Op.getValueType();
4138 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
4139
4140 // FIXME: Assert during selection that this is only selected for
4141 // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4142 // mode functions, but this happens to be OK since it's only done in cases
4143 // where it is known there are no sNaNs.
4144 if (IsIEEEMode)
4145 return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4146
4147 if (VT == MVT::v4f16)
4148 return splitBinaryVectorOp(Op, DAG);
4149 return Op;
4150}
4151
Matt Arsenault3e025382017-04-24 17:49:13 +00004152SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4153 SDLoc SL(Op);
Matt Arsenault3e025382017-04-24 17:49:13 +00004154 SDValue Chain = Op.getOperand(0);
4155
Tom Stellard5bfbae52018-07-11 20:59:01 +00004156 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004157 !Subtarget->isTrapHandlerEnabled())
Matt Arsenault3e025382017-04-24 17:49:13 +00004158 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
Tony Tye43259df2018-05-16 16:19:34 +00004159
4160 MachineFunction &MF = DAG.getMachineFunction();
4161 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4162 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4163 assert(UserSGPR != AMDGPU::NoRegister);
4164 SDValue QueuePtr = CreateLiveInRegister(
4165 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4166 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4167 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4168 QueuePtr, SDValue());
4169 SDValue Ops[] = {
4170 ToReg,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004171 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
Tony Tye43259df2018-05-16 16:19:34 +00004172 SGPR01,
4173 ToReg.getValue(1)
4174 };
4175 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4176}
4177
4178SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4179 SDLoc SL(Op);
4180 SDValue Chain = Op.getOperand(0);
4181 MachineFunction &MF = DAG.getMachineFunction();
4182
Tom Stellard5bfbae52018-07-11 20:59:01 +00004183 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004184 !Subtarget->isTrapHandlerEnabled()) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004185 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
Matt Arsenault3e025382017-04-24 17:49:13 +00004186 "debugtrap handler not supported",
4187 Op.getDebugLoc(),
4188 DS_Warning);
Matthias Braunf1caa282017-12-15 22:22:58 +00004189 LLVMContext &Ctx = MF.getFunction().getContext();
Matt Arsenault3e025382017-04-24 17:49:13 +00004190 Ctx.diagnose(NoTrap);
4191 return Chain;
4192 }
Matt Arsenault3e025382017-04-24 17:49:13 +00004193
Tony Tye43259df2018-05-16 16:19:34 +00004194 SDValue Ops[] = {
4195 Chain,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004196 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
Tony Tye43259df2018-05-16 16:19:34 +00004197 };
4198 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
Matt Arsenault3e025382017-04-24 17:49:13 +00004199}
4200
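// Return the high 32 bits (the aperture) of the flat address range that
// corresponds to the given LDS or private address space, either read from the
// aperture hardware registers or loaded from the queue pointer.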
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004201SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004202 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004203 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4204 if (Subtarget->hasApertureRegs()) {
Matt Arsenault0da63502018-08-31 05:49:54 +00004205 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004206 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4207 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
Matt Arsenault0da63502018-08-31 05:49:54 +00004208 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004209 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4210 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4211 unsigned Encoding =
4212 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4213 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4214 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
Matt Arsenaulte823d922017-02-18 18:29:53 +00004215
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004216 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4217 SDValue ApertureReg = SDValue(
4218 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4219 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4220 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004221 }
4222
Matt Arsenault99c14522016-04-25 19:27:24 +00004223 MachineFunction &MF = DAG.getMachineFunction();
4224 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004225 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4226 assert(UserSGPR != AMDGPU::NoRegister);
4227
Matt Arsenault99c14522016-04-25 19:27:24 +00004228 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004229 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004230
4231 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4232 // private_segment_aperture_base_hi.
Matt Arsenault0da63502018-08-31 05:49:54 +00004233 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004234
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004235 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004236
4237 // TODO: Use custom target PseudoSourceValue.
4238 // TODO: We should use the value from the IR intrinsic call, but it might not
4239 // be available and how do we get it?
4240 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Matt Arsenault0da63502018-08-31 05:49:54 +00004241 AMDGPUAS::CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004242
4243 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004244 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004245 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004246 MachineMemOperand::MODereferenceable |
4247 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004248}
4249
4250SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4251 SelectionDAG &DAG) const {
4252 SDLoc SL(Op);
4253 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4254
4255 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004256 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4257
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004258 const AMDGPUTargetMachine &TM =
4259 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4260
Matt Arsenault99c14522016-04-25 19:27:24 +00004261 // flat -> local/private
Matt Arsenault0da63502018-08-31 05:49:54 +00004262 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004263 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004264
Matt Arsenault0da63502018-08-31 05:49:54 +00004265 if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4266 DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004267 unsigned NullVal = TM.getNullPointerValue(DestAS);
4268 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004269 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4270 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4271
4272 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4273 NonNull, Ptr, SegmentNullPtr);
4274 }
4275 }
4276
4277 // local/private -> flat
Matt Arsenault0da63502018-08-31 05:49:54 +00004278 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004279 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004280
Matt Arsenault0da63502018-08-31 05:49:54 +00004281 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4282 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004283 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4284 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004285
Matt Arsenault99c14522016-04-25 19:27:24 +00004286 SDValue NonNull
4287 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4288
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004289 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004290 SDValue CvtPtr
4291 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4292
4293 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4294 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4295 FlatNullPtr);
4296 }
4297 }
4298
4299 // global <-> flat are no-ops and never emitted.
4300
4301 const MachineFunction &MF = DAG.getMachineFunction();
4302 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004303 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004304 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4305
4306 return DAG.getUNDEF(ASC->getValueType(0));
4307}
4308
Matt Arsenault3aef8092017-01-23 23:09:58 +00004309SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4310 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004311 SDValue Vec = Op.getOperand(0);
4312 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004313 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004314 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004315 EVT EltVT = VecVT.getVectorElementType();
4316 unsigned VecSize = VecVT.getSizeInBits();
4317 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004318
Matt Arsenault9224c002018-06-05 19:52:46 +00004319
4320 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004321
4322 unsigned NumElts = VecVT.getVectorNumElements();
4323 SDLoc SL(Op);
4324 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4325
Matt Arsenault9224c002018-06-05 19:52:46 +00004326 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004327 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4328
4329 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4330 DAG.getConstant(0, SL, MVT::i32));
4331 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4332 DAG.getConstant(1, SL, MVT::i32));
4333
4334 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4335 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4336
4337 unsigned Idx = KIdx->getZExtValue();
4338 bool InsertLo = Idx < 2;
4339 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4340 InsertLo ? LoVec : HiVec,
4341 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4342 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4343
4344 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4345
4346 SDValue Concat = InsertLo ?
4347 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4348 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4349
4350 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4351 }
4352
Matt Arsenault3aef8092017-01-23 23:09:58 +00004353 if (isa<ConstantSDNode>(Idx))
4354 return SDValue();
4355
Matt Arsenault9224c002018-06-05 19:52:46 +00004356 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004357
Matt Arsenault3aef8092017-01-23 23:09:58 +00004358 // Avoid stack access for dynamic indexing.
Matt Arsenault9224c002018-06-05 19:52:46 +00004359 SDValue Val = InsVal;
4360 if (InsVal.getValueType() == MVT::f16)
4361 Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004362
4363 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Matt Arsenault67a98152018-05-16 11:47:30 +00004364 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Val);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004365
Matt Arsenault9224c002018-06-05 19:52:46 +00004366 assert(isPowerOf2_32(EltSize));
4367 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4368
Matt Arsenault3aef8092017-01-23 23:09:58 +00004369 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004370 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004371
Matt Arsenault67a98152018-05-16 11:47:30 +00004372 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4373 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4374 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004375 ScaledIdx);
4376
Matt Arsenault67a98152018-05-16 11:47:30 +00004377 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4378 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4379 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004380
Matt Arsenault67a98152018-05-16 11:47:30 +00004381 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4382 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004383}
4384
4385SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4386 SelectionDAG &DAG) const {
4387 SDLoc SL(Op);
4388
4389 EVT ResultVT = Op.getValueType();
4390 SDValue Vec = Op.getOperand(0);
4391 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004392 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004393 unsigned VecSize = VecVT.getSizeInBits();
4394 EVT EltVT = VecVT.getVectorElementType();
4395 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004396
Matt Arsenault98f29462017-05-17 20:30:58 +00004397 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4398
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004399 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004400 // source modifiers before obscuring it with bit operations.
4401
4402 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4403 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4404 return Combined;
4405
Matt Arsenault9224c002018-06-05 19:52:46 +00004406 unsigned EltSize = EltVT.getSizeInBits();
4407 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004408
Matt Arsenault9224c002018-06-05 19:52:46 +00004409 MVT IntVT = MVT::getIntegerVT(VecSize);
4410 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4411
4412 // Convert vector index to bit-index (* EltSize)
4413 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004414
Matt Arsenault67a98152018-05-16 11:47:30 +00004415 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4416 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004417
Matt Arsenault67a98152018-05-16 11:47:30 +00004418 if (ResultVT == MVT::f16) {
4419 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4420 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4421 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004422
Matt Arsenault67a98152018-05-16 11:47:30 +00004423 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4424}
4425
4426SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4427 SelectionDAG &DAG) const {
4428 SDLoc SL(Op);
4429 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004430
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004431 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4432 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4433
4434 // Turn into pair of packed build_vectors.
4435 // TODO: Special case for constants that can be materialized with s_mov_b64.
4436 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4437 { Op.getOperand(0), Op.getOperand(1) });
4438 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4439 { Op.getOperand(2), Op.getOperand(3) });
4440
4441 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4442 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4443
4444 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4445 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4446 }
4447
Matt Arsenault1349a042018-05-22 06:32:10 +00004448 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004449 assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
Matt Arsenault67a98152018-05-16 11:47:30 +00004450
Matt Arsenault1349a042018-05-22 06:32:10 +00004451 SDValue Lo = Op.getOperand(0);
4452 SDValue Hi = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004453
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004454 // Avoid adding defined bits with the zero_extend.
4455 if (Hi.isUndef()) {
4456 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4457 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4458 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4459 }
Matt Arsenault67a98152018-05-16 11:47:30 +00004460
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004461 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004462 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4463
4464 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4465 DAG.getConstant(16, SL, MVT::i32));
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004466 if (Lo.isUndef())
4467 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4468
4469 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4470 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
Matt Arsenault1349a042018-05-22 06:32:10 +00004471
4472 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004473 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004474}
4475
Tom Stellard418beb72016-07-13 14:23:33 +00004476bool
4477SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4478 // We can fold offsets for anything that doesn't require a GOT relocation.
Matt Arsenault0da63502018-08-31 05:49:54 +00004479 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4480 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4481 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004482 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004483}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004484
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004485static SDValue
4486buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4487 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4488 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004489 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4490 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004491 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004492 // For constant address space:
4493 // s_getpc_b64 s[0:1]
4494 // s_add_u32 s0, s0, $symbol
4495 // s_addc_u32 s1, s1, 0
4496 //
4497 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4498 // a fixup or relocation is emitted to replace $symbol with a literal
4499 // constant, which is a pc-relative offset from the encoding of the $symbol
4500 // operand to the global variable.
4501 //
4502 // For global address space:
4503 // s_getpc_b64 s[0:1]
4504 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4505 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4506 //
4507 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4508 // fixups or relocations are emitted to replace $symbol@*@lo and
4509 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4510 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4511 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004512 //
4513 // What we want here is an offset from the value returned by s_getpc
4514 // (which is the address of the s_add_u32 instruction) to the global
4515 // variable, but since the encoding of $symbol starts 4 bytes after the start
4516 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4517 // small. This requires us to add 4 to the global variable offset in order to
4518 // compute the correct address.
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004519 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4520 GAFlags);
4521 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4522 GAFlags == SIInstrInfo::MO_NONE ?
4523 GAFlags : GAFlags + 1);
4524 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004525}
4526
Tom Stellard418beb72016-07-13 14:23:33 +00004527SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4528 SDValue Op,
4529 SelectionDAG &DAG) const {
4530 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004531 const GlobalValue *GV = GSD->getGlobal();
Matt Arsenaultd1f45712018-09-10 12:16:11 +00004532 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
4533 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
4534 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
Tom Stellard418beb72016-07-13 14:23:33 +00004535 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4536
4537 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00004538 EVT PtrVT = Op.getValueType();
4539
Matt Arsenaultd1f45712018-09-10 12:16:11 +00004540 // FIXME: Should not make address space based decisions here.
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004541 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00004542 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004543 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004544 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4545 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004546
4547 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004548 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004549
4550 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00004551 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00004552 const DataLayout &DataLayout = DAG.getDataLayout();
4553 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
Matt Arsenaultd77fcc22018-09-10 02:23:39 +00004554 MachinePointerInfo PtrInfo
4555 = MachinePointerInfo::getGOT(DAG.getMachineFunction());
Tom Stellard418beb72016-07-13 14:23:33 +00004556
Justin Lebar9c375812016-07-15 18:27:10 +00004557 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00004558 MachineMemOperand::MODereferenceable |
4559 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00004560}
4561
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004562SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4563 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004564 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4565 // the destination register.
4566 //
Tom Stellardfc92e772015-05-12 14:18:14 +00004567 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4568 // so we will end up with redundant moves to m0.
4569 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004570 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4571
4572 // A Null SDValue creates a glue result.
4573 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4574 V, Chain);
4575 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00004576}
4577
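// Load an implicit kernel argument whose upper bits are known to be zero and
// mark the result with an AssertZext of the narrower type.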
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004578SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4579 SDValue Op,
4580 MVT VT,
4581 unsigned Offset) const {
4582 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004583 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004584 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004585 // The local size values will have the hi 16-bits as zero.
4586 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4587 DAG.getValueType(VT));
4588}
4589
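// Diagnose use of a non-HSA intrinsic with an HSA target and return undef so
// lowering can continue.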
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004590static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4591 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004592 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004593 "non-hsa intrinsic with hsa target",
4594 DL.getDebugLoc());
4595 DAG.getContext()->diagnose(BadIntrin);
4596 return DAG.getUNDEF(VT);
4597}
4598
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004599static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4600 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004601 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004602 "intrinsic not supported on subtarget",
4603 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00004604 DAG.getContext()->diagnose(BadIntrin);
4605 return DAG.getUNDEF(VT);
4606}
4607
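// Bitcast each element to f32 and pack the list into a single f32 or the
// smallest f32 vector (2, 4, 8 or 16 elements) that fits, padding any unused
// lanes with undef.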
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004608static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
4609 ArrayRef<SDValue> Elts) {
4610 assert(!Elts.empty());
4611 MVT Type;
4612 unsigned NumElts;
4613
4614 if (Elts.size() == 1) {
4615 Type = MVT::f32;
4616 NumElts = 1;
4617 } else if (Elts.size() == 2) {
4618 Type = MVT::v2f32;
4619 NumElts = 2;
4620 } else if (Elts.size() <= 4) {
4621 Type = MVT::v4f32;
4622 NumElts = 4;
4623 } else if (Elts.size() <= 8) {
4624 Type = MVT::v8f32;
4625 NumElts = 8;
4626 } else {
4627 assert(Elts.size() <= 16);
4628 Type = MVT::v16f32;
4629 NumElts = 16;
4630 }
4631
4632 SmallVector<SDValue, 16> VecElts(NumElts);
4633 for (unsigned i = 0; i < Elts.size(); ++i) {
4634 SDValue Elt = Elts[i];
4635 if (Elt.getValueType() != MVT::f32)
4636 Elt = DAG.getBitcast(MVT::f32, Elt);
4637 VecElts[i] = Elt;
4638 }
4639 for (unsigned i = Elts.size(); i < NumElts; ++i)
4640 VecElts[i] = DAG.getUNDEF(MVT::f32);
4641
4642 if (NumElts == 1)
4643 return VecElts[0];
4644 return DAG.getBuildVector(Type, DL, VecElts);
4645}
4646
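// Decode a constant cachepolicy operand into glc (bit 0) and slc (bit 1)
// target constants. Returns false if the operand is not a constant or if bits
// other than the requested ones are set; e.g. a cachepolicy of 3 with both
// GLC and SLC requested yields glc = 1 and slc = 1.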
4647static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
4648 SDValue *GLC, SDValue *SLC) {
4649 auto CachePolicyConst = dyn_cast<ConstantSDNode>(CachePolicy.getNode());
4650 if (!CachePolicyConst)
4651 return false;
4652
4653 uint64_t Value = CachePolicyConst->getZExtValue();
4654 SDLoc DL(CachePolicy);
4655 if (GLC) {
4656 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4657 Value &= ~(uint64_t)0x1;
4658 }
4659 if (SLC) {
4660 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4661 Value &= ~(uint64_t)0x2;
4662 }
4663
4664 return Value == 0;
4665}
4666
David Stuttardf77079f2019-01-14 11:55:24 +00004667// Re-construct the required return value for an image load intrinsic.
4668// This is more complicated due to the optional use of TexFailCtrl, which means
4669// the required return type is an aggregate.
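// For example (illustrative), a d16 load with two dmask lanes and TFE enabled
// on an unpacked-d16 target needs two data dwords plus one texfail dword
// (rounded up to a 4-dword instruction result by the caller); the first two
// dwords are converted back to the requested 16-bit type and the dword at
// index 2 becomes the texfail value of the {data, texfail, chain} aggregate.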
4670static SDValue constructRetValue(SelectionDAG &DAG,
4671 MachineSDNode *Result,
4672 ArrayRef<EVT> ResultTypes,
4673 bool IsTexFail, bool Unpacked, bool IsD16,
4674 int DMaskPop, int NumVDataDwords,
4675 const SDLoc &DL, LLVMContext &Context) {
4676 // Determine the required return type. This is the same regardless of the IsTexFail flag.
4677 EVT ReqRetVT = ResultTypes[0];
4678 EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
4679 int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
4680 EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
4681 EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
4682 : AdjEltVT
4683 : ReqRetVT;
4684
4685 // Extract data part of the result
4686 // Bitcast the result to the same type as the required return type
4687 int NumElts;
4688 if (IsD16 && !Unpacked)
4689 NumElts = NumVDataDwords << 1;
4690 else
4691 NumElts = NumVDataDwords;
4692
4693 EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
4694 : AdjEltVT;
4695
4696 // Special case for v8f16. Rather than add support for this, use v4i32 to
4697 // extract the data elements
4698 bool V8F16Special = false;
4699 if (CastVT == MVT::v8f16) {
4700 CastVT = MVT::v4i32;
4701 DMaskPop >>= 1;
4702 ReqRetNumElts >>= 1;
4703 V8F16Special = true;
4704 AdjVT = MVT::v2i32;
4705 }
4706
4707 SDValue N = SDValue(Result, 0);
4708 SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
4709
4710 // Iterate over the result
4711 SmallVector<SDValue, 4> BVElts;
4712
4713 if (CastVT.isVector()) {
4714 DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
4715 } else {
4716 BVElts.push_back(CastRes);
4717 }
4718 int ExtraElts = ReqRetNumElts - DMaskPop;
4719 while(ExtraElts--)
4720 BVElts.push_back(DAG.getUNDEF(AdjEltVT));
4721
4722 SDValue PreTFCRes;
4723 if (ReqRetNumElts > 1) {
4724 SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
4725 if (IsD16 && Unpacked)
4726 PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
4727 else
4728 PreTFCRes = NewVec;
4729 } else {
4730 PreTFCRes = BVElts[0];
4731 }
4732
4733 if (V8F16Special)
4734 PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
4735
4736 if (!IsTexFail) {
4737 if (Result->getNumValues() > 1)
4738 return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
4739 else
4740 return PreTFCRes;
4741 }
4742
4743 // Extract the TexFail result and insert into aggregate return
4744 SmallVector<SDValue, 1> TFCElt;
4745 DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
4746 SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
4747 return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
4748}
4749
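// Decode a constant texfailctrl operand into tfe (bit 0) and lwe (bit 1)
// target constants and record in IsTexFail whether any failure reporting was
// requested. Returns false for a non-constant operand or unknown bits.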
4750static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
4751 SDValue *LWE, bool &IsTexFail) {
4752 auto TexFailCtrlConst = dyn_cast<ConstantSDNode>(TexFailCtrl.getNode());
4753 if (!TexFailCtrlConst)
4754 return false;
4755
4756 uint64_t Value = TexFailCtrlConst->getZExtValue();
4757 if (Value) {
4758 IsTexFail = true;
4759 }
4760
4761 SDLoc DL(TexFailCtrlConst);
4762 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4763 Value &= ~(uint64_t)0x1;
4764 *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4765 Value &= ~(uint64_t)0x2;
4766
4767 return Value == 0;
4768}
4769
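// Lower an image (MIMG) intrinsic: collect the vdata/vaddr/rsrc/sampler
// operands, pack 16-bit addresses when the target supports a16, select the
// MIMG machine opcode for the required data and address dword counts, and
// reconstruct the aggregate return value when TFE/LWE is requested.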
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004770SDValue SITargetLowering::lowerImage(SDValue Op,
4771 const AMDGPU::ImageDimIntrinsicInfo *Intr,
4772 SelectionDAG &DAG) const {
4773 SDLoc DL(Op);
Ryan Taylor1f334d02018-08-28 15:07:30 +00004774 MachineFunction &MF = DAG.getMachineFunction();
4775 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004776 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4777 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
4778 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004779 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
4780 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
4781 unsigned IntrOpcode = Intr->BaseOpcode;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004782
David Stuttardf77079f2019-01-14 11:55:24 +00004783 SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
4784 SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004785 bool IsD16 = false;
Ryan Taylor1f334d02018-08-28 15:07:30 +00004786 bool IsA16 = false;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004787 SDValue VData;
4788 int NumVDataDwords;
David Stuttardf77079f2019-01-14 11:55:24 +00004789 bool AdjustRetType = false;
4790
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004791 unsigned AddrIdx; // Index of first address argument
4792 unsigned DMask;
David Stuttardf77079f2019-01-14 11:55:24 +00004793 unsigned DMaskLanes = 0;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004794
4795 if (BaseOpcode->Atomic) {
4796 VData = Op.getOperand(2);
4797
4798 bool Is64Bit = VData.getValueType() == MVT::i64;
4799 if (BaseOpcode->AtomicX2) {
4800 SDValue VData2 = Op.getOperand(3);
4801 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
4802 {VData, VData2});
4803 if (Is64Bit)
4804 VData = DAG.getBitcast(MVT::v4i32, VData);
4805
4806 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
4807 DMask = Is64Bit ? 0xf : 0x3;
4808 NumVDataDwords = Is64Bit ? 4 : 2;
4809 AddrIdx = 4;
4810 } else {
4811 DMask = Is64Bit ? 0x3 : 0x1;
4812 NumVDataDwords = Is64Bit ? 2 : 1;
4813 AddrIdx = 3;
4814 }
4815 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00004816 unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
4817 auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
4818 if (!DMaskConst)
4819 return Op;
4820 DMask = DMaskConst->getZExtValue();
4821 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004822
4823 if (BaseOpcode->Store) {
4824 VData = Op.getOperand(2);
4825
4826 MVT StoreVT = VData.getSimpleValueType();
4827 if (StoreVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004828 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004829 !BaseOpcode->HasD16)
4830 return Op; // D16 is unsupported for this instruction
4831
4832 IsD16 = true;
4833 VData = handleD16VData(VData, DAG);
4834 }
4835
4836 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004837 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00004838 // Work out the number of dwords based on the dmask popcount, the underlying
4839 // type, and whether packing is supported.
4840 MVT LoadVT = ResultTypes[0].getSimpleVT();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004841 if (LoadVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004842 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004843 !BaseOpcode->HasD16)
4844 return Op; // D16 is unsupported for this instruction
4845
4846 IsD16 = true;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004847 }
4848
David Stuttardf77079f2019-01-14 11:55:24 +00004849 // Confirm that the return type is large enough for the dmask specified
4850 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
4851 (!LoadVT.isVector() && DMaskLanes > 1))
4852 return Op;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004853
David Stuttardf77079f2019-01-14 11:55:24 +00004854 if (IsD16 && !Subtarget->hasUnpackedD16VMem())
4855 NumVDataDwords = (DMaskLanes + 1) / 2;
4856 else
4857 NumVDataDwords = DMaskLanes;
4858
4859 AdjustRetType = true;
4860 }
David Stuttardc6603862018-11-29 20:14:17 +00004861
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004862 AddrIdx = DMaskIdx + 1;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004863 }
4864
Ryan Taylor1f334d02018-08-28 15:07:30 +00004865 unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
4866 unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
4867 unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
4868 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
4869 NumCoords + NumLCM;
4870 unsigned NumMIVAddrs = NumVAddrs;
4871
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004872 SmallVector<SDValue, 4> VAddrs;
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004873
4874 // Optimize _L to _LZ when _L is zero
4875 if (LZMappingInfo) {
4876 if (auto ConstantLod =
Ryan Taylor1f334d02018-08-28 15:07:30 +00004877 dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004878 if (ConstantLod->isZero() || ConstantLod->isNegative()) {
4879 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
Ryan Taylor1f334d02018-08-28 15:07:30 +00004880 NumMIVAddrs--; // remove 'lod'
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004881 }
4882 }
4883 }
4884
Ryan Taylor1f334d02018-08-28 15:07:30 +00004885 // Check for 16-bit addresses and pack them if the target supports A16.
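  // For example, a 2D sample with f16 coordinates packs s and t into a single
  // dword, so the MIMG instruction needs fewer vaddr registers.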
4886 unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
4887 MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
Neil Henning63718b22018-10-31 10:34:48 +00004888 const MVT VAddrScalarVT = VAddrVT.getScalarType();
4889 if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
Ryan Taylor1f334d02018-08-28 15:07:30 +00004890 ST->hasFeature(AMDGPU::FeatureR128A16)) {
4891 IsA16 = true;
Neil Henning63718b22018-10-31 10:34:48 +00004892 const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
Ryan Taylor1f334d02018-08-28 15:07:30 +00004893 for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
4894 SDValue AddrLo, AddrHi;
4895 // Push back extra arguments.
4896 if (i < DimIdx) {
4897 AddrLo = Op.getOperand(i);
4898 } else {
4899 AddrLo = Op.getOperand(i);
4900 // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
4901 // in 1D, derivatives dx/dh and dx/dv are packed with undef.
4902 if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
Matt Arsenault0da63502018-08-31 05:49:54 +00004903 ((NumGradients / 2) % 2 == 1 &&
4904 (i == DimIdx + (NumGradients / 2) - 1 ||
Ryan Taylor1f334d02018-08-28 15:07:30 +00004905 i == DimIdx + NumGradients - 1))) {
4906 AddrHi = DAG.getUNDEF(MVT::f16);
4907 } else {
4908 AddrHi = Op.getOperand(i + 1);
4909 i++;
4910 }
Neil Henning63718b22018-10-31 10:34:48 +00004911 AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
Ryan Taylor1f334d02018-08-28 15:07:30 +00004912 {AddrLo, AddrHi});
4913 AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
4914 }
4915 VAddrs.push_back(AddrLo);
4916 }
4917 } else {
4918 for (unsigned i = 0; i < NumMIVAddrs; ++i)
4919 VAddrs.push_back(Op.getOperand(AddrIdx + i));
4920 }
4921
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004922 SDValue VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
4923
4924 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
4925 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
4926 unsigned CtrlIdx; // Index of texfailctrl argument
4927 SDValue Unorm;
4928 if (!BaseOpcode->Sampler) {
4929 Unorm = True;
4930 CtrlIdx = AddrIdx + NumVAddrs + 1;
4931 } else {
4932 auto UnormConst =
4933 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
4934 if (!UnormConst)
4935 return Op;
4936
4937 Unorm = UnormConst->getZExtValue() ? True : False;
4938 CtrlIdx = AddrIdx + NumVAddrs + 3;
4939 }
4940
David Stuttardf77079f2019-01-14 11:55:24 +00004941 SDValue TFE;
4942 SDValue LWE;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004943 SDValue TexFail = Op.getOperand(CtrlIdx);
David Stuttardf77079f2019-01-14 11:55:24 +00004944 bool IsTexFail = false;
4945 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004946 return Op;
4947
David Stuttardf77079f2019-01-14 11:55:24 +00004948 if (IsTexFail) {
4949 if (!DMaskLanes) {
4950 // Expecting to get an error flag since TFC is on and dmask is 0.
4951 // Force dmask to be at least 1, otherwise the instruction will fail.
4952 DMask = 0x1;
4953 DMaskLanes = 1;
4954 NumVDataDwords = 1;
4955 }
4956 NumVDataDwords += 1;
4957 AdjustRetType = true;
4958 }
4959
4960 // Something earlier has tagged that the return type needs adjusting.
4961 // This happens if the instruction is a load or has TexFailCtrl flags set.
4962 if (AdjustRetType) {
4963 // NumVDataDwords reflects the true number of dwords required in the return type
4964 if (DMaskLanes == 0 && !BaseOpcode->Store) {
4965 // This is a no-op load. This can be eliminated
4966 SDValue Undef = DAG.getUNDEF(Op.getValueType());
4967 if (isa<MemSDNode>(Op))
4968 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
4969 return Undef;
4970 }
4971
4972 // Have to use a power of 2 number of dwords
4973 NumVDataDwords = 1 << Log2_32_Ceil(NumVDataDwords);
4974
4975 EVT NewVT = NumVDataDwords > 1 ?
4976 EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
4977 : MVT::f32;
4978
4979 ResultTypes[0] = NewVT;
4980 if (ResultTypes.size() == 3) {
4981 // The original result was an aggregate type used for TexFailCtrl results.
4982 // The actual instruction returns as a vector type, which has now been
4983 // created, so remove the aggregate result.
4984 ResultTypes.erase(&ResultTypes[1]);
4985 }
4986 }
4987
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004988 SDValue GLC;
4989 SDValue SLC;
4990 if (BaseOpcode->Atomic) {
4991 GLC = True; // TODO no-return optimization
4992 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC))
4993 return Op;
4994 } else {
4995 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC))
4996 return Op;
4997 }
4998
4999 SmallVector<SDValue, 14> Ops;
5000 if (BaseOpcode->Store || BaseOpcode->Atomic)
5001 Ops.push_back(VData); // vdata
5002 Ops.push_back(VAddr);
5003 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5004 if (BaseOpcode->Sampler)
5005 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5006 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
5007 Ops.push_back(Unorm);
5008 Ops.push_back(GLC);
5009 Ops.push_back(SLC);
Ryan Taylor1f334d02018-08-28 15:07:30 +00005010 Ops.push_back(IsA16 && // a16 or r128
5011 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
David Stuttardf77079f2019-01-14 11:55:24 +00005012 Ops.push_back(TFE); // tfe
5013 Ops.push_back(LWE); // lwe
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005014 Ops.push_back(DimInfo->DA ? True : False);
5015 if (BaseOpcode->HasD16)
5016 Ops.push_back(IsD16 ? True : False);
5017 if (isa<MemSDNode>(Op))
5018 Ops.push_back(Op.getOperand(0)); // chain
5019
5020 int NumVAddrDwords = VAddr.getValueType().getSizeInBits() / 32;
5021 int Opcode = -1;
5022
Tom Stellard5bfbae52018-07-11 20:59:01 +00005023 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005024 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005025 NumVDataDwords, NumVAddrDwords);
5026 if (Opcode == -1)
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005027 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005028 NumVDataDwords, NumVAddrDwords);
5029 assert(Opcode != -1);
5030
5031 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5032 if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
Chandler Carruth66654b72018-08-14 23:30:32 +00005033 MachineMemOperand *MemRef = MemOp->getMemOperand();
5034 DAG.setNodeMemRefs(NewNode, {MemRef});
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005035 }
5036
5037 if (BaseOpcode->AtomicX2) {
5038 SmallVector<SDValue, 1> Elt;
5039 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5040 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
David Stuttardf77079f2019-01-14 11:55:24 +00005041 } else if (!BaseOpcode->Store) {
5042 return constructRetValue(DAG, NewNode,
5043 OrigResultTypes, IsTexFail,
5044 Subtarget->hasUnpackedD16VMem(), IsD16,
5045 DMaskLanes, NumVDataDwords, DL,
5046 *DAG.getContext());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005047 }
5048
5049 return SDValue(NewNode, 0);
5050}
5051
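// Lower an SMEM buffer load. With a uniform offset this emits a single
// s_buffer_load node; with a divergent offset it falls back to MUBUF buffer
// loads (split into 16-byte pieces for 8- and 16-element vectors, assuming an
// unswizzled buffer) and concatenates the results into the requested type.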
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005052SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5053 SDValue Offset, SDValue GLC,
5054 SelectionDAG &DAG) const {
5055 MachineFunction &MF = DAG.getMachineFunction();
5056 MachineMemOperand *MMO = MF.getMachineMemOperand(
5057 MachinePointerInfo(),
5058 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5059 MachineMemOperand::MOInvariant,
5060 VT.getStoreSize(), VT.getStoreSize());
5061
5062 if (!Offset->isDivergent()) {
5063 SDValue Ops[] = {
5064 Rsrc,
5065 Offset, // Offset
5066 GLC // glc
5067 };
5068 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5069 DAG.getVTList(VT), Ops, VT, MMO);
5070 }
5071
5072 // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5073 // assume that the buffer is unswizzled.
5074 SmallVector<SDValue, 4> Loads;
5075 unsigned NumLoads = 1;
5076 MVT LoadVT = VT.getSimpleVT();
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005077 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
Simon Pilgrim44dfd812018-12-07 21:44:25 +00005078 assert((LoadVT.getScalarType() == MVT::i32 ||
5079 LoadVT.getScalarType() == MVT::f32) &&
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005080 isPowerOf2_32(NumElts));
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005081
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005082 if (NumElts == 8 || NumElts == 16) {
5083 NumLoads = NumElts == 16 ? 4 : 2;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005084 LoadVT = MVT::v4i32;
5085 }
5086
5087 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5088 unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5089 SDValue Ops[] = {
5090 DAG.getEntryNode(), // Chain
5091 Rsrc, // rsrc
5092 DAG.getConstant(0, DL, MVT::i32), // vindex
5093 {}, // voffset
5094 {}, // soffset
5095 {}, // offset
5096 DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5097 DAG.getConstant(0, DL, MVT::i1), // idxen
5098 };
5099
5100 // Use the alignment to ensure that the required offsets will fit into the
5101 // immediate offsets.
5102 setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5103
5104 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5105 for (unsigned i = 0; i < NumLoads; ++i) {
5106 Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5107 Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5108 Ops, LoadVT, MMO));
5109 }
5110
5111 if (VT == MVT::v8i32 || VT == MVT::v16i32)
5112 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5113
5114 return Loads[0];
5115}
5116
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005117SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5118 SelectionDAG &DAG) const {
5119 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00005120 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005121
5122 EVT VT = Op.getValueType();
5123 SDLoc DL(Op);
5124 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5125
Sanjay Patela2607012015-09-16 16:31:21 +00005126 // TODO: Should this propagate fast-math-flags?
5127
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005128 switch (IntrinsicID) {
Tom Stellard2f3f9852017-01-25 01:25:13 +00005129 case Intrinsic::amdgcn_implicit_buffer_ptr: {
Konstantin Zhuravlyovaa067cb2018-10-04 21:02:16 +00005130 if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
Matt Arsenault10fc0622017-06-26 03:01:31 +00005131 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005132 return getPreloadedValue(DAG, *MFI, VT,
5133 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
Tom Stellard2f3f9852017-01-25 01:25:13 +00005134 }
Tom Stellard48f29f22015-11-26 00:43:29 +00005135 case Intrinsic::amdgcn_dispatch_ptr:
Matt Arsenault48ab5262016-04-25 19:27:18 +00005136 case Intrinsic::amdgcn_queue_ptr: {
Konstantin Zhuravlyovaa067cb2018-10-04 21:02:16 +00005137 if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00005138 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00005139 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
Oliver Stannard7e7d9832016-02-02 13:52:43 +00005140 DL.getDebugLoc());
Matt Arsenault800fecf2016-01-11 21:18:33 +00005141 DAG.getContext()->diagnose(BadIntrin);
5142 return DAG.getUNDEF(VT);
5143 }
5144
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005145 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5146 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5147 return getPreloadedValue(DAG, *MFI, VT, RegID);
Matt Arsenault48ab5262016-04-25 19:27:18 +00005148 }
Jan Veselyfea814d2016-06-21 20:46:20 +00005149 case Intrinsic::amdgcn_implicitarg_ptr: {
Matt Arsenault9166ce82017-07-28 15:52:08 +00005150 if (MFI->isEntryFunction())
5151 return getImplicitArgPtr(DAG, DL);
Matt Arsenault817c2532017-08-03 23:12:44 +00005152 return getPreloadedValue(DAG, *MFI, VT,
5153 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
Jan Veselyfea814d2016-06-21 20:46:20 +00005154 }
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00005155 case Intrinsic::amdgcn_kernarg_segment_ptr: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005156 return getPreloadedValue(DAG, *MFI, VT,
5157 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00005158 }
Matt Arsenault8d718dc2016-07-22 17:01:30 +00005159 case Intrinsic::amdgcn_dispatch_id: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005160 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
Matt Arsenault8d718dc2016-07-22 17:01:30 +00005161 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005162 case Intrinsic::amdgcn_rcp:
5163 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5164 case Intrinsic::amdgcn_rsq:
5165 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00005166 case Intrinsic::amdgcn_rsq_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00005167 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005168 return emitRemovedIntrinsicError(DAG, DL, VT);
5169
5170 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00005171 case Intrinsic::amdgcn_rcp_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00005172 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault32fc5272016-07-26 16:45:45 +00005173 return emitRemovedIntrinsicError(DAG, DL, VT);
5174 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
Matt Arsenault09b2c4a2016-07-15 21:26:52 +00005175 case Intrinsic::amdgcn_rsq_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005176 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault79963e82016-02-13 01:03:00 +00005177 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Tom Stellard48f29f22015-11-26 00:43:29 +00005178
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005179 Type *Type = VT.getTypeForEVT(*DAG.getContext());
5180 APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5181 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5182
5183 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5184 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5185 DAG.getConstantFP(Max, DL, VT));
5186 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5187 DAG.getConstantFP(Min, DL, VT));
5188 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005189 case Intrinsic::r600_read_ngroups_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005190 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005191 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005192
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005193 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005194 SI::KernelInputOffsets::NGROUPS_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005195 case Intrinsic::r600_read_ngroups_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005196 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005197 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005198
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005199 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005200 SI::KernelInputOffsets::NGROUPS_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005201 case Intrinsic::r600_read_ngroups_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005202 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005203 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005204
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005205 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005206 SI::KernelInputOffsets::NGROUPS_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005207 case Intrinsic::r600_read_global_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005208 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005209 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005210
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005211 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005212 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005213 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005214 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005215 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005216
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005217 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005218 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005219 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005220 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005221 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005222
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005223 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005224 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005225 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005226 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005227 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005228
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005229 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5230 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005231 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005232 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005233 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005234
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005235 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5236 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005237 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005238 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005239 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005240
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005241 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5242 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00005243 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005244 case Intrinsic::r600_read_tgid_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005245 return getPreloadedValue(DAG, *MFI, VT,
5246 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Matt Arsenault43976df2016-01-30 04:25:19 +00005247 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005248 case Intrinsic::r600_read_tgid_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005249 return getPreloadedValue(DAG, *MFI, VT,
5250 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Matt Arsenault43976df2016-01-30 04:25:19 +00005251 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005252 case Intrinsic::r600_read_tgid_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005253 return getPreloadedValue(DAG, *MFI, VT,
5254 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
Reid Kleckner4dc0b1a2018-11-01 19:54:45 +00005255 case Intrinsic::amdgcn_workitem_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005256 case Intrinsic::r600_read_tidig_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005257 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5258 SDLoc(DAG.getEntryNode()),
5259 MFI->getArgInfo().WorkItemIDX);
Matt Arsenault43976df2016-01-30 04:25:19 +00005260 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005261 case Intrinsic::r600_read_tidig_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005262 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5263 SDLoc(DAG.getEntryNode()),
5264 MFI->getArgInfo().WorkItemIDY);
Matt Arsenault43976df2016-01-30 04:25:19 +00005265 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005266 case Intrinsic::r600_read_tidig_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005267 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5268 SDLoc(DAG.getEntryNode()),
5269 MFI->getArgInfo().WorkItemIDZ);
Matt Arsenaultaa9bcd52018-12-07 17:46:16 +00005270 case SIIntrinsic::SI_load_const: {
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005271 SDValue Load =
5272 lowerSBuffer(MVT::i32, DL, Op.getOperand(1), Op.getOperand(2),
5273 DAG.getTargetConstant(0, DL, MVT::i1), DAG);
Tim Renouf904343f2018-08-25 14:53:17 +00005274 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Load);
5275 }
5276 case Intrinsic::amdgcn_s_buffer_load: {
5277 unsigned Cache = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005278 return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2),
5279 DAG.getTargetConstant(Cache & 1, DL, MVT::i1), DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005280 }
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00005281 case Intrinsic::amdgcn_fdiv_fast:
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005282 return lowerFDIV_FAST(Op, DAG);
Tom Stellard2187bb82016-12-06 23:52:13 +00005283 case Intrinsic::amdgcn_interp_mov: {
5284 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5285 SDValue Glue = M0.getValue(1);
5286 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5287 Op.getOperand(2), Op.getOperand(3), Glue);
5288 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00005289 case Intrinsic::amdgcn_interp_p1: {
5290 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5291 SDValue Glue = M0.getValue(1);
5292 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5293 Op.getOperand(2), Op.getOperand(3), Glue);
5294 }
5295 case Intrinsic::amdgcn_interp_p2: {
5296 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5297 SDValue Glue = SDValue(M0.getNode(), 1);
5298 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5299 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5300 Glue);
5301 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005302 case Intrinsic::amdgcn_sin:
5303 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5304
5305 case Intrinsic::amdgcn_cos:
5306 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5307
5308 case Intrinsic::amdgcn_log_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005309 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005310 return SDValue();
5311
5312 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00005313 MF.getFunction(), "intrinsic not supported on subtarget",
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005314 DL.getDebugLoc());
5315 DAG.getContext()->diagnose(BadIntrin);
5316 return DAG.getUNDEF(VT);
5317 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005318 case Intrinsic::amdgcn_ldexp:
5319 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5320 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00005321
5322 case Intrinsic::amdgcn_fract:
5323 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5324
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005325 case Intrinsic::amdgcn_class:
5326 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5327 Op.getOperand(1), Op.getOperand(2));
5328 case Intrinsic::amdgcn_div_fmas:
5329 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5330 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5331 Op.getOperand(4));
5332
5333 case Intrinsic::amdgcn_div_fixup:
5334 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5335 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5336
5337 case Intrinsic::amdgcn_trig_preop:
5338 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5339 Op.getOperand(1), Op.getOperand(2));
5340 case Intrinsic::amdgcn_div_scale: {
5341 // 3rd parameter required to be a constant.
5342 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
5343 if (!Param)
Matt Arsenault206f8262017-08-01 20:49:41 +00005344 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL);
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005345
5346 // Translate to the operands expected by the machine instruction. The
5347 // first parameter must be the same as the first instruction.
5348 SDValue Numerator = Op.getOperand(1);
5349 SDValue Denominator = Op.getOperand(2);
5350
5351 // Note this order is opposite of the machine instruction's operations,
5352 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5353 // intrinsic has the numerator as the first operand to match a normal
5354 // division operation.
5355
5356 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5357
5358 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5359 Denominator, Numerator);
5360 }
Wei Ding07e03712016-07-28 16:42:13 +00005361 case Intrinsic::amdgcn_icmp: {
Marek Olsak33eb4d92019-01-15 02:13:18 +00005362 // There is a Pat that handles this variant, so return it as-is.
5363 if (Op.getOperand(1).getValueType() == MVT::i1 &&
5364 Op.getConstantOperandVal(2) == 0 &&
5365 Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5366 return Op;
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00005367 return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
Wei Ding07e03712016-07-28 16:42:13 +00005368 }
5369 case Intrinsic::amdgcn_fcmp: {
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00005370 return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
Wei Ding07e03712016-07-28 16:42:13 +00005371 }
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00005372 case Intrinsic::amdgcn_fmed3:
5373 return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5374 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Farhana Aleenc370d7b2018-07-16 18:19:59 +00005375 case Intrinsic::amdgcn_fdot2:
5376 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00005377 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5378 Op.getOperand(4));
Matt Arsenault32fc5272016-07-26 16:45:45 +00005379 case Intrinsic::amdgcn_fmul_legacy:
5380 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5381 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00005382 case Intrinsic::amdgcn_sffbh:
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00005383 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaultf5262252017-02-22 23:04:58 +00005384 case Intrinsic::amdgcn_sbfe:
5385 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5386 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5387 case Intrinsic::amdgcn_ubfe:
5388 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5389 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Marek Olsak13e47412018-01-31 20:18:04 +00005390 case Intrinsic::amdgcn_cvt_pkrtz:
5391 case Intrinsic::amdgcn_cvt_pknorm_i16:
5392 case Intrinsic::amdgcn_cvt_pknorm_u16:
5393 case Intrinsic::amdgcn_cvt_pk_i16:
5394 case Intrinsic::amdgcn_cvt_pk_u16: {
5395 // FIXME: Stop adding cast if v2f16/v2i16 are legal.
Matt Arsenault1f17c662017-02-22 00:27:34 +00005396 EVT VT = Op.getValueType();
Marek Olsak13e47412018-01-31 20:18:04 +00005397 unsigned Opcode;
5398
5399 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5400 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5401 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5402 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5403 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5404 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5405 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5406 Opcode = AMDGPUISD::CVT_PK_I16_I32;
5407 else
5408 Opcode = AMDGPUISD::CVT_PK_U16_U32;
5409
Matt Arsenault709374d2018-08-01 20:13:58 +00005410 if (isTypeLegal(VT))
5411 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5412
Marek Olsak13e47412018-01-31 20:18:04 +00005413 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Matt Arsenault1f17c662017-02-22 00:27:34 +00005414 Op.getOperand(1), Op.getOperand(2));
5415 return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5416 }
Connor Abbott8c217d02017-08-04 18:36:49 +00005417 case Intrinsic::amdgcn_wqm: {
5418 SDValue Src = Op.getOperand(1);
5419 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
5420 0);
5421 }
Connor Abbott92638ab2017-08-04 18:36:52 +00005422 case Intrinsic::amdgcn_wwm: {
5423 SDValue Src = Op.getOperand(1);
5424 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
5425 0);
5426 }
Stanislav Mekhanoshindacda792018-06-26 20:04:19 +00005427 case Intrinsic::amdgcn_fmad_ftz:
5428 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5429 Op.getOperand(2), Op.getOperand(3));
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005430 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005431 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5432 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5433 return lowerImage(Op, ImageDimIntr, DAG);
5434
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005435 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005436 }
5437}
5438
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005439SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5440 SelectionDAG &DAG) const {
5441 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00005442 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00005443
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005444 switch (IntrID) {
Marek Olsakc5cec5e2019-01-16 15:43:53 +00005445 case Intrinsic::amdgcn_ds_ordered_add:
5446 case Intrinsic::amdgcn_ds_ordered_swap: {
5447 MemSDNode *M = cast<MemSDNode>(Op);
5448 SDValue Chain = M->getOperand(0);
5449 SDValue M0 = M->getOperand(2);
5450 SDValue Value = M->getOperand(3);
5451 unsigned OrderedCountIndex = M->getConstantOperandVal(7);
5452 unsigned WaveRelease = M->getConstantOperandVal(8);
5453 unsigned WaveDone = M->getConstantOperandVal(9);
5454 unsigned ShaderType;
5455 unsigned Instruction;
5456
5457 switch (IntrID) {
5458 case Intrinsic::amdgcn_ds_ordered_add:
5459 Instruction = 0;
5460 break;
5461 case Intrinsic::amdgcn_ds_ordered_swap:
5462 Instruction = 1;
5463 break;
5464 }
5465
5466 if (WaveDone && !WaveRelease)
5467 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
5468
5469 switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
5470 case CallingConv::AMDGPU_CS:
5471 case CallingConv::AMDGPU_KERNEL:
5472 ShaderType = 0;
5473 break;
5474 case CallingConv::AMDGPU_PS:
5475 ShaderType = 1;
5476 break;
5477 case CallingConv::AMDGPU_VS:
5478 ShaderType = 2;
5479 break;
5480 case CallingConv::AMDGPU_GS:
5481 ShaderType = 3;
5482 break;
5483 default:
5484 report_fatal_error("ds_ordered_count unsupported for this calling conv");
5485 }
5486
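    // The ds_ordered_count offset field is packed as: bits [7:0] = ordered
    // count index * 4, bit 8 = wave_release, bit 9 = wave_done, bits [11:10] =
    // shader type, and bit 12 = instruction (0 = add, 1 = swap).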
5487 unsigned Offset0 = OrderedCountIndex << 2;
5488 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
5489 (Instruction << 4);
5490 unsigned Offset = Offset0 | (Offset1 << 8);
5491
5492 SDValue Ops[] = {
5493 Chain,
5494 Value,
5495 DAG.getTargetConstant(Offset, DL, MVT::i16),
5496 copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
5497 };
5498 return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
5499 M->getVTList(), Ops, M->getMemoryVT(),
5500 M->getMemOperand());
5501 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005502 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005503 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005504 case Intrinsic::amdgcn_ds_fadd:
5505 case Intrinsic::amdgcn_ds_fmin:
5506 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005507 MemSDNode *M = cast<MemSDNode>(Op);
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005508 unsigned Opc;
5509 switch (IntrID) {
5510 case Intrinsic::amdgcn_atomic_inc:
5511 Opc = AMDGPUISD::ATOMIC_INC;
5512 break;
5513 case Intrinsic::amdgcn_atomic_dec:
5514 Opc = AMDGPUISD::ATOMIC_DEC;
5515 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005516 case Intrinsic::amdgcn_ds_fadd:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005517 Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
5518 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005519 case Intrinsic::amdgcn_ds_fmin:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005520 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
5521 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005522 case Intrinsic::amdgcn_ds_fmax:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005523 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
5524 break;
5525 default:
5526 llvm_unreachable("Unknown intrinsic!");
5527 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005528 SDValue Ops[] = {
5529 M->getOperand(0), // Chain
5530 M->getOperand(2), // Ptr
5531 M->getOperand(3) // Value
5532 };
5533
5534 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
5535 M->getMemoryVT(), M->getMemOperand());
5536 }
Tom Stellard6f9ef142016-12-20 17:19:44 +00005537 case Intrinsic::amdgcn_buffer_load:
5538 case Intrinsic::amdgcn_buffer_load_format: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005539 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
5540 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5541 unsigned IdxEn = 1;
5542 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5543 IdxEn = Idx->getZExtValue() != 0;
Tom Stellard6f9ef142016-12-20 17:19:44 +00005544 SDValue Ops[] = {
5545 Op.getOperand(0), // Chain
5546 Op.getOperand(2), // rsrc
5547 Op.getOperand(3), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005548 SDValue(), // voffset -- will be set by setBufferOffsets
5549 SDValue(), // soffset -- will be set by setBufferOffsets
5550 SDValue(), // offset -- will be set by setBufferOffsets
5551 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5552 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Tom Stellard6f9ef142016-12-20 17:19:44 +00005553 };
Tom Stellard6f9ef142016-12-20 17:19:44 +00005554
Tim Renouf4f703f52018-08-21 11:07:10 +00005555 setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
Tom Stellard6f9ef142016-12-20 17:19:44 +00005556 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
5557 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
Tim Renouf4f703f52018-08-21 11:07:10 +00005558
5559 EVT VT = Op.getValueType();
5560 EVT IntVT = VT.changeTypeToInteger();
5561 auto *M = cast<MemSDNode>(Op);
5562 EVT LoadVT = Op.getValueType();
5563
5564 if (LoadVT.getScalarType() == MVT::f16)
5565 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5566 M, DAG, Ops);
5567 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5568 M->getMemOperand());
5569 }
5570 case Intrinsic::amdgcn_raw_buffer_load:
5571 case Intrinsic::amdgcn_raw_buffer_load_format: {
5572 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5573 SDValue Ops[] = {
5574 Op.getOperand(0), // Chain
5575 Op.getOperand(2), // rsrc
5576 DAG.getConstant(0, DL, MVT::i32), // vindex
5577 Offsets.first, // voffset
5578 Op.getOperand(4), // soffset
5579 Offsets.second, // offset
5580 Op.getOperand(5), // cachepolicy
5581 DAG.getConstant(0, DL, MVT::i1), // idxen
5582 };
5583
5584 unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
5585 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5586
5587 EVT VT = Op.getValueType();
5588 EVT IntVT = VT.changeTypeToInteger();
5589 auto *M = cast<MemSDNode>(Op);
5590 EVT LoadVT = Op.getValueType();
5591
5592 if (LoadVT.getScalarType() == MVT::f16)
5593 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5594 M, DAG, Ops);
5595 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5596 M->getMemOperand());
5597 }
5598 case Intrinsic::amdgcn_struct_buffer_load:
5599 case Intrinsic::amdgcn_struct_buffer_load_format: {
5600 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5601 SDValue Ops[] = {
5602 Op.getOperand(0), // Chain
5603 Op.getOperand(2), // rsrc
5604 Op.getOperand(3), // vindex
5605 Offsets.first, // voffset
5606 Op.getOperand(5), // soffset
5607 Offsets.second, // offset
5608 Op.getOperand(6), // cachepolicy
5609 DAG.getConstant(1, DL, MVT::i1), // idxen
5610 };
5611
5612 unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
5613 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5614
Tom Stellard6f9ef142016-12-20 17:19:44 +00005615 EVT VT = Op.getValueType();
5616 EVT IntVT = VT.changeTypeToInteger();
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005617 auto *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005618 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005619
Tim Renouf366a49d2018-08-02 23:33:01 +00005620 if (LoadVT.getScalarType() == MVT::f16)
5621 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5622 M, DAG, Ops);
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005623 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5624 M->getMemOperand());
Tom Stellard6f9ef142016-12-20 17:19:44 +00005625 }
David Stuttard70e8bc12017-06-22 16:29:22 +00005626 case Intrinsic::amdgcn_tbuffer_load: {
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005627 MemSDNode *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005628 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005629
Tim Renouf35484c92018-08-21 11:06:05 +00005630 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
5631 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
5632 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
5633 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
5634 unsigned IdxEn = 1;
5635 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5636 IdxEn = Idx->getZExtValue() != 0;
David Stuttard70e8bc12017-06-22 16:29:22 +00005637 SDValue Ops[] = {
5638 Op.getOperand(0), // Chain
5639 Op.getOperand(2), // rsrc
5640 Op.getOperand(3), // vindex
5641 Op.getOperand(4), // voffset
5642 Op.getOperand(5), // soffset
5643 Op.getOperand(6), // offset
Tim Renouf35484c92018-08-21 11:06:05 +00005644 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
5645 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5646 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5647 };
5648
5649 if (LoadVT.getScalarType() == MVT::f16)
5650 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5651 M, DAG, Ops);
5652 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5653 Op->getVTList(), Ops, LoadVT,
5654 M->getMemOperand());
5655 }
5656 case Intrinsic::amdgcn_raw_tbuffer_load: {
5657 MemSDNode *M = cast<MemSDNode>(Op);
5658 EVT LoadVT = Op.getValueType();
5659 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5660
5661 SDValue Ops[] = {
5662 Op.getOperand(0), // Chain
5663 Op.getOperand(2), // rsrc
5664 DAG.getConstant(0, DL, MVT::i32), // vindex
5665 Offsets.first, // voffset
5666 Op.getOperand(4), // soffset
5667 Offsets.second, // offset
5668 Op.getOperand(5), // format
5669 Op.getOperand(6), // cachepolicy
5670 DAG.getConstant(0, DL, MVT::i1), // idxen
5671 };
5672
5673 if (LoadVT.getScalarType() == MVT::f16)
5674 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5675 M, DAG, Ops);
5676 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5677 Op->getVTList(), Ops, LoadVT,
5678 M->getMemOperand());
5679 }
5680 case Intrinsic::amdgcn_struct_tbuffer_load: {
5681 MemSDNode *M = cast<MemSDNode>(Op);
5682 EVT LoadVT = Op.getValueType();
5683 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5684
5685 SDValue Ops[] = {
5686 Op.getOperand(0), // Chain
5687 Op.getOperand(2), // rsrc
5688 Op.getOperand(3), // vindex
5689 Offsets.first, // voffset
5690 Op.getOperand(5), // soffset
5691 Offsets.second, // offset
5692 Op.getOperand(6), // format
5693 Op.getOperand(7), // cachepolicy
5694 DAG.getConstant(1, DL, MVT::i1), // idxen
David Stuttard70e8bc12017-06-22 16:29:22 +00005695 };
5696
Tim Renouf366a49d2018-08-02 23:33:01 +00005697 if (LoadVT.getScalarType() == MVT::f16)
5698 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5699 M, DAG, Ops);
David Stuttard70e8bc12017-06-22 16:29:22 +00005700 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
Matt Arsenault1349a042018-05-22 06:32:10 +00005701 Op->getVTList(), Ops, LoadVT,
5702 M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00005703 }
Marek Olsak5cec6412017-11-09 01:52:48 +00005704 case Intrinsic::amdgcn_buffer_atomic_swap:
5705 case Intrinsic::amdgcn_buffer_atomic_add:
5706 case Intrinsic::amdgcn_buffer_atomic_sub:
5707 case Intrinsic::amdgcn_buffer_atomic_smin:
5708 case Intrinsic::amdgcn_buffer_atomic_umin:
5709 case Intrinsic::amdgcn_buffer_atomic_smax:
5710 case Intrinsic::amdgcn_buffer_atomic_umax:
5711 case Intrinsic::amdgcn_buffer_atomic_and:
5712 case Intrinsic::amdgcn_buffer_atomic_or:
5713 case Intrinsic::amdgcn_buffer_atomic_xor: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005714 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5715 unsigned IdxEn = 1;
5716 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
5717 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00005718 SDValue Ops[] = {
5719 Op.getOperand(0), // Chain
5720 Op.getOperand(2), // vdata
5721 Op.getOperand(3), // rsrc
5722 Op.getOperand(4), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005723 SDValue(), // voffset -- will be set by setBufferOffsets
5724 SDValue(), // soffset -- will be set by setBufferOffsets
5725 SDValue(), // offset -- will be set by setBufferOffsets
5726 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
5727 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00005728 };
Tim Renouf4f703f52018-08-21 11:07:10 +00005729 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005730 EVT VT = Op.getValueType();
5731
5732 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005733 unsigned Opcode = 0;
5734
5735 switch (IntrID) {
5736 case Intrinsic::amdgcn_buffer_atomic_swap:
5737 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5738 break;
5739 case Intrinsic::amdgcn_buffer_atomic_add:
5740 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5741 break;
5742 case Intrinsic::amdgcn_buffer_atomic_sub:
5743 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5744 break;
5745 case Intrinsic::amdgcn_buffer_atomic_smin:
5746 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5747 break;
5748 case Intrinsic::amdgcn_buffer_atomic_umin:
5749 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5750 break;
5751 case Intrinsic::amdgcn_buffer_atomic_smax:
5752 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5753 break;
5754 case Intrinsic::amdgcn_buffer_atomic_umax:
5755 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5756 break;
5757 case Intrinsic::amdgcn_buffer_atomic_and:
5758 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5759 break;
5760 case Intrinsic::amdgcn_buffer_atomic_or:
5761 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5762 break;
5763 case Intrinsic::amdgcn_buffer_atomic_xor:
5764 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5765 break;
5766 default:
5767 llvm_unreachable("unhandled atomic opcode");
5768 }
5769
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005770 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5771 M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005772 }
Tim Renouf4f703f52018-08-21 11:07:10 +00005773 case Intrinsic::amdgcn_raw_buffer_atomic_swap:
5774 case Intrinsic::amdgcn_raw_buffer_atomic_add:
5775 case Intrinsic::amdgcn_raw_buffer_atomic_sub:
5776 case Intrinsic::amdgcn_raw_buffer_atomic_smin:
5777 case Intrinsic::amdgcn_raw_buffer_atomic_umin:
5778 case Intrinsic::amdgcn_raw_buffer_atomic_smax:
5779 case Intrinsic::amdgcn_raw_buffer_atomic_umax:
5780 case Intrinsic::amdgcn_raw_buffer_atomic_and:
5781 case Intrinsic::amdgcn_raw_buffer_atomic_or:
5782 case Intrinsic::amdgcn_raw_buffer_atomic_xor: {
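    // The raw variants never index into the resource: vindex is pinned to 0 and
    // idxen is left clear, while the single offset operand is split between
    // voffset and the immediate offset field.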
5783 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5784 SDValue Ops[] = {
5785 Op.getOperand(0), // Chain
5786 Op.getOperand(2), // vdata
5787 Op.getOperand(3), // rsrc
5788 DAG.getConstant(0, DL, MVT::i32), // vindex
5789 Offsets.first, // voffset
5790 Op.getOperand(5), // soffset
5791 Offsets.second, // offset
5792 Op.getOperand(6), // cachepolicy
5793 DAG.getConstant(0, DL, MVT::i1), // idxen
5794 };
5795 EVT VT = Op.getValueType();
Marek Olsak5cec6412017-11-09 01:52:48 +00005796
Tim Renouf4f703f52018-08-21 11:07:10 +00005797 auto *M = cast<MemSDNode>(Op);
5798 unsigned Opcode = 0;
5799
5800 switch (IntrID) {
5801 case Intrinsic::amdgcn_raw_buffer_atomic_swap:
5802 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5803 break;
5804 case Intrinsic::amdgcn_raw_buffer_atomic_add:
5805 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5806 break;
5807 case Intrinsic::amdgcn_raw_buffer_atomic_sub:
5808 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5809 break;
5810 case Intrinsic::amdgcn_raw_buffer_atomic_smin:
5811 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5812 break;
5813 case Intrinsic::amdgcn_raw_buffer_atomic_umin:
5814 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5815 break;
5816 case Intrinsic::amdgcn_raw_buffer_atomic_smax:
5817 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5818 break;
5819 case Intrinsic::amdgcn_raw_buffer_atomic_umax:
5820 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5821 break;
5822 case Intrinsic::amdgcn_raw_buffer_atomic_and:
5823 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5824 break;
5825 case Intrinsic::amdgcn_raw_buffer_atomic_or:
5826 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5827 break;
5828 case Intrinsic::amdgcn_raw_buffer_atomic_xor:
5829 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5830 break;
5831 default:
5832 llvm_unreachable("unhandled atomic opcode");
5833 }
5834
5835 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5836 M->getMemOperand());
5837 }
5838 case Intrinsic::amdgcn_struct_buffer_atomic_swap:
5839 case Intrinsic::amdgcn_struct_buffer_atomic_add:
5840 case Intrinsic::amdgcn_struct_buffer_atomic_sub:
5841 case Intrinsic::amdgcn_struct_buffer_atomic_smin:
5842 case Intrinsic::amdgcn_struct_buffer_atomic_umin:
5843 case Intrinsic::amdgcn_struct_buffer_atomic_smax:
5844 case Intrinsic::amdgcn_struct_buffer_atomic_umax:
5845 case Intrinsic::amdgcn_struct_buffer_atomic_and:
5846 case Intrinsic::amdgcn_struct_buffer_atomic_or:
5847 case Intrinsic::amdgcn_struct_buffer_atomic_xor: {
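    // The struct variants always index into the resource, so the explicit vindex
    // operand is passed through and idxen is set.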
5848 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
5849 SDValue Ops[] = {
5850 Op.getOperand(0), // Chain
5851 Op.getOperand(2), // vdata
5852 Op.getOperand(3), // rsrc
5853 Op.getOperand(4), // vindex
5854 Offsets.first, // voffset
5855 Op.getOperand(6), // soffset
5856 Offsets.second, // offset
5857 Op.getOperand(7), // cachepolicy
5858 DAG.getConstant(1, DL, MVT::i1), // idxen
5859 };
5860 EVT VT = Op.getValueType();
5861
5862 auto *M = cast<MemSDNode>(Op);
5863 unsigned Opcode = 0;
5864
5865 switch (IntrID) {
5866 case Intrinsic::amdgcn_struct_buffer_atomic_swap:
5867 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5868 break;
5869 case Intrinsic::amdgcn_struct_buffer_atomic_add:
5870 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5871 break;
5872 case Intrinsic::amdgcn_struct_buffer_atomic_sub:
5873 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5874 break;
5875 case Intrinsic::amdgcn_struct_buffer_atomic_smin:
5876 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5877 break;
5878 case Intrinsic::amdgcn_struct_buffer_atomic_umin:
5879 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5880 break;
5881 case Intrinsic::amdgcn_struct_buffer_atomic_smax:
5882 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5883 break;
5884 case Intrinsic::amdgcn_struct_buffer_atomic_umax:
5885 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5886 break;
5887 case Intrinsic::amdgcn_struct_buffer_atomic_and:
5888 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5889 break;
5890 case Intrinsic::amdgcn_struct_buffer_atomic_or:
5891 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5892 break;
5893 case Intrinsic::amdgcn_struct_buffer_atomic_xor:
5894 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5895 break;
5896 default:
5897 llvm_unreachable("unhandled atomic opcode");
5898 }
5899
5900 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5901 M->getMemOperand());
5902 }
Marek Olsak5cec6412017-11-09 01:52:48 +00005903 case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005904 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
5905 unsigned IdxEn = 1;
5906 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
5907 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00005908 SDValue Ops[] = {
5909 Op.getOperand(0), // Chain
5910 Op.getOperand(2), // src
5911 Op.getOperand(3), // cmp
5912 Op.getOperand(4), // rsrc
5913 Op.getOperand(5), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005914 SDValue(), // voffset -- will be set by setBufferOffsets
5915 SDValue(), // soffset -- will be set by setBufferOffsets
5916 SDValue(), // offset -- will be set by setBufferOffsets
5917 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
5918 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5919 };
5920 setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
5921 EVT VT = Op.getValueType();
5922 auto *M = cast<MemSDNode>(Op);
5923
5924 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
5925 Op->getVTList(), Ops, VT, M->getMemOperand());
5926 }
5927 case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
5928 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
5929 SDValue Ops[] = {
5930 Op.getOperand(0), // Chain
5931 Op.getOperand(2), // src
5932 Op.getOperand(3), // cmp
5933 Op.getOperand(4), // rsrc
5934 DAG.getConstant(0, DL, MVT::i32), // vindex
5935 Offsets.first, // voffset
5936 Op.getOperand(6), // soffset
5937 Offsets.second, // offset
5938 Op.getOperand(7), // cachepolicy
5939 DAG.getConstant(0, DL, MVT::i1), // idxen
5940 };
5941 EVT VT = Op.getValueType();
5942 auto *M = cast<MemSDNode>(Op);
5943
5944 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
5945 Op->getVTList(), Ops, VT, M->getMemOperand());
5946 }
5947 case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
5948 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
5949 SDValue Ops[] = {
5950 Op.getOperand(0), // Chain
5951 Op.getOperand(2), // src
5952 Op.getOperand(3), // cmp
5953 Op.getOperand(4), // rsrc
5954 Op.getOperand(5), // vindex
5955 Offsets.first, // voffset
5956 Op.getOperand(7), // soffset
5957 Offsets.second, // offset
5958 Op.getOperand(8), // cachepolicy
5959 DAG.getConstant(1, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00005960 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005961 EVT VT = Op.getValueType();
5962 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005963
5964 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005965 Op->getVTList(), Ops, VT, M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005966 }
5967
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005968 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005969 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5970 AMDGPU::getImageDimIntrinsicInfo(IntrID))
5971 return lowerImage(Op, ImageDimIntr, DAG);
Matt Arsenault1349a042018-05-22 06:32:10 +00005972
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005973 return SDValue();
5974 }
5975}
5976
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005977SDValue SITargetLowering::handleD16VData(SDValue VData,
5978 SelectionDAG &DAG) const {
5979 EVT StoreVT = VData.getValueType();
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005980
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00005981 // No change for f16 and legal vector D16 types.
Matt Arsenault1349a042018-05-22 06:32:10 +00005982 if (!StoreVT.isVector())
5983 return VData;
5984
5985 SDLoc DL(VData);
5986 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
5987
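  // Unpacked-D16 targets expect one 16-bit element per 32-bit register. Bitcast
  // the value to the equivalent integer vector, zero-extend each element to i32
  // (e.g. v2f16 -> v2i32), and unroll the extension into per-element nodes.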
5988 if (Subtarget->hasUnpackedD16VMem()) {
5989 // We need to unpack the packed data to store.
5990 EVT IntStoreVT = StoreVT.changeTypeToInteger();
5991 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
5992
5993 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
5994 StoreVT.getVectorNumElements());
5995 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
5996 return DAG.UnrollVectorOp(ZExt.getNode());
5997 }
5998
Matt Arsenault02dc7e12018-06-15 15:15:46 +00005999 assert(isTypeLegal(StoreVT));
6000 return VData;
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006001}
6002
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006003SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6004 SelectionDAG &DAG) const {
Tom Stellardfc92e772015-05-12 14:18:14 +00006005 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006006 SDValue Chain = Op.getOperand(0);
6007 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
David Stuttard70e8bc12017-06-22 16:29:22 +00006008 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006009
6010 switch (IntrinsicID) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00006011 case Intrinsic::amdgcn_exp: {
Matt Arsenault4165efd2017-01-17 07:26:53 +00006012 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6013 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6014 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6015 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6016
6017 const SDValue Ops[] = {
6018 Chain,
6019 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6020 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
6021 Op.getOperand(4), // src0
6022 Op.getOperand(5), // src1
6023 Op.getOperand(6), // src2
6024 Op.getOperand(7), // src3
6025 DAG.getTargetConstant(0, DL, MVT::i1), // compr
6026 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6027 };
6028
6029 unsigned Opc = Done->isNullValue() ?
6030 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6031 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6032 }
6033 case Intrinsic::amdgcn_exp_compr: {
6034 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6035 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6036 SDValue Src0 = Op.getOperand(4);
6037 SDValue Src1 = Op.getOperand(5);
6038 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6039 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6040
6041 SDValue Undef = DAG.getUNDEF(MVT::f32);
6042 const SDValue Ops[] = {
6043 Chain,
6044 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6045 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
6046 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6047 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6048 Undef, // src2
6049 Undef, // src3
6050 DAG.getTargetConstant(1, DL, MVT::i1), // compr
6051 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6052 };
6053
6054 unsigned Opc = Done->isNullValue() ?
6055 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6056 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6057 }
6058 case Intrinsic::amdgcn_s_sendmsg:
Matt Arsenaultd3e5cb72017-02-16 02:01:17 +00006059 case Intrinsic::amdgcn_s_sendmsghalt: {
6060 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
6061 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Tom Stellardfc92e772015-05-12 14:18:14 +00006062 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
6063 SDValue Glue = Chain.getValue(1);
Matt Arsenaulta78ca622017-02-15 22:17:09 +00006064 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Jan Veselyd48445d2017-01-04 18:06:55 +00006065 Op.getOperand(2), Glue);
6066 }
Marek Olsak2d825902017-04-28 20:21:58 +00006067 case Intrinsic::amdgcn_init_exec: {
6068 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
6069 Op.getOperand(2));
6070 }
6071 case Intrinsic::amdgcn_init_exec_from_input: {
6072 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
6073 Op.getOperand(2), Op.getOperand(3));
6074 }
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00006075 case Intrinsic::amdgcn_s_barrier: {
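    // If the whole workgroup fits in a single wave, all its lanes execute in
    // lockstep and the barrier is trivially satisfied; a WAVE_BARRIER pseudo
    // (a scheduling barrier that emits no instruction) is enough.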
6076 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00006077 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matthias Braunf1caa282017-12-15 22:22:58 +00006078 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00006079 if (WGSize <= ST.getWavefrontSize())
6080 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6081 Op.getOperand(0)), 0);
6082 }
6083 return SDValue();
6084 };
David Stuttard70e8bc12017-06-22 16:29:22 +00006085 case Intrinsic::amdgcn_tbuffer_store: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006086 SDValue VData = Op.getOperand(2);
6087 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6088 if (IsD16)
6089 VData = handleD16VData(VData, DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00006090 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6091 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6092 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6093 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6094 unsigned IdxEn = 1;
6095 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6096 IdxEn = Idx->getZExtValue() != 0;
David Stuttard70e8bc12017-06-22 16:29:22 +00006097 SDValue Ops[] = {
6098 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006099 VData, // vdata
David Stuttard70e8bc12017-06-22 16:29:22 +00006100 Op.getOperand(3), // rsrc
6101 Op.getOperand(4), // vindex
6102 Op.getOperand(5), // voffset
6103 Op.getOperand(6), // soffset
6104 Op.getOperand(7), // offset
Tim Renouf35484c92018-08-21 11:06:05 +00006105 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6106 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6107 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6108 };
6109 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6110 AMDGPUISD::TBUFFER_STORE_FORMAT;
6111 MemSDNode *M = cast<MemSDNode>(Op);
6112 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6113 M->getMemoryVT(), M->getMemOperand());
6114 }
6115
6116 case Intrinsic::amdgcn_struct_tbuffer_store: {
6117 SDValue VData = Op.getOperand(2);
6118 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6119 if (IsD16)
6120 VData = handleD16VData(VData, DAG);
6121 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6122 SDValue Ops[] = {
6123 Chain,
6124 VData, // vdata
6125 Op.getOperand(3), // rsrc
6126 Op.getOperand(4), // vindex
6127 Offsets.first, // voffset
6128 Op.getOperand(6), // soffset
6129 Offsets.second, // offset
6130 Op.getOperand(7), // format
6131 Op.getOperand(8), // cachepolicy
6132 DAG.getConstant(1, DL, MVT::i1), // idxen
6133 };
6134 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6135 AMDGPUISD::TBUFFER_STORE_FORMAT;
6136 MemSDNode *M = cast<MemSDNode>(Op);
6137 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6138 M->getMemoryVT(), M->getMemOperand());
6139 }
6140
6141 case Intrinsic::amdgcn_raw_tbuffer_store: {
6142 SDValue VData = Op.getOperand(2);
6143 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6144 if (IsD16)
6145 VData = handleD16VData(VData, DAG);
6146 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6147 SDValue Ops[] = {
6148 Chain,
6149 VData, // vdata
6150 Op.getOperand(3), // rsrc
6151 DAG.getConstant(0, DL, MVT::i32), // vindex
6152 Offsets.first, // voffset
6153 Op.getOperand(5), // soffset
6154 Offsets.second, // offset
6155 Op.getOperand(6), // format
6156 Op.getOperand(7), // cachepolicy
6157 DAG.getConstant(0, DL, MVT::i1), // idxen
David Stuttard70e8bc12017-06-22 16:29:22 +00006158 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006159 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6160 AMDGPUISD::TBUFFER_STORE_FORMAT;
6161 MemSDNode *M = cast<MemSDNode>(Op);
6162 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6163 M->getMemoryVT(), M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00006164 }
6165
Marek Olsak5cec6412017-11-09 01:52:48 +00006166 case Intrinsic::amdgcn_buffer_store:
6167 case Intrinsic::amdgcn_buffer_store_format: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006168 SDValue VData = Op.getOperand(2);
6169 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6170 if (IsD16)
6171 VData = handleD16VData(VData, DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00006172 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6173 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6174 unsigned IdxEn = 1;
6175 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6176 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00006177 SDValue Ops[] = {
6178 Chain,
Tim Renouf4f703f52018-08-21 11:07:10 +00006179 VData,
Marek Olsak5cec6412017-11-09 01:52:48 +00006180 Op.getOperand(3), // rsrc
6181 Op.getOperand(4), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00006182 SDValue(), // voffset -- will be set by setBufferOffsets
6183 SDValue(), // soffset -- will be set by setBufferOffsets
6184 SDValue(), // offset -- will be set by setBufferOffsets
6185 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6186 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00006187 };
Tim Renouf4f703f52018-08-21 11:07:10 +00006188 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006189 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6190 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6191 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6192 MemSDNode *M = cast<MemSDNode>(Op);
6193 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6194 M->getMemoryVT(), M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00006195 }
Tim Renouf4f703f52018-08-21 11:07:10 +00006196
6197 case Intrinsic::amdgcn_raw_buffer_store:
6198 case Intrinsic::amdgcn_raw_buffer_store_format: {
6199 SDValue VData = Op.getOperand(2);
6200 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6201 if (IsD16)
6202 VData = handleD16VData(VData, DAG);
6203 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6204 SDValue Ops[] = {
6205 Chain,
6206 VData,
6207 Op.getOperand(3), // rsrc
6208 DAG.getConstant(0, DL, MVT::i32), // vindex
6209 Offsets.first, // voffset
6210 Op.getOperand(5), // soffset
6211 Offsets.second, // offset
6212 Op.getOperand(6), // cachepolicy
6213 DAG.getConstant(0, DL, MVT::i1), // idxen
6214 };
6215 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ?
6216 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6217 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6218 MemSDNode *M = cast<MemSDNode>(Op);
6219 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6220 M->getMemoryVT(), M->getMemOperand());
6221 }
6222
6223 case Intrinsic::amdgcn_struct_buffer_store:
6224 case Intrinsic::amdgcn_struct_buffer_store_format: {
6225 SDValue VData = Op.getOperand(2);
6226 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6227 if (IsD16)
6228 VData = handleD16VData(VData, DAG);
6229 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6230 SDValue Ops[] = {
6231 Chain,
6232 VData,
6233 Op.getOperand(3), // rsrc
6234 Op.getOperand(4), // vindex
6235 Offsets.first, // voffset
6236 Op.getOperand(6), // soffset
6237 Offsets.second, // offset
6238 Op.getOperand(7), // cachepolicy
6239 DAG.getConstant(1, DL, MVT::i1), // idxen
6240 };
6241 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6242 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6243 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6244 MemSDNode *M = cast<MemSDNode>(Op);
6245 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6246 M->getMemoryVT(), M->getMemOperand());
6247 }
6248
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006249 default: {
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00006250 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6251 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6252 return lowerImage(Op, ImageDimIntr, DAG);
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006253
Matt Arsenault754dd3e2017-04-03 18:08:08 +00006254 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006255 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006256 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006257}
6258
Tim Renouf4f703f52018-08-21 11:07:10 +00006259// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
6260// offset (the offset that is included in bounds checking and swizzling, to be
6261// split between the instruction's voffset and immoffset fields) and soffset
6262// (the offset that is excluded from bounds checking and swizzling, to go in
6263// the instruction's soffset field). This function takes the first kind of
6264// offset and figures out how to split it between voffset and immoffset.
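// A few illustrative splits, using the MaxImm = 4095 limit below:
//   4100             -> {voffset = 4096,          immoffset = 4}
//   add(x, 40)       -> {voffset = x,             immoffset = 40}
//   add(x, 4100)     -> {voffset = add(x, 4096),  immoffset = 4}
//   x (non-constant) -> {voffset = x,             immoffset = 0}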
Tim Renouf35484c92018-08-21 11:06:05 +00006265std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
6266 SDValue Offset, SelectionDAG &DAG) const {
6267 SDLoc DL(Offset);
6268 const unsigned MaxImm = 4095;
6269 SDValue N0 = Offset;
6270 ConstantSDNode *C1 = nullptr;
Piotr Sobczak378131b2019-01-02 09:47:41 +00006271
6272 if ((C1 = dyn_cast<ConstantSDNode>(N0)))
Tim Renouf35484c92018-08-21 11:06:05 +00006273 N0 = SDValue();
Piotr Sobczak378131b2019-01-02 09:47:41 +00006274 else if (DAG.isBaseWithConstantOffset(N0)) {
6275 C1 = cast<ConstantSDNode>(N0.getOperand(1));
6276 N0 = N0.getOperand(0);
6277 }
Tim Renouf35484c92018-08-21 11:06:05 +00006278
6279 if (C1) {
6280 unsigned ImmOffset = C1->getZExtValue();
6281 // If the immediate value is too big for the immoffset field, put the value
Tim Renoufa37679d2018-10-03 10:29:43 +00006282 // and -4096 into the immoffset field so that the value that is copied/added
Tim Renouf35484c92018-08-21 11:06:05 +00006283 // for the voffset field is a multiple of 4096, and it stands more chance
6284 // of being CSEd with the copy/add for another similar load/store.
Tim Renoufa37679d2018-10-03 10:29:43 +00006285 // However, do not do that rounding down to a multiple of 4096 if that is a
6286 // negative number, as it appears to be illegal to have a negative offset
6287 // in the vgpr, even if adding the immediate offset makes it positive.
Tim Renouf35484c92018-08-21 11:06:05 +00006288 unsigned Overflow = ImmOffset & ~MaxImm;
6289 ImmOffset -= Overflow;
Tim Renoufa37679d2018-10-03 10:29:43 +00006290 if ((int32_t)Overflow < 0) {
6291 Overflow += ImmOffset;
6292 ImmOffset = 0;
6293 }
Tim Renouf35484c92018-08-21 11:06:05 +00006294 C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
6295 if (Overflow) {
6296 auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
6297 if (!N0)
6298 N0 = OverflowVal;
6299 else {
6300 SDValue Ops[] = { N0, OverflowVal };
6301 N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
6302 }
6303 }
6304 }
6305 if (!N0)
6306 N0 = DAG.getConstant(0, DL, MVT::i32);
6307 if (!C1)
6308 C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
6309 return {N0, SDValue(C1, 0)};
6310}
6311
Tim Renouf4f703f52018-08-21 11:07:10 +00006312// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
6313// three offsets (voffset, soffset and instoffset) into the SDValue[3] array
6314// pointed to by Offsets.
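// Roughly: when the combined offset is a constant, or a base plus a
// non-negative constant, that AMDGPU::splitMUBUFOffset accepts for this
// subtarget, the constant part is distributed between soffset and the
// immediate instoffset; otherwise the whole expression goes into voffset and
// the other two fields are zero.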
6315void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006316 SelectionDAG &DAG, SDValue *Offsets,
6317 unsigned Align) const {
Tim Renouf4f703f52018-08-21 11:07:10 +00006318 SDLoc DL(CombinedOffset);
6319 if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
6320 uint32_t Imm = C->getZExtValue();
6321 uint32_t SOffset, ImmOffset;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006322 if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
Tim Renouf4f703f52018-08-21 11:07:10 +00006323 Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
6324 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6325 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6326 return;
6327 }
6328 }
6329 if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
6330 SDValue N0 = CombinedOffset.getOperand(0);
6331 SDValue N1 = CombinedOffset.getOperand(1);
6332 uint32_t SOffset, ImmOffset;
6333 int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006334 if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
6335 Subtarget, Align)) {
Tim Renouf4f703f52018-08-21 11:07:10 +00006336 Offsets[0] = N0;
6337 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6338 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6339 return;
6340 }
6341 }
6342 Offsets[0] = CombinedOffset;
6343 Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
6344 Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
6345}
6346
Matt Arsenault90083d32018-06-07 09:54:49 +00006347static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
6348 ISD::LoadExtType ExtType, SDValue Op,
6349 const SDLoc &SL, EVT VT) {
6350 if (VT.bitsLT(Op.getValueType()))
6351 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
6352
6353 switch (ExtType) {
6354 case ISD::SEXTLOAD:
6355 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
6356 case ISD::ZEXTLOAD:
6357 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
6358 case ISD::EXTLOAD:
6359 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
6360 case ISD::NON_EXTLOAD:
6361 return Op;
6362 }
6363
6364 llvm_unreachable("invalid ext type");
6365}
6366
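// Widen a sub-dword load from constant (or invariant global) memory to a full
// 32-bit load and truncate or extend the result back to the original type, so
// that uniform i8/i16 loads can still be selected to dword-sized scalar loads.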
6367SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
6368 SelectionDAG &DAG = DCI.DAG;
6369 if (Ld->getAlignment() < 4 || Ld->isDivergent())
6370 return SDValue();
6371
6372 // FIXME: Constant loads should all be marked invariant.
6373 unsigned AS = Ld->getAddressSpace();
Matt Arsenault0da63502018-08-31 05:49:54 +00006374 if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
6375 AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Matt Arsenault90083d32018-06-07 09:54:49 +00006376 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
6377 return SDValue();
6378
6379 // Don't do this early, since it may interfere with adjacent load merging for
6380 // illegal types. We can avoid losing alignment information for exotic types
6381 // pre-legalize.
6382 EVT MemVT = Ld->getMemoryVT();
6383 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
6384 MemVT.getSizeInBits() >= 32)
6385 return SDValue();
6386
6387 SDLoc SL(Ld);
6388
6389 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
6390 "unexpected vector extload");
6391
6392 // TODO: Drop only high part of range.
6393 SDValue Ptr = Ld->getBasePtr();
6394 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
6395 MVT::i32, SL, Ld->getChain(), Ptr,
6396 Ld->getOffset(),
6397 Ld->getPointerInfo(), MVT::i32,
6398 Ld->getAlignment(),
6399 Ld->getMemOperand()->getFlags(),
6400 Ld->getAAInfo(),
6401 nullptr); // Drop ranges
6402
6403 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
6404 if (MemVT.isFloatingPoint()) {
6405 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
6406 "unexpected fp extload");
6407 TruncVT = MemVT.changeTypeToInteger();
6408 }
6409
6410 SDValue Cvt = NewLoad;
6411 if (Ld->getExtensionType() == ISD::SEXTLOAD) {
6412 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
6413 DAG.getValueType(TruncVT));
6414 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
6415 Ld->getExtensionType() == ISD::NON_EXTLOAD) {
6416 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
6417 } else {
6418 assert(Ld->getExtensionType() == ISD::EXTLOAD);
6419 }
6420
6421 EVT VT = Ld->getValueType(0);
6422 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
6423
6424 DCI.AddToWorklist(Cvt.getNode());
6425
6426 // We may need to handle exotic cases, such as i16->i64 extloads, so insert
6427 // the appropriate extension from the 32-bit load.
6428 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
6429 DCI.AddToWorklist(Cvt.getNode());
6430
6431 // Handle conversion back to floating point if necessary.
6432 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
6433
6434 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
6435}
6436
Tom Stellard81d871d2013-11-13 23:36:50 +00006437SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6438 SDLoc DL(Op);
6439 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00006440 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00006441 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00006442
Matt Arsenaulta1436412016-02-10 18:21:45 +00006443 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00006444 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
6445 return SDValue();
6446
Matt Arsenault6dfda962016-02-10 18:21:39 +00006447 // FIXME: Copied from PPC
6448 // First, load into 32 bits, then truncate to 1 bit.
6449
6450 SDValue Chain = Load->getChain();
6451 SDValue BasePtr = Load->getBasePtr();
6452 MachineMemOperand *MMO = Load->getMemOperand();
6453
Tom Stellard115a6152016-11-10 16:02:37 +00006454 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
6455
Matt Arsenault6dfda962016-02-10 18:21:39 +00006456 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00006457 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00006458
6459 SDValue Ops[] = {
Matt Arsenaulta1436412016-02-10 18:21:45 +00006460 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
Matt Arsenault6dfda962016-02-10 18:21:39 +00006461 NewLD.getValue(1)
6462 };
6463
6464 return DAG.getMergeValues(Ops, DL);
6465 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006466
Matt Arsenaulta1436412016-02-10 18:21:45 +00006467 if (!MemVT.isVector())
6468 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00006469
Matt Arsenaulta1436412016-02-10 18:21:45 +00006470 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
6471 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00006472
Farhana Aleen89196642018-03-07 17:09:18 +00006473 unsigned Alignment = Load->getAlignment();
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006474 unsigned AS = Load->getAddressSpace();
6475 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
Farhana Aleen89196642018-03-07 17:09:18 +00006476 AS, Alignment)) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006477 SDValue Ops[2];
6478 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
6479 return DAG.getMergeValues(Ops, DL);
6480 }
6481
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006482 MachineFunction &MF = DAG.getMachineFunction();
6483 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6484 // If there is a possibility that flat instructions access scratch memory
6485 // then we need to use the same legalization rules we use for private.
Matt Arsenault0da63502018-08-31 05:49:54 +00006486 if (AS == AMDGPUAS::FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006487 AS = MFI->hasFlatScratchInit() ?
Matt Arsenault0da63502018-08-31 05:49:54 +00006488 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006489
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006490 unsigned NumElements = MemVT.getVectorNumElements();
Matt Arsenault6c041a32018-03-29 19:59:28 +00006491
Matt Arsenault0da63502018-08-31 05:49:54 +00006492 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6493 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
Stanislav Mekhanoshin44451b32018-08-31 22:43:36 +00006494 if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32)
Matt Arsenaulta1436412016-02-10 18:21:45 +00006495 return SDValue();
6496 // Non-uniform loads will be selected to MUBUF instructions, so they
Alexander Timofeev18009562016-12-08 17:28:47 +00006497 // have the same legalization requirements as global and private
Matt Arsenaulta1436412016-02-10 18:21:45 +00006498 // loads.
6499 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006500 }
Matt Arsenault6c041a32018-03-29 19:59:28 +00006501
Matt Arsenault0da63502018-08-31 05:49:54 +00006502 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6503 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6504 AS == AMDGPUAS::GLOBAL_ADDRESS) {
Alexander Timofeev2e5eece2018-03-05 15:12:21 +00006505 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Farhana Aleen89196642018-03-07 17:09:18 +00006506 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
Stanislav Mekhanoshin44451b32018-08-31 22:43:36 +00006507 Alignment >= 4 && NumElements < 32)
Alexander Timofeev18009562016-12-08 17:28:47 +00006508 return SDValue();
6509 // Non-uniform loads will be selected to MUBUF instructions, so they
6510 // have the same legalization requirements as global and private
6511 // loads.
6512 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006513 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006514 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6515 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6516 AS == AMDGPUAS::GLOBAL_ADDRESS ||
6517 AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006518 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00006519 return SplitVectorLoad(Op, DAG);
6520 // v4 loads are supported for private and global memory.
6521 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006522 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006523 if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006524 // Depending on the setting of the private_element_size field in the
6525 // resource descriptor, we can only make private accesses up to a certain
6526 // size.
6527 switch (Subtarget->getMaxPrivateElementSize()) {
6528 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006529 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006530 case 8:
6531 if (NumElements > 2)
6532 return SplitVectorLoad(Op, DAG);
6533 return SDValue();
6534 case 16:
6535 // Same as global/flat
6536 if (NumElements > 4)
6537 return SplitVectorLoad(Op, DAG);
6538 return SDValue();
6539 default:
6540 llvm_unreachable("unsupported private_element_size");
6541 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006542 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Farhana Aleena7cb3112018-03-09 17:41:39 +00006543 // Use ds_read_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006544 if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
Farhana Aleena7cb3112018-03-09 17:41:39 +00006545 MemVT.getStoreSize() == 16)
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006546 return SDValue();
6547
Farhana Aleena7cb3112018-03-09 17:41:39 +00006548 if (NumElements > 2)
6549 return SplitVectorLoad(Op, DAG);
Nicolai Haehnle48219372018-10-17 15:37:48 +00006550
6551 // SI has a hardware bug in the LDS / GDS bounds checking: if the base
6552 // address is negative, then the instruction is incorrectly treated as
6553 // out-of-bounds even if base + offsets are in bounds. Split vectorized
6554 // loads here to avoid emitting ds_read2_b32. We may re-combine the
6555 // load later in the SILoadStoreOptimizer.
6556 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
6557 NumElements == 2 && MemVT.getStoreSize() == 8 &&
6558 Load->getAlignment() < 8) {
6559 return SplitVectorLoad(Op, DAG);
6560 }
Tom Stellarde9373602014-01-22 19:24:14 +00006561 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006562 return SDValue();
Tom Stellard81d871d2013-11-13 23:36:50 +00006563}
6564
Tom Stellard0ec134f2014-02-04 17:18:40 +00006565SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006566 EVT VT = Op.getValueType();
6567 assert(VT.getSizeInBits() == 64);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006568
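  // Split the 64-bit select: bitcast both operands to v2i32, select the low and
  // high halves with two 32-bit selects, then reassemble and bitcast back.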
6569 SDLoc DL(Op);
6570 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006571
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006572 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
6573 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006574
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00006575 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
6576 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
6577
6578 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
6579 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006580
6581 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
6582
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00006583 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
6584 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006585
6586 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
6587
Ahmed Bougacha128f8732016-04-26 21:15:30 +00006588 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006589 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006590}
6591
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006592// Catch division cases where we can use shortcuts with rcp and rsq
6593// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00006594SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
6595 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006596 SDLoc SL(Op);
6597 SDValue LHS = Op.getOperand(0);
6598 SDValue RHS = Op.getOperand(1);
6599 EVT VT = Op.getValueType();
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006600 const SDNodeFlags Flags = Op->getFlags();
Michael Berg7acc81b2018-05-04 18:48:20 +00006601 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006602
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00006603 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
6604 return SDValue();
6605
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006606 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00006607 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
Matt Arsenault979902b2016-08-02 22:25:04 +00006608 if (CLHS->isExactlyValue(1.0)) {
6609 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
6610 // the CI documentation has a worst case error of 1 ulp.
6611 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
6612 // use it as long as we aren't trying to use denormals.
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006613 //
6614 // v_rcp_f16 and v_rsq_f16 DO support denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006615
Matt Arsenault979902b2016-08-02 22:25:04 +00006616 // 1.0 / sqrt(x) -> rsq(x)
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006617
Matt Arsenault979902b2016-08-02 22:25:04 +00006618 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
6619 // error seems really high at 2^29 ULP.
6620 if (RHS.getOpcode() == ISD::FSQRT)
6621 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
6622
6623 // 1.0 / x -> rcp(x)
6624 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
6625 }
6626
6627 // Same as for 1.0, but expand the sign out of the constant.
6628 if (CLHS->isExactlyValue(-1.0)) {
6629 // -1.0 / x -> rcp (fneg x)
6630 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
6631 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
6632 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006633 }
6634 }
6635
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006636 if (Unsafe) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006637 // Turn into multiply by the reciprocal.
6638 // x / y -> x * (1.0 / y)
6639 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006640 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006641 }
6642
6643 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006644}
6645
Tom Stellard8485fa02016-12-07 02:42:15 +00006646static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
6647 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
6648 if (GlueChain->getNumValues() <= 1) {
6649 return DAG.getNode(Opcode, SL, VT, A, B);
6650 }
6651
6652 assert(GlueChain->getNumValues() == 3);
6653
6654 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
6655 switch (Opcode) {
6656 default: llvm_unreachable("no chain equivalent for opcode");
6657 case ISD::FMUL:
6658 Opcode = AMDGPUISD::FMUL_W_CHAIN;
6659 break;
6660 }
6661
6662 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
6663 GlueChain.getValue(2));
6664}
6665
6666static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
6667 EVT VT, SDValue A, SDValue B, SDValue C,
6668 SDValue GlueChain) {
6669 if (GlueChain->getNumValues() <= 1) {
6670 return DAG.getNode(Opcode, SL, VT, A, B, C);
6671 }
6672
6673 assert(GlueChain->getNumValues() == 3);
6674
6675 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
6676 switch (Opcode) {
6677 default: llvm_unreachable("no chain equivalent for opcode");
6678 case ISD::FMA:
6679 Opcode = AMDGPUISD::FMA_W_CHAIN;
6680 break;
6681 }
6682
6683 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
6684 GlueChain.getValue(2));
6685}
6686
Matt Arsenault4052a572016-12-22 03:05:41 +00006687SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006688 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
6689 return FastLowered;
6690
Matt Arsenault4052a572016-12-22 03:05:41 +00006691 SDLoc SL(Op);
6692 SDValue Src0 = Op.getOperand(0);
6693 SDValue Src1 = Op.getOperand(1);
6694
6695 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
6696 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
6697
6698 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
6699 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
6700
6701 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
6702 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
6703
6704 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
6705}
6706
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00006707// Faster 2.5 ULP division that does not support denormals.
6708SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
6709 SDLoc SL(Op);
6710 SDValue LHS = Op.getOperand(1);
6711 SDValue RHS = Op.getOperand(2);
6712
6713 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
6714
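  // K0 is 2^+96 and K1 is 2^-32. If |RHS| exceeds 2^96, the denominator is
  // pre-scaled by 2^-32 before the reciprocal is taken (so the rcp result stays
  // well away from the denormal range it cannot represent), and the final
  // product is multiplied by the same 2^-32 factor to compensate.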
6715 const APFloat K0Val(BitsToFloat(0x6f800000));
6716 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
6717
6718 const APFloat K1Val(BitsToFloat(0x2f800000));
6719 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
6720
6721 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
6722
6723 EVT SetCCVT =
6724 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
6725
6726 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
6727
6728 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
6729
6730 // TODO: Should this propagate fast-math-flags?
6731 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
6732
6733 // rcp does not support denormals.
6734 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
6735
6736 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
6737
6738 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
6739}
6740
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006741SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00006742 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00006743 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006744
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006745 SDLoc SL(Op);
6746 SDValue LHS = Op.getOperand(0);
6747 SDValue RHS = Op.getOperand(1);
6748
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006749 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006750
Wei Dinged0f97f2016-06-09 19:17:15 +00006751 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006752
Tom Stellard8485fa02016-12-07 02:42:15 +00006753 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
6754 RHS, RHS, LHS);
6755 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
6756 LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006757
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00006758 // Denominator is scaled to not be denormal, so using rcp is ok.
Tom Stellard8485fa02016-12-07 02:42:15 +00006759 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
6760 DenominatorScaled);
6761 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
6762 DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006763
Tom Stellard8485fa02016-12-07 02:42:15 +00006764 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
6765 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
6766 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006767
Tom Stellard8485fa02016-12-07 02:42:15 +00006768 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006769
Tom Stellard8485fa02016-12-07 02:42:15 +00006770 if (!Subtarget->hasFP32Denormals()) {
6771 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
6772 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
6773 SL, MVT::i32);
6774 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
6775 DAG.getEntryNode(),
6776 EnableDenormValue, BitField);
6777 SDValue Ops[3] = {
6778 NegDivScale0,
6779 EnableDenorm.getValue(0),
6780 EnableDenorm.getValue(1)
6781 };
Matt Arsenault37fefd62016-06-10 02:18:02 +00006782
Tom Stellard8485fa02016-12-07 02:42:15 +00006783 NegDivScale0 = DAG.getMergeValues(Ops, SL);
6784 }
6785
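  // Refine the reciprocal and the quotient with fused multiply-adds, all on the
  // scaled operands:
  //   Fma0 = 1 - den * rcp       ; error of the initial rcp approximation
  //   Fma1 = rcp + Fma0 * rcp    ; refined reciprocal
  //   Mul  = num * Fma1          ; initial quotient estimate
  //   Fma2 = num - den * Mul     ; residual of that estimate
  //   Fma3 = Mul + Fma2 * Fma1   ; refined quotient
  //   Fma4 = num - den * Fma3    ; final residual, consumed by DIV_FMAS below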
6786 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
6787 ApproxRcp, One, NegDivScale0);
6788
6789 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
6790 ApproxRcp, Fma0);
6791
6792 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
6793 Fma1, Fma1);
6794
6795 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
6796 NumeratorScaled, Mul);
6797
6798 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
6799
6800 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
6801 NumeratorScaled, Fma3);
6802
6803 if (!Subtarget->hasFP32Denormals()) {
6804 const SDValue DisableDenormValue =
6805 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
6806 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
6807 Fma4.getValue(1),
6808 DisableDenormValue,
6809 BitField,
6810 Fma4.getValue(2));
6811
6812 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
6813 DisableDenorm, DAG.getRoot());
6814 DAG.setRoot(OutputChain);
6815 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00006816
Wei Dinged0f97f2016-06-09 19:17:15 +00006817 SDValue Scale = NumeratorScaled.getValue(1);
Tom Stellard8485fa02016-12-07 02:42:15 +00006818 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
6819 Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00006820
Wei Dinged0f97f2016-06-09 19:17:15 +00006821 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006822}
6823
6824SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00006825 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00006826 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00006827
6828 SDLoc SL(Op);
6829 SDValue X = Op.getOperand(0);
6830 SDValue Y = Op.getOperand(1);
6831
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006832 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00006833
6834 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
6835
6836 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
6837
6838 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
6839
6840 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
6841
6842 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
6843
6844 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
6845
6846 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
6847
6848 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
6849
6850 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
6851 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
6852
6853 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
6854 NegDivScale0, Mul, DivScale1);
6855
6856 SDValue Scale;
6857
Tom Stellard5bfbae52018-07-11 20:59:01 +00006858 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00006859 // Work around a hardware bug on SI where the condition output from div_scale
6860 // is not usable.
6861
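    // Derive an equivalent flag from the data instead: compare the high dwords
    // of the div_scale results against the original numerator and denominator
    // to detect whether either operand was actually scaled, and combine the two
    // tests to recover the condition DIV_FMAS expects.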
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006862 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00006863
6864 // Figure out which scale to use for div_fmas.
6865 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
6866 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
6867 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
6868 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
6869
6870 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
6871 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
6872
6873 SDValue Scale0Hi
6874 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
6875 SDValue Scale1Hi
6876 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
6877
6878 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
6879 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
6880 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
6881 } else {
6882 Scale = DivScale1.getValue(1);
6883 }
6884
6885 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
6886 Fma4, Fma3, Mul, Scale);
6887
6888 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006889}
6890
6891SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
6892 EVT VT = Op.getValueType();
6893
6894 if (VT == MVT::f32)
6895 return LowerFDIV32(Op, DAG);
6896
6897 if (VT == MVT::f64)
6898 return LowerFDIV64(Op, DAG);
6899
Matt Arsenault4052a572016-12-22 03:05:41 +00006900 if (VT == MVT::f16)
6901 return LowerFDIV16(Op, DAG);
6902
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006903 llvm_unreachable("Unexpected type for fdiv");
6904}
6905
Tom Stellard81d871d2013-11-13 23:36:50 +00006906SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
6907 SDLoc DL(Op);
6908 StoreSDNode *Store = cast<StoreSDNode>(Op);
6909 EVT VT = Store->getMemoryVT();
6910
Matt Arsenault95245662016-02-11 05:32:46 +00006911 if (VT == MVT::i1) {
6912 return DAG.getTruncStore(Store->getChain(), DL,
6913 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
6914 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00006915 }
6916
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006917 assert(VT.isVector() &&
6918 Store->getValue().getValueType().getScalarType() == MVT::i32);
6919
6920 unsigned AS = Store->getAddressSpace();
6921 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
6922 AS, Store->getAlignment())) {
6923 return expandUnalignedStore(Store, DAG);
6924 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006925
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006926 MachineFunction &MF = DAG.getMachineFunction();
6927 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6928 // If there is a possibility that flat instructions access scratch memory
6929 // then we need to use the same legalization rules we use for private.
Matt Arsenault0da63502018-08-31 05:49:54 +00006930 if (AS == AMDGPUAS::FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006931 AS = MFI->hasFlatScratchInit() ?
Matt Arsenault0da63502018-08-31 05:49:54 +00006932 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006933
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006934 unsigned NumElements = VT.getVectorNumElements();
Matt Arsenault0da63502018-08-31 05:49:54 +00006935 if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
6936 AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006937 if (NumElements > 4)
6938 return SplitVectorStore(Op, DAG);
6939 return SDValue();
Matt Arsenault0da63502018-08-31 05:49:54 +00006940 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006941 switch (Subtarget->getMaxPrivateElementSize()) {
6942 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006943 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006944 case 8:
6945 if (NumElements > 2)
6946 return SplitVectorStore(Op, DAG);
6947 return SDValue();
6948 case 16:
6949 if (NumElements > 4)
6950 return SplitVectorStore(Op, DAG);
6951 return SDValue();
6952 default:
6953 llvm_unreachable("unsupported private_element_size");
6954 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006955 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006956 // Use ds_write_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006957 if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006958 VT.getStoreSize() == 16)
6959 return SDValue();
6960
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006961 if (NumElements > 2)
6962 return SplitVectorStore(Op, DAG);
Nicolai Haehnle48219372018-10-17 15:37:48 +00006963
6964 // SI has a hardware bug in the LDS / GDS bounds checking: if the base
6965 // address is negative, then the instruction is incorrectly treated as
6966 // out-of-bounds even if base + offsets are in bounds. Split vectorized
6967 // stores here to avoid emitting ds_write2_b32. We may re-combine the
6968 // store later in the SILoadStoreOptimizer.
6969 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
6970 NumElements == 2 && VT.getStoreSize() == 8 &&
6971 Store->getAlignment() < 8) {
6972 return SplitVectorStore(Op, DAG);
6973 }
6974
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00006975 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006976 } else {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006977 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00006978 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006979}
6980
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006981SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006982 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006983 EVT VT = Op.getValueType();
6984 SDValue Arg = Op.getOperand(0);
David Stuttard20de3e92018-09-14 10:27:19 +00006985 SDValue TrigVal;
6986
Sanjay Patela2607012015-09-16 16:31:21 +00006987 // TODO: Should this propagate fast-math-flags?
David Stuttard20de3e92018-09-14 10:27:19 +00006988
6989 SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
6990
6991 if (Subtarget->hasTrigReducedRange()) {
6992 SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
6993 TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
6994 } else {
6995 TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
6996 }
Matt Arsenaultad14ce82014-07-19 18:44:39 +00006997
6998 switch (Op.getOpcode()) {
6999 case ISD::FCOS:
David Stuttard20de3e92018-09-14 10:27:19 +00007000 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007001 case ISD::FSIN:
David Stuttard20de3e92018-09-14 10:27:19 +00007002 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007003 default:
7004 llvm_unreachable("Wrong trig opcode");
7005 }
7006}
7007
Tom Stellard354a43c2016-04-01 18:27:37 +00007008SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7009 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7010 assert(AtomicNode->isCompareAndSwap());
7011 unsigned AS = AtomicNode->getAddressSpace();
7012
7013 // No custom lowering required for local address space
Matt Arsenault0da63502018-08-31 05:49:54 +00007014 if (!isFlatGlobalAddrSpace(AS))
Tom Stellard354a43c2016-04-01 18:27:37 +00007015 return Op;
7016
7017 // Non-local address space requires custom lowering for atomic compare
7018 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7019 SDLoc DL(Op);
7020 SDValue ChainIn = Op.getOperand(0);
7021 SDValue Addr = Op.getOperand(1);
7022 SDValue Old = Op.getOperand(2);
7023 SDValue New = Op.getOperand(3);
7024 EVT VT = Op.getValueType();
7025 MVT SimpleVT = VT.getSimpleVT();
7026 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7027
Ahmed Bougacha128f8732016-04-26 21:15:30 +00007028 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00007029 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00007030
7031 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7032 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00007033}
7034
Tom Stellard75aadc22012-12-11 21:25:42 +00007035//===----------------------------------------------------------------------===//
7036// Custom DAG optimizations
7037//===----------------------------------------------------------------------===//
7038
Matt Arsenault364a6742014-06-11 17:50:44 +00007039SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00007040 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00007041 EVT VT = N->getValueType(0);
7042 EVT ScalarVT = VT.getScalarType();
7043 if (ScalarVT != MVT::f32)
7044 return SDValue();
7045
7046 SelectionDAG &DAG = DCI.DAG;
7047 SDLoc DL(N);
7048
7049 SDValue Src = N->getOperand(0);
7050 EVT SrcVT = Src.getValueType();
7051
7052 // TODO: We could try to match extracting the higher bytes, which would be
7053 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7054 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7055 // about in practice.
Craig Topper80d3bb32018-03-06 19:44:52 +00007056 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
Matt Arsenault364a6742014-06-11 17:50:44 +00007057 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7058 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7059 DCI.AddToWorklist(Cvt.getNode());
7060 return Cvt;
7061 }
7062 }
7063
Matt Arsenault364a6742014-06-11 17:50:44 +00007064 return SDValue();
7065}
7066
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007067// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7068
7069// This is a variant of
7070// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7071//
7072// The normal DAG combiner will do this, but only if the add has one use since
7073// that would increase the number of instructions.
7074//
7075// This prevents us from seeing a constant offset that can be folded into a
7076// memory instruction's addressing mode. If we know the resulting add offset of
7077// a pointer can be folded into an addressing offset, we can replace the pointer
7078 // operand with the add of the new constant offset. This eliminates one of the uses,
7079// and may allow the remaining use to also be simplified.
7080//
7081SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7082 unsigned AddrSpace,
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007083 EVT MemVT,
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007084 DAGCombinerInfo &DCI) const {
7085 SDValue N0 = N->getOperand(0);
7086 SDValue N1 = N->getOperand(1);
7087
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007088 // We only do this to handle cases where it's profitable when there are
7089 // multiple uses of the add, so defer to the standard combine.
Matt Arsenaultc8903122017-11-14 23:46:42 +00007090 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7091 N0->hasOneUse())
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007092 return SDValue();
7093
7094 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7095 if (!CN1)
7096 return SDValue();
7097
7098 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7099 if (!CAdd)
7100 return SDValue();
7101
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007102 // If the resulting offset is too large, we can't fold it into the addressing
7103 // mode offset.
7104 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007105 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7106
7107 AddrMode AM;
7108 AM.HasBaseReg = true;
7109 AM.BaseOffs = Offset.getSExtValue();
7110 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007111 return SDValue();
7112
7113 SelectionDAG &DAG = DCI.DAG;
7114 SDLoc SL(N);
7115 EVT VT = N->getValueType(0);
7116
7117 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007118 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007119
Matt Arsenaulte5e0c742017-11-13 05:33:35 +00007120 SDNodeFlags Flags;
7121 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7122 (N0.getOpcode() == ISD::OR ||
7123 N0->getFlags().hasNoUnsignedWrap()));
7124
7125 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007126}
7127
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007128SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7129 DAGCombinerInfo &DCI) const {
7130 SDValue Ptr = N->getBasePtr();
7131 SelectionDAG &DAG = DCI.DAG;
7132 SDLoc SL(N);
7133
7134 // TODO: We could also do this for multiplies.
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007135 if (Ptr.getOpcode() == ISD::SHL) {
7136 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
7137 N->getMemoryVT(), DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007138 if (NewPtr) {
7139 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
7140
7141 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
7142 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
7143 }
7144 }
7145
7146 return SDValue();
7147}
7148
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007149static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
7150 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
7151 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
7152 (Opc == ISD::XOR && Val == 0);
7153}
7154
7155 // Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor ops. This
7156// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
7157// integer combine opportunities since most 64-bit operations are decomposed
7158// this way. TODO: We won't want this for SALU especially if it is an inline
7159// immediate.
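// For example, (and i64:x, 0xffffffff00000000) splits into an AND of the low
// half with 0 (folds to 0) and an AND of the high half with 0xffffffff (folds
// to hi(x)), so no 64-bit mask needs to be materialized.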
7160SDValue SITargetLowering::splitBinaryBitConstantOp(
7161 DAGCombinerInfo &DCI,
7162 const SDLoc &SL,
7163 unsigned Opc, SDValue LHS,
7164 const ConstantSDNode *CRHS) const {
7165 uint64_t Val = CRHS->getZExtValue();
7166 uint32_t ValLo = Lo_32(Val);
7167 uint32_t ValHi = Hi_32(Val);
7168 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7169
7170 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
7171 bitOpWithConstantIsReducible(Opc, ValHi)) ||
7172 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7173 // If we need to materialize a 64-bit immediate, it will be split up later
7174 // anyway. Avoid creating the harder to understand 64-bit immediate
7175 // materialization.
7176 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
7177 }
7178
7179 return SDValue();
7180}
7181
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007182 // Returns true if the argument is a boolean value which is not serialized into
7183 // memory or an argument and does not require v_cndmask_b32 to be deserialized.
7184static bool isBoolSGPR(SDValue V) {
7185 if (V.getValueType() != MVT::i1)
7186 return false;
7187 switch (V.getOpcode()) {
7188 default: break;
7189 case ISD::SETCC:
7190 case ISD::AND:
7191 case ISD::OR:
7192 case ISD::XOR:
7193 case AMDGPUISD::FP_CLASS:
7194 return true;
7195 }
7196 return false;
7197}
7198
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007199// If a constant has all zeroes or all ones within each byte return it.
7200// Otherwise return 0.
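// For example, C = 0x00ff00ff is returned unchanged because every byte is all
// zeroes or all ones, while C = 0x0000ff01 returns 0 because the low byte is
// only partially set.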
7201static uint32_t getConstantPermuteMask(uint32_t C) {
7202 // 0xff for any zero byte in the mask
7203 uint32_t ZeroByteMask = 0;
7204 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
7205 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
7206 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
7207 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
7208 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
7209 if ((NonZeroByteMask & C) != NonZeroByteMask)
7210 return 0; // Partial bytes selected.
7211 return C;
7212}
7213
7214// Check if a node selects whole bytes from its operand 0 starting at a byte
7215 // boundary while masking the rest. Returns the select mask as used by v_perm_b32,
7216 // or ~0 if it does not succeed.
7217// Note byte select encoding:
7218// value 0-3 selects corresponding source byte;
7219// value 0xc selects zero;
7220// value 0xff selects 0xff.
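// For example, (and x, 0x0000ffff) yields the mask 0x0c0c0100 (bytes 0 and 1
// taken from the source, zero for bytes 2 and 3), and (shl x, 16) yields
// 0x01000c0c.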
7221static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
7222 assert(V.getValueSizeInBits() == 32);
7223
7224 if (V.getNumOperands() != 2)
7225 return ~0;
7226
7227 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
7228 if (!N1)
7229 return ~0;
7230
7231 uint32_t C = N1->getZExtValue();
7232
7233 switch (V.getOpcode()) {
7234 default:
7235 break;
7236 case ISD::AND:
7237 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7238 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
7239 }
7240 break;
7241
7242 case ISD::OR:
7243 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7244 return (0x03020100 & ~ConstMask) | ConstMask;
7245 }
7246 break;
7247
7248 case ISD::SHL:
7249 if (C % 8)
7250 return ~0;
7251
7252 return uint32_t((0x030201000c0c0c0cull << C) >> 32);
7253
7254 case ISD::SRL:
7255 if (C % 8)
7256 return ~0;
7257
7258 return uint32_t(0x0c0c0c0c03020100ull >> C);
7259 }
7260
7261 return ~0;
7262}
7263
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007264SDValue SITargetLowering::performAndCombine(SDNode *N,
7265 DAGCombinerInfo &DCI) const {
7266 if (DCI.isBeforeLegalize())
7267 return SDValue();
7268
7269 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007270 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007271 SDValue LHS = N->getOperand(0);
7272 SDValue RHS = N->getOperand(1);
7273
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007274
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00007275 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7276 if (VT == MVT::i64 && CRHS) {
7277 if (SDValue Split
7278 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
7279 return Split;
7280 }
7281
7282 if (CRHS && VT == MVT::i32) {
7283 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
7284 // nb = number of trailing zeroes in mask
7285 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
7286 // given that we are selecting 8 or 16 bit fields starting at byte boundary.
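// For example, (and (srl x, 8), 0xff00) becomes (shl (bfe x, 16, 8), 8).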
7287 uint64_t Mask = CRHS->getZExtValue();
7288 unsigned Bits = countPopulation(Mask);
7289 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
7290 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
7291 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
7292 unsigned Shift = CShift->getZExtValue();
7293 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
7294 unsigned Offset = NB + Shift;
7295 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
7296 SDLoc SL(N);
7297 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
7298 LHS->getOperand(0),
7299 DAG.getConstant(Offset, SL, MVT::i32),
7300 DAG.getConstant(Bits, SL, MVT::i32));
7301 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7302 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
7303 DAG.getValueType(NarrowVT));
7304 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
7305 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
7306 return Shl;
7307 }
7308 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007309 }
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007310
7311 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7312 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
7313 isa<ConstantSDNode>(LHS.getOperand(2))) {
7314 uint32_t Sel = getConstantPermuteMask(Mask);
7315 if (!Sel)
7316 return SDValue();
7317
7318 // Select 0xc for all zero bytes
7319 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
7320 SDLoc DL(N);
7321 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7322 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7323 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007324 }
7325
7326 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
7327 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
7328 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007329 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7330 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
7331
7332 SDValue X = LHS.getOperand(0);
7333 SDValue Y = RHS.getOperand(0);
7334 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
7335 return SDValue();
7336
7337 if (LCC == ISD::SETO) {
7338 if (X != LHS.getOperand(1))
7339 return SDValue();
7340
7341 if (RCC == ISD::SETUNE) {
7342 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
7343 if (!C1 || !C1->isInfinity() || C1->isNegative())
7344 return SDValue();
7345
7346 const uint32_t Mask = SIInstrFlags::N_NORMAL |
7347 SIInstrFlags::N_SUBNORMAL |
7348 SIInstrFlags::N_ZERO |
7349 SIInstrFlags::P_ZERO |
7350 SIInstrFlags::P_SUBNORMAL |
7351 SIInstrFlags::P_NORMAL;
7352
7353 static_assert(((~(SIInstrFlags::S_NAN |
7354 SIInstrFlags::Q_NAN |
7355 SIInstrFlags::N_INFINITY |
7356 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
7357 "mask not equal");
7358
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007359 SDLoc DL(N);
7360 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7361 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007362 }
7363 }
7364 }
7365
Matt Arsenault3dcf4ce2018-08-10 18:58:56 +00007366 if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
7367 std::swap(LHS, RHS);
7368
7369 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7370 RHS.hasOneUse()) {
7371 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7372 // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
7373 // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
7374 const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7375 if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
7376 (RHS.getOperand(0) == LHS.getOperand(0) &&
7377 LHS.getOperand(0) == LHS.getOperand(1))) {
7378 const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
7379 unsigned NewMask = LCC == ISD::SETO ?
7380 Mask->getZExtValue() & ~OrdMask :
7381 Mask->getZExtValue() & OrdMask;
7382
7383 SDLoc DL(N);
7384 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
7385 DAG.getConstant(NewMask, DL, MVT::i32));
7386 }
7387 }
7388
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007389 if (VT == MVT::i32 &&
7390 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
7391 // and x, (sext cc from i1) => select cc, x, 0
7392 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
7393 std::swap(LHS, RHS);
7394 if (isBoolSGPR(RHS.getOperand(0)))
7395 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
7396 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
7397 }
7398
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007399 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7400 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7401 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7402 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7403 uint32_t LHSMask = getPermuteMask(DAG, LHS);
7404 uint32_t RHSMask = getPermuteMask(DAG, RHS);
7405 if (LHSMask != ~0u && RHSMask != ~0u) {
7406 // Canonicalize the expression in an attempt to have fewer unique masks
7407 // and therefore fewer registers used to hold the masks.
7408 if (LHSMask > RHSMask) {
7409 std::swap(LHSMask, RHSMask);
7410 std::swap(LHS, RHS);
7411 }
7412
7413 // Select 0xc for each lane used from the source operand. Zero has the 0xc mask
7414 // set, 0xff has 0xff in the mask, and actual lanes are in the 0-3 range.
7415 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7416 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7417
7418 // Check if we need to combine values from two sources within a byte.
7419 if (!(LHSUsedLanes & RHSUsedLanes) &&
7420 // If we select the high and low words, keep it for SDWA.
7421 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7422 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7423 // Each byte in each mask is either a selector mask 0-3, or has higher
7424 // bits set in either of the masks, which can be 0xff for 0xff or 0x0c for
7425 // zero. If 0x0c is in either mask it shall always be 0x0c. Otherwise the
7426 // mask which is not 0xff wins. By ANDing both masks we get a correct
7427 // result except that bytes selecting zero must be corrected back to 0x0c.
7428 uint32_t Mask = LHSMask & RHSMask;
7429 for (unsigned I = 0; I < 32; I += 8) {
7430 uint32_t ByteSel = 0xff << I;
7431 if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
7432 Mask &= (0x0c << I) & 0xffffffff;
7433 }
7434
7435 // Add 4 to each active LHS lane. It will not affect any existing 0xff
7436 // or 0x0c.
7437 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
7438 SDLoc DL(N);
7439
7440 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7441 LHS.getOperand(0), RHS.getOperand(0),
7442 DAG.getConstant(Sel, DL, MVT::i32));
7443 }
7444 }
7445 }
7446
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007447 return SDValue();
7448}
7449
Matt Arsenaultf2290332015-01-06 23:00:39 +00007450SDValue SITargetLowering::performOrCombine(SDNode *N,
7451 DAGCombinerInfo &DCI) const {
7452 SelectionDAG &DAG = DCI.DAG;
7453 SDValue LHS = N->getOperand(0);
7454 SDValue RHS = N->getOperand(1);
7455
Matt Arsenault3b082382016-04-12 18:24:38 +00007456 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007457 if (VT == MVT::i1) {
7458 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
7459 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7460 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
7461 SDValue Src = LHS.getOperand(0);
7462 if (Src != RHS.getOperand(0))
7463 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00007464
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007465 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7466 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7467 if (!CLHS || !CRHS)
7468 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00007469
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007470 // Only 10 bits are used.
7471 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00007472
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007473 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
7474 SDLoc DL(N);
7475 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7476 Src, DAG.getConstant(NewMask, DL, MVT::i32));
7477 }
Matt Arsenault3b082382016-04-12 18:24:38 +00007478
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007479 return SDValue();
7480 }
7481
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007482 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7483 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
7484 LHS.getOpcode() == AMDGPUISD::PERM &&
7485 isa<ConstantSDNode>(LHS.getOperand(2))) {
7486 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
7487 if (!Sel)
7488 return SDValue();
7489
7490 Sel |= LHS.getConstantOperandVal(2);
7491 SDLoc DL(N);
7492 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7493 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7494 }
7495
7496 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7497 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7498 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7499 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7500 uint32_t LHSMask = getPermuteMask(DAG, LHS);
7501 uint32_t RHSMask = getPermuteMask(DAG, RHS);
7502 if (LHSMask != ~0u && RHSMask != ~0u) {
7503 // Canonicalize the expression in an attempt to have fewer unique masks
7504 // and therefore fewer registers used to hold the masks.
7505 if (LHSMask > RHSMask) {
7506 std::swap(LHSMask, RHSMask);
7507 std::swap(LHS, RHS);
7508 }
7509
7510 // Select 0xc for each lane used from the source operand. Zero has the 0xc mask
7511 // set, 0xff has 0xff in the mask, and actual lanes are in the 0-3 range.
7512 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7513 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7514
7515 // Check of we need to combine values from two sources within a byte.
7516 if (!(LHSUsedLanes & RHSUsedLanes) &&
7517 // If we select high and lower word keep it for SDWA.
7518 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7519 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7520 // Kill zero bytes selected by the other mask. The zero value is 0xc.
7521 LHSMask &= ~RHSUsedLanes;
7522 RHSMask &= ~LHSUsedLanes;
7523 // Add 4 to each active LHS lane
7524 LHSMask |= LHSUsedLanes & 0x04040404;
7525 // Combine masks
7526 uint32_t Sel = LHSMask | RHSMask;
7527 SDLoc DL(N);
7528
7529 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7530 LHS.getOperand(0), RHS.getOperand(0),
7531 DAG.getConstant(Sel, DL, MVT::i32));
7532 }
7533 }
7534 }
7535
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007536 if (VT != MVT::i64)
7537 return SDValue();
7538
7539 // TODO: This could be a generic combine with a predicate for extracting the
7540 // high half of an integer being free.
7541
7542 // (or i64:x, (zero_extend i32:y)) ->
7543 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
7544 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
7545 RHS.getOpcode() != ISD::ZERO_EXTEND)
7546 std::swap(LHS, RHS);
7547
7548 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
7549 SDValue ExtSrc = RHS.getOperand(0);
7550 EVT SrcVT = ExtSrc.getValueType();
7551 if (SrcVT == MVT::i32) {
7552 SDLoc SL(N);
7553 SDValue LowLHS, HiBits;
7554 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
7555 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
7556
7557 DCI.AddToWorklist(LowOr.getNode());
7558 DCI.AddToWorklist(HiBits.getNode());
7559
7560 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
7561 LowOr, HiBits);
7562 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00007563 }
7564 }
7565
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007566 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
7567 if (CRHS) {
7568 if (SDValue Split
7569 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
7570 return Split;
7571 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00007572
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007573 return SDValue();
7574}
Matt Arsenaultf2290332015-01-06 23:00:39 +00007575
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007576SDValue SITargetLowering::performXorCombine(SDNode *N,
7577 DAGCombinerInfo &DCI) const {
7578 EVT VT = N->getValueType(0);
7579 if (VT != MVT::i64)
7580 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00007581
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007582 SDValue LHS = N->getOperand(0);
7583 SDValue RHS = N->getOperand(1);
7584
7585 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7586 if (CRHS) {
7587 if (SDValue Split
7588 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
7589 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00007590 }
7591
7592 return SDValue();
7593}
7594
Matt Arsenault5cf42712017-04-06 20:58:30 +00007595// Instructions that will be lowered with a final instruction that zeros the
7596// high result bits.
7597// XXX - probably only need to list legal operations.
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007598static bool fp16SrcZerosHighBits(unsigned Opc) {
7599 switch (Opc) {
Matt Arsenault5cf42712017-04-06 20:58:30 +00007600 case ISD::FADD:
7601 case ISD::FSUB:
7602 case ISD::FMUL:
7603 case ISD::FDIV:
7604 case ISD::FREM:
7605 case ISD::FMA:
7606 case ISD::FMAD:
7607 case ISD::FCANONICALIZE:
7608 case ISD::FP_ROUND:
7609 case ISD::UINT_TO_FP:
7610 case ISD::SINT_TO_FP:
7611 case ISD::FABS:
7612 // Fabs is lowered to a bit operation, but it's an and which will clear the
7613 // high bits anyway.
7614 case ISD::FSQRT:
7615 case ISD::FSIN:
7616 case ISD::FCOS:
7617 case ISD::FPOWI:
7618 case ISD::FPOW:
7619 case ISD::FLOG:
7620 case ISD::FLOG2:
7621 case ISD::FLOG10:
7622 case ISD::FEXP:
7623 case ISD::FEXP2:
7624 case ISD::FCEIL:
7625 case ISD::FTRUNC:
7626 case ISD::FRINT:
7627 case ISD::FNEARBYINT:
7628 case ISD::FROUND:
7629 case ISD::FFLOOR:
7630 case ISD::FMINNUM:
7631 case ISD::FMAXNUM:
7632 case AMDGPUISD::FRACT:
7633 case AMDGPUISD::CLAMP:
7634 case AMDGPUISD::COS_HW:
7635 case AMDGPUISD::SIN_HW:
7636 case AMDGPUISD::FMIN3:
7637 case AMDGPUISD::FMAX3:
7638 case AMDGPUISD::FMED3:
7639 case AMDGPUISD::FMAD_FTZ:
7640 case AMDGPUISD::RCP:
7641 case AMDGPUISD::RSQ:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007642 case AMDGPUISD::RCP_IFLAG:
Matt Arsenault5cf42712017-04-06 20:58:30 +00007643 case AMDGPUISD::LDEXP:
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007644 return true;
Matt Arsenault5cf42712017-04-06 20:58:30 +00007645 default:
7646 // fcopysign, select and others may be lowered to 32-bit bit operations
7647 // which don't zero the high bits.
7648 return false;
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007649 }
7650}
7651
7652SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
7653 DAGCombinerInfo &DCI) const {
7654 if (!Subtarget->has16BitInsts() ||
7655 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7656 return SDValue();
7657
7658 EVT VT = N->getValueType(0);
7659 if (VT != MVT::i32)
7660 return SDValue();
7661
7662 SDValue Src = N->getOperand(0);
7663 if (Src.getValueType() != MVT::i16)
7664 return SDValue();
7665
7666 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
7667 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
7668 if (Src.getOpcode() == ISD::BITCAST) {
7669 SDValue BCSrc = Src.getOperand(0);
7670 if (BCSrc.getValueType() == MVT::f16 &&
7671 fp16SrcZerosHighBits(BCSrc.getOpcode()))
7672 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
7673 }
7674
7675 return SDValue();
7676}
7677
Matt Arsenaultf2290332015-01-06 23:00:39 +00007678SDValue SITargetLowering::performClassCombine(SDNode *N,
7679 DAGCombinerInfo &DCI) const {
7680 SelectionDAG &DAG = DCI.DAG;
7681 SDValue Mask = N->getOperand(1);
7682
7683 // fp_class x, 0 -> false
7684 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
7685 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007686 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00007687 }
7688
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00007689 if (N->getOperand(0).isUndef())
7690 return DAG.getUNDEF(MVT::i1);
7691
Matt Arsenaultf2290332015-01-06 23:00:39 +00007692 return SDValue();
7693}
7694
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007695SDValue SITargetLowering::performRcpCombine(SDNode *N,
7696 DAGCombinerInfo &DCI) const {
7697 EVT VT = N->getValueType(0);
7698 SDValue N0 = N->getOperand(0);
7699
7700 if (N0.isUndef())
7701 return N0;
7702
7703 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
7704 N0.getOpcode() == ISD::SINT_TO_FP)) {
7705 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
7706 N->getFlags());
7707 }
7708
7709 return AMDGPUTargetLowering::performRcpCombine(N, DCI);
7710}
7711
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007712bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
7713 unsigned MaxDepth) const {
7714 unsigned Opcode = Op.getOpcode();
7715 if (Opcode == ISD::FCANONICALIZE)
7716 return true;
7717
7718 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
7719 auto F = CFP->getValueAPF();
7720 if (F.isNaN() && F.isSignaling())
7721 return false;
7722 return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
7723 }
7724
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007725 // If source is a result of another standard FP operation it is already in
7726 // canonical form.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007727 if (MaxDepth == 0)
7728 return false;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007729
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007730 switch (Opcode) {
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007731 // These will flush denorms if required.
7732 case ISD::FADD:
7733 case ISD::FSUB:
7734 case ISD::FMUL:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007735 case ISD::FCEIL:
7736 case ISD::FFLOOR:
7737 case ISD::FMA:
7738 case ISD::FMAD:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007739 case ISD::FSQRT:
7740 case ISD::FDIV:
7741 case ISD::FREM:
Matt Arsenaultce6d61f2018-08-06 21:51:52 +00007742 case ISD::FP_ROUND:
7743 case ISD::FP_EXTEND:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007744 case AMDGPUISD::FMUL_LEGACY:
7745 case AMDGPUISD::FMAD_FTZ:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00007746 case AMDGPUISD::RCP:
7747 case AMDGPUISD::RSQ:
7748 case AMDGPUISD::RSQ_CLAMP:
7749 case AMDGPUISD::RCP_LEGACY:
7750 case AMDGPUISD::RSQ_LEGACY:
7751 case AMDGPUISD::RCP_IFLAG:
7752 case AMDGPUISD::TRIG_PREOP:
7753 case AMDGPUISD::DIV_SCALE:
7754 case AMDGPUISD::DIV_FMAS:
7755 case AMDGPUISD::DIV_FIXUP:
7756 case AMDGPUISD::FRACT:
7757 case AMDGPUISD::LDEXP:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00007758 case AMDGPUISD::CVT_PKRTZ_F16_F32:
Matt Arsenault940e6072018-08-10 19:20:17 +00007759 case AMDGPUISD::CVT_F32_UBYTE0:
7760 case AMDGPUISD::CVT_F32_UBYTE1:
7761 case AMDGPUISD::CVT_F32_UBYTE2:
7762 case AMDGPUISD::CVT_F32_UBYTE3:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007763 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007764
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007765 // It can/will be lowered or combined as a bit operation.
7766 // Need to check their input recursively to handle.
7767 case ISD::FNEG:
7768 case ISD::FABS:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007769 case ISD::FCOPYSIGN:
7770 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007771
7772 case ISD::FSIN:
7773 case ISD::FCOS:
7774 case ISD::FSINCOS:
7775 return Op.getValueType().getScalarType() != MVT::f16;
7776
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007777 case ISD::FMINNUM:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00007778 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00007779 case ISD::FMINNUM_IEEE:
7780 case ISD::FMAXNUM_IEEE:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00007781 case AMDGPUISD::CLAMP:
7782 case AMDGPUISD::FMED3:
7783 case AMDGPUISD::FMAX3:
7784 case AMDGPUISD::FMIN3: {
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007785 // FIXME: Shouldn't treat the generic operations differently based on these.
Matt Arsenault687ec752018-10-22 16:27:27 +00007786 // However, we aren't really required to flush the result from
7787 // minnum/maxnum.
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007788
Matt Arsenault687ec752018-10-22 16:27:27 +00007789 // snans will be quieted, so we only need to worry about denormals.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007790 if (Subtarget->supportsMinMaxDenormModes() ||
Matt Arsenault687ec752018-10-22 16:27:27 +00007791 denormalsEnabledForType(Op.getValueType()))
7792 return true;
7793
7794 // Flushing may be required.
7795 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
7796 // targets we need to check their inputs recursively.
7797
7798 // FIXME: Does this apply with clamp? It's implemented with max.
7799 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
7800 if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
7801 return false;
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007802 }
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007803
Matt Arsenault687ec752018-10-22 16:27:27 +00007804 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007805 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007806 case ISD::SELECT: {
7807 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
7808 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007809 }
Matt Arsenaulte94ee832018-08-06 22:45:51 +00007810 case ISD::BUILD_VECTOR: {
7811 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
7812 SDValue SrcOp = Op.getOperand(i);
7813 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
7814 return false;
7815 }
7816
7817 return true;
7818 }
7819 case ISD::EXTRACT_VECTOR_ELT:
7820 case ISD::EXTRACT_SUBVECTOR: {
7821 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
7822 }
7823 case ISD::INSERT_VECTOR_ELT: {
7824 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
7825 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
7826 }
7827 case ISD::UNDEF:
7828 // Could be anything.
7829 return false;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00007830
Matt Arsenault687ec752018-10-22 16:27:27 +00007831 case ISD::BITCAST: {
7832 // Hack around the mess we make when legalizing extract_vector_elt
7833 SDValue Src = Op.getOperand(0);
7834 if (Src.getValueType() == MVT::i16 &&
7835 Src.getOpcode() == ISD::TRUNCATE) {
7836 SDValue TruncSrc = Src.getOperand(0);
7837 if (TruncSrc.getValueType() == MVT::i32 &&
7838 TruncSrc.getOpcode() == ISD::BITCAST &&
7839 TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
7840 return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
7841 }
7842 }
7843
7844 return false;
7845 }
Matt Arsenault08f3fe42018-08-06 23:01:31 +00007846 case ISD::INTRINSIC_WO_CHAIN: {
7847 unsigned IntrinsicID
7848 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
7849 // TODO: Handle more intrinsics
7850 switch (IntrinsicID) {
7851 case Intrinsic::amdgcn_cvt_pkrtz:
Matt Arsenault940e6072018-08-10 19:20:17 +00007852 case Intrinsic::amdgcn_cubeid:
7853 case Intrinsic::amdgcn_frexp_mant:
7854 case Intrinsic::amdgcn_fdot2:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00007855 return true;
7856 default:
7857 break;
7858 }
Matt Arsenault5bb9d792018-08-10 17:57:12 +00007859
7860 LLVM_FALLTHROUGH;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00007861 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00007862 default:
7863 return denormalsEnabledForType(Op.getValueType()) &&
7864 DAG.isKnownNeverSNaN(Op);
7865 }
7866
7867 llvm_unreachable("invalid operation");
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007868}
7869
Matt Arsenault9cd90712016-04-14 01:42:16 +00007870// Constant fold canonicalize.
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00007871SDValue SITargetLowering::getCanonicalConstantFP(
7872 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
7873 // Flush denormals to 0 if not enabled.
7874 if (C.isDenormal() && !denormalsEnabledForType(VT))
7875 return DAG.getConstantFP(0.0, SL, VT);
7876
7877 if (C.isNaN()) {
7878 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
7879 if (C.isSignaling()) {
7880 // Quiet a signaling NaN.
7881 // FIXME: Is this supposed to preserve payload bits?
7882 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
7883 }
7884
7885 // Make sure it is the canonical NaN bitpattern.
7886 //
7887 // TODO: Can we use -1 as the canonical NaN value since it's an inline
7888 // immediate?
7889 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
7890 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
7891 }
7892
7893 // Already canonical.
7894 return DAG.getConstantFP(C, SL, VT);
7895}
7896
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007897static bool vectorEltWillFoldAway(SDValue Op) {
7898 return Op.isUndef() || isa<ConstantFPSDNode>(Op);
7899}
7900
Matt Arsenault9cd90712016-04-14 01:42:16 +00007901SDValue SITargetLowering::performFCanonicalizeCombine(
7902 SDNode *N,
7903 DAGCombinerInfo &DCI) const {
Matt Arsenault9cd90712016-04-14 01:42:16 +00007904 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault4aec86d2018-07-31 13:34:31 +00007905 SDValue N0 = N->getOperand(0);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007906 EVT VT = N->getValueType(0);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00007907
Matt Arsenault4aec86d2018-07-31 13:34:31 +00007908 // fcanonicalize undef -> qnan
7909 if (N0.isUndef()) {
Matt Arsenault4aec86d2018-07-31 13:34:31 +00007910 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
7911 return DAG.getConstantFP(QNaN, SDLoc(N), VT);
7912 }
7913
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00007914 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
Matt Arsenault9cd90712016-04-14 01:42:16 +00007915 EVT VT = N->getValueType(0);
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00007916 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
Matt Arsenault9cd90712016-04-14 01:42:16 +00007917 }
7918
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007919 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
7920 // (fcanonicalize k)
7921 //
7922 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
7923
7924 // TODO: This could be better with wider vectors that will be split to v2f16,
7925 // and to consider uses since there aren't that many packed operations.
Matt Arsenaultb5acec12018-08-12 08:42:54 +00007926 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
7927 isTypeLegal(MVT::v2f16)) {
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007928 SDLoc SL(N);
7929 SDValue NewElts[2];
7930 SDValue Lo = N0.getOperand(0);
7931 SDValue Hi = N0.getOperand(1);
Matt Arsenaultb5acec12018-08-12 08:42:54 +00007932 EVT EltVT = Lo.getValueType();
7933
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007934 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
7935 for (unsigned I = 0; I != 2; ++I) {
7936 SDValue Op = N0.getOperand(I);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007937 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
7938 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
7939 CFP->getValueAPF());
7940 } else if (Op.isUndef()) {
Matt Arsenaultb5acec12018-08-12 08:42:54 +00007941 // Handled below based on what the other operand is.
7942 NewElts[I] = Op;
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007943 } else {
7944 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
7945 }
7946 }
7947
Matt Arsenaultb5acec12018-08-12 08:42:54 +00007948 // If one half is undef and the other is constant, prefer a splat vector rather
7949 // than the normal qNaN. If it's a register, prefer 0.0 since that's
7950 // cheaper to use and may be free with a packed operation.
7951 if (NewElts[0].isUndef()) {
7952 NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
7953 NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
7954 }
7956
7957 if (NewElts[1].isUndef()) {
7958 NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
7959 NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
7960 }
7961
Matt Arsenaulta29e7622018-08-06 22:30:44 +00007962 return DAG.getBuildVector(VT, SL, NewElts);
7963 }
7964 }
7965
Matt Arsenault687ec752018-10-22 16:27:27 +00007966 unsigned SrcOpc = N0.getOpcode();
7967
7968 // If it's free to do so, push canonicalizes further up the source, which may
7969 // find a canonical source.
7970 //
7971 // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for
7972 // sNaNs.
7973 if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
7974 auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
7975 if (CRHS && N0.hasOneUse()) {
7976 SDLoc SL(N);
7977 SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
7978 N0.getOperand(0));
7979 SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
7980 DCI.AddToWorklist(Canon0.getNode());
7981
7982 return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
7983 }
7984 }
7985
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00007986 return isCanonicalized(DAG, N0) ? N0 : SDValue();
Matt Arsenault9cd90712016-04-14 01:42:16 +00007987}
7988
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007989static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
7990 switch (Opc) {
7991 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00007992 case ISD::FMAXNUM_IEEE:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007993 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00007994 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007995 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00007996 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00007997 return AMDGPUISD::UMAX3;
7998 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00007999 case ISD::FMINNUM_IEEE:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008000 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008001 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008002 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008003 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008004 return AMDGPUISD::UMIN3;
8005 default:
8006 llvm_unreachable("Not a min/max opcode");
8007 }
8008}
8009
Matt Arsenault10268f92017-02-27 22:40:39 +00008010SDValue SITargetLowering::performIntMed3ImmCombine(
8011 SelectionDAG &DAG, const SDLoc &SL,
8012 SDValue Op0, SDValue Op1, bool Signed) const {
Matt Arsenaultf639c322016-01-28 20:53:42 +00008013 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8014 if (!K1)
8015 return SDValue();
8016
8017 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8018 if (!K0)
8019 return SDValue();
8020
Matt Arsenaultf639c322016-01-28 20:53:42 +00008021 if (Signed) {
8022 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8023 return SDValue();
8024 } else {
8025 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8026 return SDValue();
8027 }
8028
8029 EVT VT = K0->getValueType(0);
Matt Arsenault10268f92017-02-27 22:40:39 +00008030 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8031 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8032 return DAG.getNode(Med3Opc, SL, VT,
8033 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8034 }
Tom Stellard115a6152016-11-10 16:02:37 +00008035
Matt Arsenault10268f92017-02-27 22:40:39 +00008036 // If there isn't a 16-bit med3 operation, convert to 32-bit.
Tom Stellard115a6152016-11-10 16:02:37 +00008037 MVT NVT = MVT::i32;
8038 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8039
Matt Arsenault10268f92017-02-27 22:40:39 +00008040 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8041 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8042 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
Tom Stellard115a6152016-11-10 16:02:37 +00008043
Matt Arsenault10268f92017-02-27 22:40:39 +00008044 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8045 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
Matt Arsenaultf639c322016-01-28 20:53:42 +00008046}
8047
Matt Arsenault6b114d22017-08-30 01:20:17 +00008048static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8049 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8050 return C;
8051
8052 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8053 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8054 return C;
8055 }
8056
8057 return nullptr;
8058}
8059
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008060SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8061 const SDLoc &SL,
8062 SDValue Op0,
8063 SDValue Op1) const {
Matt Arsenault6b114d22017-08-30 01:20:17 +00008064 ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
Matt Arsenaultf639c322016-01-28 20:53:42 +00008065 if (!K1)
8066 return SDValue();
8067
Matt Arsenault6b114d22017-08-30 01:20:17 +00008068 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
Matt Arsenaultf639c322016-01-28 20:53:42 +00008069 if (!K0)
8070 return SDValue();
8071
8072 // Ordered >= (although NaN inputs should have folded away by now).
8073 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8074 if (Cmp == APFloat::cmpGreaterThan)
8075 return SDValue();
8076
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008077 // TODO: Check IEEE bit enabled?
Matt Arsenault6b114d22017-08-30 01:20:17 +00008078 EVT VT = Op0.getValueType();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008079 if (Subtarget->enableDX10Clamp()) {
8080 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8081 // hardware fmed3 behavior converting to a min.
8082 // FIXME: Should this be allowing -0.0?
8083 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8084 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8085 }
8086
Matt Arsenault6b114d22017-08-30 01:20:17 +00008087 // med3 for f16 is only available on gfx9+, and not available for v2f16.
8088 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8089 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8090 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8091 // then give the other result, which is different from med3 with a NaN
8092 // input.
8093 SDValue Var = Op0.getOperand(0);
Matt Arsenaultc3dc8e62018-08-03 18:27:52 +00008094 if (!DAG.isKnownNeverSNaN(Var))
Matt Arsenault6b114d22017-08-30 01:20:17 +00008095 return SDValue();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008096
Matt Arsenaultebf46142018-09-18 02:34:54 +00008097 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8098
8099 if ((!K0->hasOneUse() ||
8100 TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
8101 (!K1->hasOneUse() ||
8102 TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
8103 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
8104 Var, SDValue(K0, 0), SDValue(K1, 0));
8105 }
Matt Arsenault6b114d22017-08-30 01:20:17 +00008106 }
Matt Arsenaultf639c322016-01-28 20:53:42 +00008107
Matt Arsenault6b114d22017-08-30 01:20:17 +00008108 return SDValue();
Matt Arsenaultf639c322016-01-28 20:53:42 +00008109}
8110
8111SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
8112 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008113 SelectionDAG &DAG = DCI.DAG;
8114
Matt Arsenault79a45db2017-02-22 23:53:37 +00008115 EVT VT = N->getValueType(0);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008116 unsigned Opc = N->getOpcode();
8117 SDValue Op0 = N->getOperand(0);
8118 SDValue Op1 = N->getOperand(1);
8119
8120 // Only do this if the inner op has one use since this will just increase
8121 // register pressure for no benefit.
8122
Matt Arsenault79a45db2017-02-22 23:53:37 +00008123
8124 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
Farhana Aleene80aeac2018-04-03 23:00:30 +00008125 !VT.isVector() && VT != MVT::f64 &&
Matt Arsenaultee324ff2017-05-17 19:25:06 +00008126 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
Matt Arsenault5b39b342016-01-28 20:53:48 +00008127 // max(max(a, b), c) -> max3(a, b, c)
8128 // min(min(a, b), c) -> min3(a, b, c)
8129 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
8130 SDLoc DL(N);
8131 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8132 DL,
8133 N->getValueType(0),
8134 Op0.getOperand(0),
8135 Op0.getOperand(1),
8136 Op1);
8137 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008138
Matt Arsenault5b39b342016-01-28 20:53:48 +00008139 // Try commuted.
8140 // max(a, max(b, c)) -> max3(a, b, c)
8141 // min(a, min(b, c)) -> min3(a, b, c)
8142 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
8143 SDLoc DL(N);
8144 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8145 DL,
8146 N->getValueType(0),
8147 Op0,
8148 Op1.getOperand(0),
8149 Op1.getOperand(1));
8150 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008151 }
8152
Matt Arsenaultf639c322016-01-28 20:53:42 +00008153 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
8154 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
8155 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
8156 return Med3;
8157 }
8158
8159 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
8160 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
8161 return Med3;
8162 }
8163
8164 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00008165 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
Matt Arsenault687ec752018-10-22 16:27:27 +00008166 (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
Matt Arsenault5b39b342016-01-28 20:53:48 +00008167 (Opc == AMDGPUISD::FMIN_LEGACY &&
8168 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenault79a45db2017-02-22 23:53:37 +00008169 (VT == MVT::f32 || VT == MVT::f64 ||
Matt Arsenault6b114d22017-08-30 01:20:17 +00008170 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
8171 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008172 Op0.hasOneUse()) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00008173 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
8174 return Res;
8175 }
8176
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008177 return SDValue();
8178}
8179
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008180static bool isClampZeroToOne(SDValue A, SDValue B) {
8181 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
8182 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
8183 // FIXME: Should this be allowing -0.0?
8184 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
8185 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
8186 }
8187 }
8188
8189 return false;
8190}
8191
8192// FIXME: Should only worry about snans for version with chain.
8193SDValue SITargetLowering::performFMed3Combine(SDNode *N,
8194 DAGCombinerInfo &DCI) const {
8195 EVT VT = N->getValueType(0);
8196 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
8197 // NaNs. With a NaN input, the order of the operands may change the result.
8198
8199 SelectionDAG &DAG = DCI.DAG;
8200 SDLoc SL(N);
8201
8202 SDValue Src0 = N->getOperand(0);
8203 SDValue Src1 = N->getOperand(1);
8204 SDValue Src2 = N->getOperand(2);
8205
8206 if (isClampZeroToOne(Src0, Src1)) {
8207 // const_a, const_b, x -> clamp is safe in all cases including signaling
8208 // nans.
8209 // FIXME: Should this be allowing -0.0?
8210 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
8211 }
8212
8213 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
8214 // handling no dx10-clamp?
8215 if (Subtarget->enableDX10Clamp()) {
8216 // If NaNs is clamped to 0, we are free to reorder the inputs.
8217
8218 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8219 std::swap(Src0, Src1);
8220
8221 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
8222 std::swap(Src1, Src2);
8223
8224 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8225 std::swap(Src0, Src1);
8226
8227 if (isClampZeroToOne(Src1, Src2))
8228 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
8229 }
8230
8231 return SDValue();
8232}
8233
Matt Arsenault1f17c662017-02-22 00:27:34 +00008234SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
8235 DAGCombinerInfo &DCI) const {
8236 SDValue Src0 = N->getOperand(0);
8237 SDValue Src1 = N->getOperand(1);
8238 if (Src0.isUndef() && Src1.isUndef())
8239 return DCI.DAG.getUNDEF(N->getValueType(0));
8240 return SDValue();
8241}
8242
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008243SDValue SITargetLowering::performExtractVectorEltCombine(
8244 SDNode *N, DAGCombinerInfo &DCI) const {
8245 SDValue Vec = N->getOperand(0);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00008246 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008247
8248 EVT VecVT = Vec.getValueType();
8249 EVT EltVT = VecVT.getVectorElementType();
8250
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00008251 if ((Vec.getOpcode() == ISD::FNEG ||
8252 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008253 SDLoc SL(N);
8254 EVT EltVT = N->getValueType(0);
8255 SDValue Idx = N->getOperand(1);
8256 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8257 Vec.getOperand(0), Idx);
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00008258 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008259 }
8260
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008261 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
8262 // =>
8263 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
8264 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
8265 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
Farhana Aleene24f3ff2018-05-09 21:18:34 +00008266 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008267 SDLoc SL(N);
8268 EVT EltVT = N->getValueType(0);
8269 SDValue Idx = N->getOperand(1);
8270 unsigned Opc = Vec.getOpcode();
8271
8272 switch(Opc) {
8273 default:
Stanislav Mekhanoshinbcb34ac2018-11-13 21:18:21 +00008274 break;
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008275 // TODO: Support other binary operations.
8276 case ISD::FADD:
Matt Arsenaulta8160732018-08-15 21:34:06 +00008277 case ISD::FSUB:
8278 case ISD::FMUL:
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008279 case ISD::ADD:
Farhana Aleene24f3ff2018-05-09 21:18:34 +00008280 case ISD::UMIN:
8281 case ISD::UMAX:
8282 case ISD::SMIN:
8283 case ISD::SMAX:
8284 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008285 case ISD::FMINNUM:
8286 case ISD::FMAXNUM_IEEE:
8287 case ISD::FMINNUM_IEEE: {
Matt Arsenaulta8160732018-08-15 21:34:06 +00008288 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8289 Vec.getOperand(0), Idx);
8290 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8291 Vec.getOperand(1), Idx);
8292
8293 DCI.AddToWorklist(Elt0.getNode());
8294 DCI.AddToWorklist(Elt1.getNode());
8295 return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
8296 }
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008297 }
8298 }
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008299
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008300 unsigned VecSize = VecVT.getSizeInBits();
8301 unsigned EltSize = EltVT.getSizeInBits();
8302
Stanislav Mekhanoshinbcb34ac2018-11-13 21:18:21 +00008303 // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
8304 // This eliminates the non-constant index and the subsequent movrel or
8305 // scratch access. Sub-dword vectors of 2 dwords or less have a better
8306 // implementation. Vectors bigger than 8 dwords would yield too many
8307 // v_cndmask_b32 instructions.
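  // For example (sketch, 4-element vector):
  //   extract_vector_elt $vec, $idx
  //   -> select ($idx == 3), $vec[3],
  //        (select ($idx == 2), $vec[2],
  //          (select ($idx == 1), $vec[1], $vec[0]))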
8308 if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
8309 !isa<ConstantSDNode>(N->getOperand(1))) {
8310 SDLoc SL(N);
8311 SDValue Idx = N->getOperand(1);
8312 EVT IdxVT = Idx.getValueType();
8313 SDValue V;
8314 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8315 SDValue IC = DAG.getConstant(I, SL, IdxVT);
8316 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8317 if (I == 0)
8318 V = Elt;
8319 else
8320 V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
8321 }
8322 return V;
8323 }
8324
8325 if (!DCI.isBeforeLegalize())
8326 return SDValue();
8327
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008328 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
8329 // elements. This exposes more load reduction opportunities by replacing
8330 // multiple small extract_vector_elements with a single 32-bit extract.
8331 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
Matt Arsenaultbf07a502018-08-31 15:39:52 +00008332 if (isa<MemSDNode>(Vec) &&
8333 EltSize <= 16 &&
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008334 EltVT.isByteSized() &&
8335 VecSize > 32 &&
8336 VecSize % 32 == 0 &&
8337 Idx) {
8338 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
8339
8340 unsigned BitIndex = Idx->getZExtValue() * EltSize;
8341 unsigned EltIdx = BitIndex / 32;
8342 unsigned LeftoverBitIdx = BitIndex % 32;
8343 SDLoc SL(N);
8344
8345 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
8346 DCI.AddToWorklist(Cast.getNode());
8347
8348 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
8349 DAG.getConstant(EltIdx, SL, MVT::i32));
8350 DCI.AddToWorklist(Elt.getNode());
8351 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
8352 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
8353 DCI.AddToWorklist(Srl.getNode());
8354
8355 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
8356 DCI.AddToWorklist(Trunc.getNode());
8357 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
8358 }
8359
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008360 return SDValue();
8361}
8362
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00008363SDValue
8364SITargetLowering::performInsertVectorEltCombine(SDNode *N,
8365 DAGCombinerInfo &DCI) const {
8366 SDValue Vec = N->getOperand(0);
8367 SDValue Idx = N->getOperand(2);
8368 EVT VecVT = Vec.getValueType();
8369 EVT EltVT = VecVT.getVectorElementType();
8370 unsigned VecSize = VecVT.getSizeInBits();
8371 unsigned EltSize = EltVT.getSizeInBits();
8372
8373 // INSERT_VECTOR_ELT (<n x e>, var-idx)
8374 // => BUILD_VECTOR n x select (e, const-idx)
8375 // This eliminates the non-constant index and the subsequent movrel or
8376 // scratch access. Sub-dword vectors of 2 dwords or less have a better
8377 // implementation. Vectors bigger than 8 dwords would yield too many
8378 // v_cndmask_b32 instructions.
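  // For example (sketch, 4-element vector):
  //   insert_vector_elt $vec, $ins, $idx
  //   -> build_vector (select ($idx == 0), $ins, $vec[0]), ...,
  //                   (select ($idx == 3), $ins, $vec[3])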
8379 if (isa<ConstantSDNode>(Idx) ||
8380 VecSize > 256 || (VecSize <= 64 && EltSize < 32))
8381 return SDValue();
8382
8383 SelectionDAG &DAG = DCI.DAG;
8384 SDLoc SL(N);
8385 SDValue Ins = N->getOperand(1);
8386 EVT IdxVT = Idx.getValueType();
8387
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00008388 SmallVector<SDValue, 16> Ops;
8389 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8390 SDValue IC = DAG.getConstant(I, SL, IdxVT);
8391 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8392 SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
8393 Ops.push_back(V);
8394 }
8395
8396 return DAG.getBuildVector(VecVT, SL, Ops);
8397}
8398
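// Returns the fused opcode (ISD::FMAD or ISD::FMA) that the fadd/fsub
// combines below should form for this value type, or 0 if no fused form
// should be used.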
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008399unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
8400 const SDNode *N0,
8401 const SDNode *N1) const {
8402 EVT VT = N0->getValueType(0);
8403
Matt Arsenault770ec862016-12-22 03:55:35 +00008404 // Only do this if we are not trying to support denormals. v_mad_f32 does not
8405 // support denormals ever.
8406 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
8407 (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
8408 return ISD::FMAD;
8409
8410 const TargetOptions &Options = DAG.getTarget().Options;
Amara Emersond28f0cd42017-05-01 15:17:51 +00008411 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
Michael Berg7acc81b2018-05-04 18:48:20 +00008412 (N0->getFlags().hasAllowContract() &&
8413 N1->getFlags().hasAllowContract())) &&
Matt Arsenault770ec862016-12-22 03:55:35 +00008414 isFMAFasterThanFMulAndFAdd(VT)) {
8415 return ISD::FMA;
8416 }
8417
8418 return 0;
8419}
8420
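// Builds a MAD_I64_I32 / MAD_U64_U32 node computing N0 * N1 + N2 in 64 bits
// (plus the carry-out result) and truncates the value back to VT.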
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008421static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
8422 EVT VT,
8423 SDValue N0, SDValue N1, SDValue N2,
8424 bool Signed) {
8425 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
8426 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
8427 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
8428 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
8429}
8430
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008431SDValue SITargetLowering::performAddCombine(SDNode *N,
8432 DAGCombinerInfo &DCI) const {
8433 SelectionDAG &DAG = DCI.DAG;
8434 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008435 SDLoc SL(N);
8436 SDValue LHS = N->getOperand(0);
8437 SDValue RHS = N->getOperand(1);
8438
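  // On subtargets with v_mad_i64_i32 / v_mad_u64_u32, a wide add of a multiply
  // whose operands fit in 32 bits can become a single mad. Illustrative sketch:
  //   i64 (add (mul (zext i32:$x), (zext i32:$y)), $z)
  //   -> MAD_U64_U32 $x, $y, $z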
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008439 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
8440 && Subtarget->hasMad64_32() &&
8441 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
8442 VT.getScalarSizeInBits() <= 64) {
8443 if (LHS.getOpcode() != ISD::MUL)
8444 std::swap(LHS, RHS);
8445
8446 SDValue MulLHS = LHS.getOperand(0);
8447 SDValue MulRHS = LHS.getOperand(1);
8448 SDValue AddRHS = RHS;
8449
8450 // TODO: Maybe restrict if SGPR inputs.
8451 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
8452 numBitsUnsigned(MulRHS, DAG) <= 32) {
8453 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
8454 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
8455 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
8456 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
8457 }
8458
8459 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
8460 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
8461 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
8462 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
8463 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
8464 }
8465
8466 return SDValue();
8467 }
8468
Farhana Aleen07e61232018-05-02 18:16:39 +00008469 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008470 return SDValue();
8471
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008472 // add x, zext (setcc) => addcarry x, 0, setcc
8473 // add x, sext (setcc) => subcarry x, 0, setcc
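  // (zext i1 cc) is 0 or 1, so adding it is adding the carry bit; (sext i1 cc)
  // is 0 or -1, so adding it is subtracting the carry bit, hence SUBCARRY.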
8474 unsigned Opc = LHS.getOpcode();
8475 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008476 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008477 std::swap(RHS, LHS);
8478
8479 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008480 switch (Opc) {
8481 default: break;
8482 case ISD::ZERO_EXTEND:
8483 case ISD::SIGN_EXTEND:
8484 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008485 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00008486 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00008487 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008488 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
8489 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
8490 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
8491 return DAG.getNode(Opc, SL, VTList, Args);
8492 }
8493 case ISD::ADDCARRY: {
8494 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
8495 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8496 if (!C || C->getZExtValue() != 0) break;
8497 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
8498 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
8499 }
8500 }
8501 return SDValue();
8502}
8503
8504SDValue SITargetLowering::performSubCombine(SDNode *N,
8505 DAGCombinerInfo &DCI) const {
8506 SelectionDAG &DAG = DCI.DAG;
8507 EVT VT = N->getValueType(0);
8508
8509 if (VT != MVT::i32)
8510 return SDValue();
8511
8512 SDLoc SL(N);
8513 SDValue LHS = N->getOperand(0);
8514 SDValue RHS = N->getOperand(1);
8515
8516 unsigned Opc = LHS.getOpcode();
8517 if (Opc != ISD::SUBCARRY)
8518 std::swap(RHS, LHS);
8519
8520 if (LHS.getOpcode() == ISD::SUBCARRY) {
8521 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
8522 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
8523 if (!C || C->getZExtValue() != 0)
8524 return SDValue();
8525 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
8526 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
8527 }
8528 return SDValue();
8529}
8530
8531SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
8532 DAGCombinerInfo &DCI) const {
8533
8534 if (N->getValueType(0) != MVT::i32)
8535 return SDValue();
8536
8537 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
8538 if (!C || C->getZExtValue() != 0)
8539 return SDValue();
8540
8541 SelectionDAG &DAG = DCI.DAG;
8542 SDValue LHS = N->getOperand(0);
8543
8544 // addcarry (add x, y), 0, cc => addcarry x, y, cc
8545 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
8546 unsigned LHSOpc = LHS.getOpcode();
8547 unsigned Opc = N->getOpcode();
8548 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
8549 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
8550 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
8551 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008552 }
8553 return SDValue();
8554}
8555
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008556SDValue SITargetLowering::performFAddCombine(SDNode *N,
8557 DAGCombinerInfo &DCI) const {
8558 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8559 return SDValue();
8560
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008561 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00008562 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00008563
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008564 SDLoc SL(N);
8565 SDValue LHS = N->getOperand(0);
8566 SDValue RHS = N->getOperand(1);
8567
8568 // These should really be instruction patterns, but writing patterns with
8569 // source modifiers is a pain.
8570
8571 // fadd (fadd (a, a), b) -> mad 2.0, a, b
8572 if (LHS.getOpcode() == ISD::FADD) {
8573 SDValue A = LHS.getOperand(0);
8574 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008575 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00008576 if (FusedOp != 0) {
8577 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00008578 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00008579 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008580 }
8581 }
8582
8583 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
8584 if (RHS.getOpcode() == ISD::FADD) {
8585 SDValue A = RHS.getOperand(0);
8586 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008587 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00008588 if (FusedOp != 0) {
8589 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00008590 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00008591 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008592 }
8593 }
8594
8595 return SDValue();
8596}
8597
8598SDValue SITargetLowering::performFSubCombine(SDNode *N,
8599 DAGCombinerInfo &DCI) const {
8600 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8601 return SDValue();
8602
8603 SelectionDAG &DAG = DCI.DAG;
8604 SDLoc SL(N);
8605 EVT VT = N->getValueType(0);
8606 assert(!VT.isVector());
8607
8608 // Try to get the fneg to fold into the source modifier. This undoes generic
8609 // DAG combines and folds them into the mad.
8610 //
8611 // Only do this if we are not trying to support denormals. v_mad_f32 does
8612 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00008613 SDValue LHS = N->getOperand(0);
8614 SDValue RHS = N->getOperand(1);
8615 if (LHS.getOpcode() == ISD::FADD) {
8616 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
8617 SDValue A = LHS.getOperand(0);
8618 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008619 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00008620 if (FusedOp != 0){
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008621 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
8622 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
8623
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00008624 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008625 }
8626 }
Matt Arsenault770ec862016-12-22 03:55:35 +00008627 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008628
Matt Arsenault770ec862016-12-22 03:55:35 +00008629 if (RHS.getOpcode() == ISD::FADD) {
8630 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008631
Matt Arsenault770ec862016-12-22 03:55:35 +00008632 SDValue A = RHS.getOperand(0);
8633 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008634 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00008635 if (FusedOp != 0){
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008636 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00008637 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008638 }
8639 }
8640 }
8641
8642 return SDValue();
8643}
8644
Farhana Aleenc370d7b2018-07-16 18:19:59 +00008645SDValue SITargetLowering::performFMACombine(SDNode *N,
8646 DAGCombinerInfo &DCI) const {
8647 SelectionDAG &DAG = DCI.DAG;
8648 EVT VT = N->getValueType(0);
8649 SDLoc SL(N);
8650
Stanislav Mekhanoshind3757d32019-01-10 03:25:20 +00008651 if (!Subtarget->hasDotInsts() || VT != MVT::f32)
Farhana Aleenc370d7b2018-07-16 18:19:59 +00008652 return SDValue();
8653
8654 // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
8655 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
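  // That is, two f16-to-f32 extended multiplies reading the two lanes of the
  // same pair of v2f16 vectors, accumulated into an f32, can become a single
  // FDOT2 (fdot2_f32_f16) node.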
8656 SDValue Op1 = N->getOperand(0);
8657 SDValue Op2 = N->getOperand(1);
8658 SDValue FMA = N->getOperand(2);
8659
8660 if (FMA.getOpcode() != ISD::FMA ||
8661 Op1.getOpcode() != ISD::FP_EXTEND ||
8662 Op2.getOpcode() != ISD::FP_EXTEND)
8663 return SDValue();
8664
8665 // fdot2_f32_f16 always flushes fp32 denormal operands and outputs to zero,
8666 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
8667 // is sufficient to allow generating fdot2.
8668 const TargetOptions &Options = DAG.getTarget().Options;
8669 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
8670 (N->getFlags().hasAllowContract() &&
8671 FMA->getFlags().hasAllowContract())) {
8672 Op1 = Op1.getOperand(0);
8673 Op2 = Op2.getOperand(0);
8674 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8675 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8676 return SDValue();
8677
8678 SDValue Vec1 = Op1.getOperand(0);
8679 SDValue Idx1 = Op1.getOperand(1);
8680 SDValue Vec2 = Op2.getOperand(0);
8681
8682 SDValue FMAOp1 = FMA.getOperand(0);
8683 SDValue FMAOp2 = FMA.getOperand(1);
8684 SDValue FMAAcc = FMA.getOperand(2);
8685
8686 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
8687 FMAOp2.getOpcode() != ISD::FP_EXTEND)
8688 return SDValue();
8689
8690 FMAOp1 = FMAOp1.getOperand(0);
8691 FMAOp2 = FMAOp2.getOperand(0);
8692 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8693 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8694 return SDValue();
8695
8696 SDValue Vec3 = FMAOp1.getOperand(0);
8697 SDValue Vec4 = FMAOp2.getOperand(0);
8698 SDValue Idx2 = FMAOp1.getOperand(1);
8699
8700 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
8701 // Idx1 and Idx2 cannot be the same.
8702 Idx1 == Idx2)
8703 return SDValue();
8704
8705 if (Vec1 == Vec2 || Vec3 == Vec4)
8706 return SDValue();
8707
8708 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
8709 return SDValue();
8710
8711 if ((Vec1 == Vec3 && Vec2 == Vec4) ||
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00008712 (Vec1 == Vec4 && Vec2 == Vec3)) {
8713 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
8714 DAG.getTargetConstant(0, SL, MVT::i1));
8715 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00008716 }
8717 return SDValue();
8718}
8719
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008720SDValue SITargetLowering::performSetCCCombine(SDNode *N,
8721 DAGCombinerInfo &DCI) const {
8722 SelectionDAG &DAG = DCI.DAG;
8723 SDLoc SL(N);
8724
8725 SDValue LHS = N->getOperand(0);
8726 SDValue RHS = N->getOperand(1);
8727 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00008728 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
8729
8730 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
8731 if (!CRHS) {
8732 CRHS = dyn_cast<ConstantSDNode>(LHS);
8733 if (CRHS) {
8734 std::swap(LHS, RHS);
8735 CC = getSetCCSwappedOperands(CC);
8736 }
8737 }
8738
Stanislav Mekhanoshin3b117942018-06-16 03:46:59 +00008739 if (CRHS) {
8740 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
8741 isBoolSGPR(LHS.getOperand(0))) {
8742 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
8743 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
8744 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
8745 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
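  // (sext from i1 cc) is always 0 or -1, so each of these reduces to either
  // cc itself or its inversion (xor cc, -1).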
8746 if ((CRHS->isAllOnesValue() &&
8747 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
8748 (CRHS->isNullValue() &&
8749 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
8750 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
8751 DAG.getConstant(-1, SL, MVT::i1));
8752 if ((CRHS->isAllOnesValue() &&
8753 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
8754 (CRHS->isNullValue() &&
8755 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
8756 return LHS.getOperand(0);
8757 }
8758
8759 uint64_t CRHSVal = CRHS->getZExtValue();
8760 if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
8761 LHS.getOpcode() == ISD::SELECT &&
8762 isa<ConstantSDNode>(LHS.getOperand(1)) &&
8763 isa<ConstantSDNode>(LHS.getOperand(2)) &&
8764 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
8765 isBoolSGPR(LHS.getOperand(0))) {
8766 // Given CT != CF:
8767 // setcc (select cc, CT, CF), CF, eq => xor cc, -1
8768 // setcc (select cc, CT, CF), CF, ne => cc
8769 // setcc (select cc, CT, CF), CT, ne => xor cc, -1
8770 // setcc (select cc, CT, CF), CT, eq => cc
8771 uint64_t CT = LHS.getConstantOperandVal(1);
8772 uint64_t CF = LHS.getConstantOperandVal(2);
8773
8774 if ((CF == CRHSVal && CC == ISD::SETEQ) ||
8775 (CT == CRHSVal && CC == ISD::SETNE))
8776 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
8777 DAG.getConstant(-1, SL, MVT::i1));
8778 if ((CF == CRHSVal && CC == ISD::SETNE) ||
8779 (CT == CRHSVal && CC == ISD::SETEQ))
8780 return LHS.getOperand(0);
8781 }
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00008782 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008783
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00008784 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
8785 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008786 return SDValue();
8787
Matt Arsenault8ad00d32018-08-10 18:58:41 +00008788 // Match isinf/isfinite pattern
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008789 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault8ad00d32018-08-10 18:58:41 +00008790 // (fcmp one (fabs x), inf) -> (fp_class x,
8791 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero)
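  // fabs(x) == +inf holds exactly for +/-infinity, and fabs(x) ordered-not-
  // equal +inf holds exactly for finite values, so both reduce to a single
  // fp_class test with the corresponding mask.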
8792 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008793 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
8794 if (!CRHS)
8795 return SDValue();
8796
8797 const APFloat &APF = CRHS->getValueAPF();
8798 if (APF.isInfinity() && !APF.isNegative()) {
Matt Arsenault8ad00d32018-08-10 18:58:41 +00008799 const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
8800 SIInstrFlags::N_INFINITY;
8801 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
8802 SIInstrFlags::P_ZERO |
8803 SIInstrFlags::N_NORMAL |
8804 SIInstrFlags::P_NORMAL |
8805 SIInstrFlags::N_SUBNORMAL |
8806 SIInstrFlags::P_SUBNORMAL;
8807 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008808 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
8809 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008810 }
8811 }
8812
8813 return SDValue();
8814}
8815
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008816SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
8817 DAGCombinerInfo &DCI) const {
8818 SelectionDAG &DAG = DCI.DAG;
8819 SDLoc SL(N);
8820 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
8821
8822 SDValue Src = N->getOperand(0);
8823 SDValue Srl = N->getOperand(0);
8824 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
8825 Srl = Srl.getOperand(0);
8826
8827 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
8828 if (Srl.getOpcode() == ISD::SRL) {
8829 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
8830 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
8831 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
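  // cvt_f32_ubyteN reads byte N of its 32-bit source, so shifting the source
  // right by 8*k bits is the same as reading byte N+k directly, provided the
  // byte still lies within the dword.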
8832
8833 if (const ConstantSDNode *C =
8834 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
8835 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
8836 EVT(MVT::i32));
8837
8838 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
8839 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
8840 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
8841 MVT::f32, Srl);
8842 }
8843 }
8844 }
8845
8846 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
8847
Craig Topperd0af7e82017-04-28 05:31:46 +00008848 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008849 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
8850 !DCI.isBeforeLegalizeOps());
8851 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Stanislav Mekhanoshined0d6c62019-01-09 02:24:22 +00008852 if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008853 DCI.CommitTargetLoweringOpt(TLO);
8854 }
8855
8856 return SDValue();
8857}
8858
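// Constant fold AMDGPUISD::CLAMP of an FP constant: values below 0.0 (and NaN
// when DX10 clamp is enabled) fold to 0.0, values above 1.0 fold to 1.0, and
// values already in [0.0, 1.0] are returned unchanged.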
Tom Stellard1b95fed2018-05-24 05:28:34 +00008859SDValue SITargetLowering::performClampCombine(SDNode *N,
8860 DAGCombinerInfo &DCI) const {
8861 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
8862 if (!CSrc)
8863 return SDValue();
8864
8865 const APFloat &F = CSrc->getValueAPF();
8866 APFloat Zero = APFloat::getZero(F.getSemantics());
8867 APFloat::cmpResult Cmp0 = F.compare(Zero);
8868 if (Cmp0 == APFloat::cmpLessThan ||
8869 (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
8870 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
8871 }
8872
8873 APFloat One(F.getSemantics(), "1.0");
8874 APFloat::cmpResult Cmp1 = F.compare(One);
8875 if (Cmp1 == APFloat::cmpGreaterThan)
8876 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
8877
8878 return SDValue(CSrc, 0);
8879}
8880
8881
Tom Stellard75aadc22012-12-11 21:25:42 +00008882SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
8883 DAGCombinerInfo &DCI) const {
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00008884 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
8885 return SDValue();
8886
Tom Stellard75aadc22012-12-11 21:25:42 +00008887 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00008888 default:
8889 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008890 case ISD::ADD:
8891 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008892 case ISD::SUB:
8893 return performSubCombine(N, DCI);
8894 case ISD::ADDCARRY:
8895 case ISD::SUBCARRY:
8896 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008897 case ISD::FADD:
8898 return performFAddCombine(N, DCI);
8899 case ISD::FSUB:
8900 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00008901 case ISD::SETCC:
8902 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00008903 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008904 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008905 case ISD::FMAXNUM_IEEE:
8906 case ISD::FMINNUM_IEEE:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008907 case ISD::SMAX:
8908 case ISD::SMIN:
8909 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00008910 case ISD::UMIN:
8911 case AMDGPUISD::FMIN_LEGACY:
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00008912 case AMDGPUISD::FMAX_LEGACY:
8913 return performMinMaxCombine(N, DCI);
Farhana Aleenc370d7b2018-07-16 18:19:59 +00008914 case ISD::FMA:
8915 return performFMACombine(N, DCI);
Matt Arsenault90083d32018-06-07 09:54:49 +00008916 case ISD::LOAD: {
8917 if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
8918 return Widened;
8919 LLVM_FALLTHROUGH;
8920 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00008921 case ISD::STORE:
8922 case ISD::ATOMIC_LOAD:
8923 case ISD::ATOMIC_STORE:
8924 case ISD::ATOMIC_CMP_SWAP:
8925 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
8926 case ISD::ATOMIC_SWAP:
8927 case ISD::ATOMIC_LOAD_ADD:
8928 case ISD::ATOMIC_LOAD_SUB:
8929 case ISD::ATOMIC_LOAD_AND:
8930 case ISD::ATOMIC_LOAD_OR:
8931 case ISD::ATOMIC_LOAD_XOR:
8932 case ISD::ATOMIC_LOAD_NAND:
8933 case ISD::ATOMIC_LOAD_MIN:
8934 case ISD::ATOMIC_LOAD_MAX:
8935 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00008936 case ISD::ATOMIC_LOAD_UMAX:
8937 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00008938 case AMDGPUISD::ATOMIC_DEC:
8939 case AMDGPUISD::ATOMIC_LOAD_FADD:
8940 case AMDGPUISD::ATOMIC_LOAD_FMIN:
8941 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00008942 if (DCI.isBeforeLegalize())
8943 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008944 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00008945 case ISD::AND:
8946 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00008947 case ISD::OR:
8948 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00008949 case ISD::XOR:
8950 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00008951 case ISD::ZERO_EXTEND:
8952 return performZeroExtendCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00008953 case AMDGPUISD::FP_CLASS:
8954 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00008955 case ISD::FCANONICALIZE:
8956 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00008957 case AMDGPUISD::RCP:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00008958 return performRcpCombine(N, DCI);
8959 case AMDGPUISD::FRACT:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00008960 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00008961 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00008962 case AMDGPUISD::RSQ_LEGACY:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00008963 case AMDGPUISD::RCP_IFLAG:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00008964 case AMDGPUISD::RSQ_CLAMP:
8965 case AMDGPUISD::LDEXP: {
8966 SDValue Src = N->getOperand(0);
8967 if (Src.isUndef())
8968 return Src;
8969 break;
8970 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008971 case ISD::SINT_TO_FP:
8972 case ISD::UINT_TO_FP:
8973 return performUCharToFloatCombine(N, DCI);
8974 case AMDGPUISD::CVT_F32_UBYTE0:
8975 case AMDGPUISD::CVT_F32_UBYTE1:
8976 case AMDGPUISD::CVT_F32_UBYTE2:
8977 case AMDGPUISD::CVT_F32_UBYTE3:
8978 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008979 case AMDGPUISD::FMED3:
8980 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00008981 case AMDGPUISD::CVT_PKRTZ_F16_F32:
8982 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00008983 case AMDGPUISD::CLAMP:
8984 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00008985 case ISD::SCALAR_TO_VECTOR: {
8986 SelectionDAG &DAG = DCI.DAG;
8987 EVT VT = N->getValueType(0);
8988
8989 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
8990 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
8991 SDLoc SL(N);
8992 SDValue Src = N->getOperand(0);
8993 EVT EltVT = Src.getValueType();
8994 if (EltVT == MVT::f16)
8995 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
8996
8997 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
8998 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
8999 }
9000
9001 break;
9002 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00009003 case ISD::EXTRACT_VECTOR_ELT:
9004 return performExtractVectorEltCombine(N, DCI);
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00009005 case ISD::INSERT_VECTOR_ELT:
9006 return performInsertVectorEltCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009007 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00009008 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00009009}
Christian Konigd910b7d2013-02-26 17:52:16 +00009010
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009011/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00009012static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009013 switch (Idx) {
9014 default: return 0;
9015 case AMDGPU::sub0: return 0;
9016 case AMDGPU::sub1: return 1;
9017 case AMDGPU::sub2: return 2;
9018 case AMDGPU::sub3: return 3;
David Stuttardf77079f2019-01-14 11:55:24 +00009019 case AMDGPU::sub4: return 4; // Possible with TFE/LWE
Christian Konig8e06e2a2013-04-10 08:39:08 +00009020 }
9021}
9022
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009023/// Adjust the writemask of MIMG instructions
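/// For example (sketch): an image sample with dmask 0xf whose users only read
/// the x and z components is rewritten to use dmask 0x5 with a two-channel
/// result, and the EXTRACT_SUBREG users are remapped to the packed lanes.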
Matt Arsenault68f05052017-12-04 22:18:27 +00009024SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9025 SelectionDAG &DAG) const {
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009026 unsigned Opcode = Node->getMachineOpcode();
9027
9028 // Subtract 1 because the vdata output is not a MachineSDNode operand.
9029 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9030 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9031 return Node; // not implemented for D16
9032
David Stuttardf77079f2019-01-14 11:55:24 +00009033 SDNode *Users[5] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00009034 unsigned Lane = 0;
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009035 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009036 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00009037 unsigned NewDmask = 0;
David Stuttardf77079f2019-01-14 11:55:24 +00009038 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9039 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
9040 bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
9041 Node->getConstantOperandVal(LWEIdx);
9042 unsigned TFCLane = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00009043 bool HasChain = Node->getNumValues() > 1;
9044
9045 if (OldDmask == 0) {
9046 // These are folded out, but on the off chance it happens, don't assert.
9047 return Node;
9048 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009049
David Stuttardf77079f2019-01-14 11:55:24 +00009050 unsigned OldBitsSet = countPopulation(OldDmask);
9051 // Work out which is the TFE/LWE lane if that is enabled.
9052 if (UsesTFC) {
9053 TFCLane = OldBitsSet;
9054 }
9055
Christian Konig8e06e2a2013-04-10 08:39:08 +00009056 // Try to figure out the used register components
9057 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9058 I != E; ++I) {
9059
Matt Arsenault93e65ea2017-02-22 21:16:41 +00009060 // Don't look at users of the chain.
9061 if (I.getUse().getResNo() != 0)
9062 continue;
9063
Christian Konig8e06e2a2013-04-10 08:39:08 +00009064 // Abort if we can't understand the usage
9065 if (!I->isMachineOpcode() ||
9066 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +00009067 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009068
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +00009069 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +00009070 // Note that subregs are packed, i.e. Lane==0 is the first bit set
9071 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
9072 // set, etc.
Christian Konig8b1ed282013-04-10 08:39:16 +00009073 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00009074
David Stuttardf77079f2019-01-14 11:55:24 +00009075 // Check if the use is for the TFE/LWE generated result at VGPRn+1.
9076 if (UsesTFC && Lane == TFCLane) {
9077 Users[Lane] = *I;
9078 } else {
9079 // Set which texture component corresponds to the lane.
9080 unsigned Comp;
9081 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
9082 Comp = countTrailingZeros(Dmask);
9083 Dmask &= ~(1 << Comp);
9084 }
9085
9086 // Abort if we have more than one user per component.
9087 if (Users[Lane])
9088 return Node;
9089
9090 Users[Lane] = *I;
9091 NewDmask |= 1 << Comp;
Tom Stellard54774e52013-10-23 02:53:47 +00009092 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009093 }
9094
David Stuttardf77079f2019-01-14 11:55:24 +00009095 // Don't allow 0 dmask, as hardware assumes one channel enabled.
9096 bool NoChannels = !NewDmask;
9097 if (NoChannels) {
9098 // If the original dmask has one channel, then there is nothing to do.
9099 if (OldBitsSet == 1)
9100 return Node;
9101 // Use an arbitrary dmask - required for the instruction to work
9102 NewDmask = 1;
9103 }
Tom Stellard54774e52013-10-23 02:53:47 +00009104 // Abort if there's no change
9105 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +00009106 return Node;
9107
9108 unsigned BitsSet = countPopulation(NewDmask);
9109
David Stuttardf77079f2019-01-14 11:55:24 +00009110 // Check for TFE or LWE - increase the number of channels by one to account
9111 // for the extra return value
9112 // This will need adjustment for D16 if this is also included in
9113 // adjustWritemask (this function), but at present D16 is excluded.
9114 unsigned NewChannels = BitsSet + UsesTFC;
9115
9116 int NewOpcode =
9117 AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
Matt Arsenault68f05052017-12-04 22:18:27 +00009118 assert(NewOpcode != -1 &&
9119 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
9120 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +00009121
9122 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +00009123 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009124 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009125 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009126 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +00009127
Matt Arsenault68f05052017-12-04 22:18:27 +00009128 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
9129
David Stuttardf77079f2019-01-14 11:55:24 +00009130 MVT ResultVT = NewChannels == 1 ?
9131 SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
9132 NewChannels == 5 ? 8 : NewChannels);
Matt Arsenault856777d2017-12-08 20:00:57 +00009133 SDVTList NewVTList = HasChain ?
9134 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
9135
Matt Arsenault68f05052017-12-04 22:18:27 +00009136
9137 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
9138 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +00009139
Matt Arsenault856777d2017-12-08 20:00:57 +00009140 if (HasChain) {
9141 // Update chain.
Chandler Carruth66654b72018-08-14 23:30:32 +00009142 DAG.setNodeMemRefs(NewNode, Node->memoperands());
Matt Arsenault856777d2017-12-08 20:00:57 +00009143 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
9144 }
Matt Arsenault68f05052017-12-04 22:18:27 +00009145
David Stuttardf77079f2019-01-14 11:55:24 +00009146 if (NewChannels == 1) {
Matt Arsenault68f05052017-12-04 22:18:27 +00009147 assert(Node->hasNUsesOfValue(1, 0));
9148 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
9149 SDLoc(Node), Users[Lane]->getValueType(0),
9150 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +00009151 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +00009152 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +00009153 }
9154
Christian Konig8e06e2a2013-04-10 08:39:08 +00009155 // Update the users of the node with the new indices
David Stuttardf77079f2019-01-14 11:55:24 +00009156 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009157 SDNode *User = Users[i];
David Stuttardf77079f2019-01-14 11:55:24 +00009158 if (!User) {
9159 // Handle the special case of NoChannels. We set NewDmask to 1 above, but
9160 // Users[0] is still nullptr because channel 0 doesn't really have a use.
9161 if (i || !NoChannels)
9162 continue;
9163 } else {
9164 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
9165 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
9166 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009167
9168 switch (Idx) {
9169 default: break;
9170 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
9171 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
9172 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
David Stuttardf77079f2019-01-14 11:55:24 +00009173 case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009174 }
9175 }
Matt Arsenault68f05052017-12-04 22:18:27 +00009176
9177 DAG.RemoveDeadNode(Node);
9178 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009179}
9180
Tom Stellardc98ee202015-07-16 19:40:07 +00009181static bool isFrameIndexOp(SDValue Op) {
9182 if (Op.getOpcode() == ISD::AssertZext)
9183 Op = Op.getOperand(0);
9184
9185 return isa<FrameIndexSDNode>(Op);
9186}
9187
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009188/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +00009189/// with frame index operands.
9190/// LLVM assumes that inputs to these instructions are registers.
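/// For example (sketch), a FrameIndex operand is first materialized with an
/// S_MOV_B32 so the selected node only ever sees register inputs.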
Matt Arsenault0d0d6c22017-04-12 21:58:23 +00009191SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
9192 SelectionDAG &DAG) const {
9193 if (Node->getOpcode() == ISD::CopyToReg) {
9194 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
9195 SDValue SrcVal = Node->getOperand(2);
9196
9197 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
9198 // to try understanding copies to physical registers.
9199 if (SrcVal.getValueType() == MVT::i1 &&
9200 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
9201 SDLoc SL(Node);
9202 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9203 SDValue VReg = DAG.getRegister(
9204 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
9205
9206 SDNode *Glued = Node->getGluedNode();
9207 SDValue ToVReg
9208 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
9209 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
9210 SDValue ToResultReg
9211 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
9212 VReg, ToVReg.getValue(1));
9213 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
9214 DAG.RemoveDeadNode(Node);
9215 return ToResultReg.getNode();
9216 }
9217 }
Tom Stellard8dd392e2014-10-09 18:09:15 +00009218
9219 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +00009220 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00009221 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00009222 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00009223 continue;
9224 }
9225
Tom Stellard3457a842014-10-09 19:06:00 +00009226 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00009227 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00009228 Node->getOperand(i).getValueType(),
9229 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00009230 }
9231
Mark Searles4e3d6162017-10-16 23:38:53 +00009232 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00009233}
9234
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009235/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +00009236/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +00009237SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
9238 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009239 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00009240 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00009241
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00009242 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009243 !TII->isGather4(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +00009244 return adjustWritemask(Node, DAG);
9245 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009246
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00009247 if (Opcode == AMDGPU::INSERT_SUBREG ||
9248 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00009249 legalizeTargetIndependentNode(Node, DAG);
9250 return Node;
9251 }
Matt Arsenault206f8262017-08-01 20:49:41 +00009252
9253 switch (Opcode) {
9254 case AMDGPU::V_DIV_SCALE_F32:
9255 case AMDGPU::V_DIV_SCALE_F64: {
9256 // Satisfy the operand register constraint when one of the inputs is
9257 // undefined. Ordinarily each undef value will have its own implicit_def of
9258 // a vreg, so force these to use a single register.
9259 SDValue Src0 = Node->getOperand(0);
9260 SDValue Src1 = Node->getOperand(1);
9261 SDValue Src2 = Node->getOperand(2);
9262
9263 if ((Src0.isMachineOpcode() &&
9264 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
9265 (Src0 == Src1 || Src0 == Src2))
9266 break;
9267
9268 MVT VT = Src0.getValueType().getSimpleVT();
9269 const TargetRegisterClass *RC = getRegClassFor(VT);
9270
9271 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9272 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
9273
9274 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
9275 UndefReg, Src0, SDValue());
9276
9277 // src0 must be the same register as src1 or src2, even if the value is
9278 // undefined, so make sure we don't violate this constraint.
9279 if (Src0.isMachineOpcode() &&
9280 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
9281 if (Src1.isMachineOpcode() &&
9282 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9283 Src0 = Src1;
9284 else if (Src2.isMachineOpcode() &&
9285 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9286 Src0 = Src2;
9287 else {
9288 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
9289 Src0 = UndefReg;
9290 Src1 = UndefReg;
9291 }
9292 } else
9293 break;
9294
9295 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
9296 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
9297 Ops.push_back(Node->getOperand(I));
9298
9299 Ops.push_back(ImpDef.getValue(1));
9300 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
9301 }
9302 default:
9303 break;
9304 }
9305
Tom Stellard654d6692015-01-08 15:08:17 +00009306 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009307}
Christian Konig8b1ed282013-04-10 08:39:16 +00009308
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009309/// Assign the register class depending on the number of
Christian Konig8b1ed282013-04-10 08:39:16 +00009310/// bits set in the writemask
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009311void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00009312 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009313 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009314
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009315 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009316
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009317 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009318 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009319 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009320 return;
9321 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00009322
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009323 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009324 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009325 if (NoRetAtomicOp != -1) {
9326 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009327 MI.setDesc(TII->get(NoRetAtomicOp));
9328 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00009329 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009330 }
9331
Tom Stellard354a43c2016-04-01 18:27:37 +00009332 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
9333 // instruction, because the return type of these instructions is a vec2 of
9334 // the memory type, so it can be tied to the input operand.
9335 // This means these instructions always have a use, so we need to add a
9336 // special case to check if the atomic has only one extract_subreg use,
9337 // which itself has no uses.
9338 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00009339 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00009340 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
9341 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009342 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00009343
9344 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009345 MI.setDesc(TII->get(NoRetAtomicOp));
9346 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00009347
9348 // If we only remove the def operand from the atomic instruction, the
9349 // extract_subreg will be left with a use of a vreg without a def.
9350 // So we need to insert an implicit_def to avoid machine verifier
9351 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009352 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00009353 TII->get(AMDGPU::IMPLICIT_DEF), Def);
9354 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009355 return;
9356 }
Christian Konig8b1ed282013-04-10 08:39:16 +00009357}
Tom Stellard0518ff82013-06-03 17:39:58 +00009358
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009359static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
9360 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009361 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00009362 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
9363}
9364
9365MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009366 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00009367 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009368 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00009369
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009370 // Build the half of the subregister with the constants before building the
9371 // full 128-bit register. If we are building multiple resource descriptors,
9372 // this will allow CSEing of the 2-component register.
9373 const SDValue Ops0[] = {
9374 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
9375 buildSMovImm32(DAG, DL, 0),
9376 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
9377 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
9378 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
9379 };
Matt Arsenault485defe2014-11-05 19:01:17 +00009380
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009381 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
9382 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00009383
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009384 // Combine the constants and the pointer.
9385 const SDValue Ops1[] = {
9386 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
9387 Ptr,
9388 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
9389 SubRegHi,
9390 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
9391 };
Matt Arsenault485defe2014-11-05 19:01:17 +00009392
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009393 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00009394}
9395
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009396/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00009397/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
9398/// of the resource descriptor) to create an offset, which is added to
9399/// the resource pointer.
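/// The descriptor built below is assembled as: sub0 = pointer low dword,
/// sub1 = pointer high dword or'd with RsrcDword1, and sub2/sub3 = the low
/// and high halves of RsrcDword2And3.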
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009400MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
9401 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009402 uint64_t RsrcDword2And3) const {
9403 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
9404 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
9405 if (RsrcDword1) {
9406 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009407 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
9408 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009409 }
9410
9411 SDValue DataLo = buildSMovImm32(DAG, DL,
9412 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
9413 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
9414
9415 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009416 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009417 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009418 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009419 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009420 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009421 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009422 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009423 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009424 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009425 };
9426
9427 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
9428}
9429
Tom Stellardd7e6f132015-04-08 01:09:26 +00009430//===----------------------------------------------------------------------===//
9431// SI Inline Assembly Support
9432//===----------------------------------------------------------------------===//
9433
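// An 's' (or 'r') constraint selects an SGPR register class and 'v' a VGPR
// class, sized to the operand type; multi-character constraints naming a
// specific register index are handled further below.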
9434std::pair<unsigned, const TargetRegisterClass *>
9435SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00009436 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00009437 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009438 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009439 if (Constraint.size() == 1) {
9440 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009441 default:
9442 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009443 case 's':
9444 case 'r':
9445 switch (VT.getSizeInBits()) {
9446 default:
9447 return std::make_pair(0U, nullptr);
9448 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00009449 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009450 RC = &AMDGPU::SReg_32_XM0RegClass;
9451 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009452 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009453 RC = &AMDGPU::SGPR_64RegClass;
9454 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009455 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009456 RC = &AMDGPU::SReg_128RegClass;
9457 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009458 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009459 RC = &AMDGPU::SReg_256RegClass;
9460 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +00009461 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009462 RC = &AMDGPU::SReg_512RegClass;
9463 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009464 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009465 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009466 case 'v':
9467 switch (VT.getSizeInBits()) {
9468 default:
9469 return std::make_pair(0U, nullptr);
9470 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00009471 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009472 RC = &AMDGPU::VGPR_32RegClass;
9473 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009474 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009475 RC = &AMDGPU::VReg_64RegClass;
9476 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009477 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009478 RC = &AMDGPU::VReg_96RegClass;
9479 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009480 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009481 RC = &AMDGPU::VReg_128RegClass;
9482 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009483 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009484 RC = &AMDGPU::VReg_256RegClass;
9485 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009486 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009487 RC = &AMDGPU::VReg_512RegClass;
9488 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009489 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009490 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +00009491 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009492 // We actually support i128, i16 and f16 as inline parameters
9493 // even if they are not reported as legal
9494 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
9495 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
9496 return std::make_pair(0U, RC);
Tom Stellardd7e6f132015-04-08 01:09:26 +00009497 }
9498
9499 if (Constraint.size() > 1) {
Tom Stellardd7e6f132015-04-08 01:09:26 +00009500 if (Constraint[1] == 'v') {
9501 RC = &AMDGPU::VGPR_32RegClass;
9502 } else if (Constraint[1] == 's') {
9503 RC = &AMDGPU::SGPR_32RegClass;
9504 }
9505
9506 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +00009507 uint32_t Idx;
9508 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
9509 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +00009510 return std::make_pair(RC->getRegister(Idx), RC);
9511 }
9512 }
9513 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9514}
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009515
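// For illustration only (not part of the lowering itself): a hypothetical
// kernel snippet, written with Clang-style inline asm, whose constraints are
// resolved by the routine above. The "v" constraint selects a VGPR class and
// "s" an SGPR class for 32-bit operands; which register is used is left to
// the allocator. The instruction and variable names are assumptions, not
// taken from this file.
//
//   int Dst, Src = /* ... */;
//   __asm__ volatile("v_mov_b32 %0, %1" : "=v"(Dst) : "s"(Src));
//
// Multi-character constraints that name a specific register index are handled
// by the second path above, which parses the trailing index and returns the
// matching physical register when it is valid for the class.
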
SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or
// whether calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access; only
    // entry functions need to reserve the private memory registers here.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  // We have to assume the SP is needed in case there are calls in the function
  // during lowering. Calls are only detected after the function is
  // lowered. We're about to reserve registers, so don't bother reserving a
  // stack pointer if we aren't really going to use one.
  bool NeedSP = !Info->isEntryFunction() ||
                MFI.hasVarSizedObjects() ||
                MFI.hasCalls();

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
    Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
    assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                               Info->getStackPtrOffsetReg()));
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
  }

  MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
  MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
  MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                     Info->getScratchWaveOffsetReg());

  Info->limitOccupancy(MF);

  TargetLoweringBase::finalizeLowering(MF);
}

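// Illustration only (the concrete register names are assumptions, not fixed
// by this code): before finalizeLowering runs, the selected MIR refers to the
// placeholder registers SP_REG, FP_REG, PRIVATE_RSRC_REG and
// SCRATCH_WAVE_OFFSET_REG. The replaceRegWith calls above rewrite those
// placeholders to the concrete registers recorded in SIMachineFunctionInfo,
// so a frame access that was selected against SP_REG ends up using whichever
// SGPR was chosen as the stack pointer offset register for this function.
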
void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  if (getSubtarget()->enableHugePrivateBuffer())
    return;

  // Technically it may be possible to have a dispatch with a single workitem
  // that uses the full private memory size, but that's not really useful. We
  // can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
}

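// Worked example (the value 16 is illustrative; the actual constant is
// AssumeFrameIndexHighZeroBits, defined elsewhere in this file): if 16 high
// bits are assumed zero, the setHighBits call above reports bits [31:16] of a
// 32-bit frame index as known zero, i.e. Known.Zero gains 0xFFFF0000 on top
// of any low alignment bits the generic implementation already set. Address
// arithmetic on the frame index can then be folded into MUBUF vaddr without
// having to prove that the add does not overflow into the sign bit.
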
LLVM_ATTRIBUTE_UNUSED
static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
  assert(N->getOpcode() == ISD::CopyFromReg);
  do {
    // Follow the chain until we find an INLINEASM node.
    N = N->getOperand(0).getNode();
    if (N->getOpcode() == ISD::INLINEASM)
      return true;
  } while (N->getOpcode() == ISD::CopyFromReg);
  return false;
}

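// Illustration of the chain walk above (hypothetical shapes, not real dumps):
//   CopyFromReg -> CopyFromReg -> INLINEASM          => returns true
//   CopyFromReg -> CopyFromReg -> EntryToken, ...    => returns false
// where each arrow follows operand 0 (the chain) of the current node.
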
bool SITargetLowering::isSDNodeSourceOfDivergence(
    const SDNode *N, FunctionLoweringInfo *FLI,
    LegacyDivergenceAnalysis *KDA) const {
  switch (N->getOpcode()) {
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
    const MachineFunction *MF = FLI->MF;
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
    unsigned Reg = R->getReg();
    if (TRI.isPhysicalRegister(Reg))
      return !TRI.isSGPRReg(MRI, Reg);

    if (MRI.isLiveIn(Reg)) {
      // Workitem IDs (workitem.id.x/y/z) and any other VGPR formal argument
      // are considered divergent.
      if (!TRI.isSGPRReg(MRI, Reg))
        return true;
      // Formal arguments of non-entry functions are conservatively considered
      // divergent.
      else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
        return true;
      return false;
    }
    const Value *V = FLI->getValueFromVirtualReg(Reg);
    if (V)
      return KDA->isDivergent(V);
    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
    return !TRI.isSGPRReg(MRI, Reg);
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    unsigned AS = L->getAddressSpace();
    // A flat load may access private memory.
    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have been
  // lowered to AMDGPUISD, so we also need to check those.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}

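// IR-level sketch of what the classification above aims to capture
// (illustrative only; the function itself inspects SelectionDAG nodes):
//
//   %tid  = call i32 @llvm.amdgcn.workitem.id.x()   ; divergent (per-lane)
//   %wgid = call i32 @llvm.amdgcn.workgroup.id.x()  ; uniform (per-wave)
//   %v    = load i32, i32 addrspace(5)* %p          ; divergent: private and
//                                                   ; flat loads are treated
//                                                   ; conservatively
//
// Physical-register and live-in CopyFromReg results are classified by whether
// the register is an SGPR; everything else falls back to the divergence
// analysis result for the originating IR value.
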
bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
  switch (VT.getScalarType().getSimpleVT().SimpleTy) {
  case MVT::f32:
    return Subtarget->hasFP32Denormals();
  case MVT::f64:
    return Subtarget->hasFP64Denormals();
  case MVT::f16:
    return Subtarget->hasFP16Denormals();
  default:
    return false;
  }
}

bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                    const SelectionDAG &DAG,
                                                    bool SNaN,
                                                    unsigned Depth) const {
  if (Op.getOpcode() == AMDGPUISD::CLAMP) {
    if (Subtarget->enableDX10Clamp())
      return true; // Clamped to 0.
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }

  return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
                                                            SNaN, Depth);
}
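
// Illustration of the CLAMP special case above (assumed DX10-clamp semantics,
// consistent with the "Clamped to 0" comment): with DX10 clamp enabled the
// hardware maps NaN inputs to 0.0, so clamp(x) is never NaN regardless of x.
// Without it, clamp(x) is only known never-NaN when x itself is, e.g.
//   clamp(0.5)            -> never NaN in either mode
//   clamp(possibly-NaN x) -> never NaN only under DX10 clamp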