//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));

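// Helper: return the first scalar register (SGPR) that the calling-convention
// state has not yet allocated; asserts if every SGPR is already in use.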
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

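  // Truncating stores from 32-bit integer vectors to narrower element types
  // are expanded rather than handled natively.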
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v32i32}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // Deal with vec3 vector operations when widened to vec4.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Expand);

  // Deal with vec5 vector operations when widened to vec8.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Expand);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this is OK to use when denormals are
// enabled, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    if (Size == 16 && Subtarget->has16BitInsts())
      return (VT.getVectorNumElements() + 1) / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
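
// Illustrative examples of the non-kernel calling-convention rules above (a
// sketch derived from these hooks, not an exhaustive list):
//   v2i32 -> 2 registers of i32   (32-bit elements pass through unchanged)
//   v4f64 -> 8 registers of i32   (64-bit elements are split into dword pairs)
//   v3f16 -> 2 registers of v2f16 (16-bit elements pair up when 16-bit
//                                  instructions are available)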

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}
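
// The breakdown hook also reports the intermediate type; e.g. (a sketch,
// assuming 16-bit instructions are available) v8f16 yields
// RegisterVT = IntermediateVT = v2f16 with NumIntermediates = (8 + 1) / 2 = 4.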

static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type currently expected.
  assert(Ty->isStructTy() && "Expected struct type");

  Type *ElementType = nullptr;
  unsigned NumElts;
  if (Ty->getContainedType(0)->isVectorTy()) {
    VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
    ElementType = VecComponent->getElementType();
    NumElts = VecComponent->getNumElements();
  } else {
    ElementType = Ty->getContainedType(0);
    NumElts = 1;
  }

  assert((Ty->getContainedType(1) &&
          Ty->getContainedType(1)->isIntegerTy(32)) && "Expected int32 type");

  // Calculate the size of the memVT type from the aggregate
  unsigned Pow2Elts = 0;
  unsigned ElementSize;
  switch (ElementType->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
    break;
  case Type::HalfTyID:
    ElementSize = 16;
    break;
  case Type::FloatTyID:
    ElementSize = 32;
    break;
  }
  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);

  return MVT::getVectorVT(MVT::getVT(ElementType, false),
                          Pow2Elts);
}
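
// Worked example (sketch): for a returned aggregate { <4 x float>, i32 }, the
// element type is float (ElementSize = 32), so AdditionalElts = 1 and
// Pow2Elts = 1 << Log2_32_Ceil(4 + 1) = 8, giving a memVT of v8f32.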

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType(), true);
      if (Info.memVT == MVT::Other) {
        // Some intrinsics return an aggregate type - special case to work out
        // the correct memVT
        Info.memVT = memVTFromAggregate(CI.getType());
      }
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // GFX10 shrank the signed offset to 12 bits. When using regular flat
  // instructions, the sign bit is also ignored and the offset is treated as an
  // 11-bit unsigned offset.

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
    return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}
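
// Example: with flat instruction offsets available, an addressing mode of
// base register + 255 (Scale == 0) passes both the 11-bit and 12-bit checks
// above; without flat offsets only a bare register address is accepted.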

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
    // on VI.
1018 // FIXME: This assumption is currently wrong. On VI we still use
1019 // MUBUF instructions for the r + i addressing mode. As currently
1020 // implemented, the MUBUF instructions only work on buffer < 4GB.
1021 // It may be possible to support > 4GB buffers with MUBUF instructions,
1022 // by setting the stride value in the resource descriptor which would
1023 // increase the size limit to (stride * 4GB). However, this is risky,
1024 // because it has never been validated.
1025 return isLegalFlatAddressingMode(AM);
1026 }
1027
1028 return isLegalMUBUFAddressingMode(AM);
1029}
1030
Matt Arsenault711b3902015-08-07 20:18:34 +00001031bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1032 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1033 // additionally can do r + r + i with addr64. 32-bit has more addressing
1034 // mode options. Depending on the resource constant, it can also do
1035 // (i64 r0) + (i32 r1) * (i14 i).
1036 //
1037 // Private arrays end up using a scratch buffer most of the time, so also
1038 // assume those use MUBUF instructions. Scratch loads / stores are currently
 1039 // implemented as MUBUF instructions with the offen bit set, so they are
 1040 // slightly different from the normal addr64 mode.
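  // As an illustration of the check below: the 12-bit unsigned field covers
  // byte offsets 0..4095, so e.g. BaseOffs == 4096 cannot be folded into the
  // immediate and is rejected.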
1041 if (!isUInt<12>(AM.BaseOffs))
1042 return false;
1043
1044 // FIXME: Since we can split immediate into soffset and immediate offset,
1045 // would it make sense to allow any immediate?
1046
1047 switch (AM.Scale) {
1048 case 0: // r + i or just i, depending on HasBaseReg.
1049 return true;
1050 case 1:
1051 return true; // We have r + r or r + i.
1052 case 2:
1053 if (AM.HasBaseReg) {
1054 // Reject 2 * r + r.
1055 return false;
1056 }
1057
1058 // Allow 2 * r as r + r
1059 // Or 2 * r + i is allowed as r + r + i.
1060 return true;
1061 default: // Don't allow n * r
1062 return false;
1063 }
1064}
1065
Mehdi Amini0cdec1e2015-07-09 02:09:40 +00001066bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1067 const AddrMode &AM, Type *Ty,
Jonas Paulsson024e3192017-07-21 11:59:37 +00001068 unsigned AS, Instruction *I) const {
Matt Arsenault5015a892014-08-15 17:17:07 +00001069 // No global is ever allowed as a base.
1070 if (AM.BaseGV)
1071 return false;
1072
Matt Arsenault0da63502018-08-31 05:49:54 +00001073 if (AS == AMDGPUAS::GLOBAL_ADDRESS)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001074 return isLegalGlobalAddressingMode(AM);
Matt Arsenault5015a892014-08-15 17:17:07 +00001075
Matt Arsenault0da63502018-08-31 05:49:54 +00001076 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
Neil Henning523dab02019-03-18 14:44:28 +00001077 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1078 AS == AMDGPUAS::BUFFER_FAT_POINTER) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001079 // If the offset isn't a multiple of 4, it probably isn't going to be
1080 // correctly aligned.
Matt Arsenault3cc1e002016-08-13 01:43:51 +00001081 // FIXME: Can we get the real alignment here?
Matt Arsenault711b3902015-08-07 20:18:34 +00001082 if (AM.BaseOffs % 4 != 0)
1083 return isLegalMUBUFAddressingMode(AM);
1084
1085 // There are no SMRD extloads, so if we have to do a small type access we
1086 // will use a MUBUF load.
1087 // FIXME?: We also need to do this if unaligned, but we don't know the
1088 // alignment here.
Stanislav Mekhanoshin57d341c2018-05-15 22:07:51 +00001089 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001090 return isLegalGlobalAddressingMode(AM);
Matt Arsenault711b3902015-08-07 20:18:34 +00001091
Tom Stellard5bfbae52018-07-11 20:59:01 +00001092 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001093 // SMRD instructions have an 8-bit, dword offset on SI.
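      // For example, offsets reaching this point are dword-aligned (see the
      // % 4 check above), so the largest BaseOffs accepted here is 1020
      // (1020 / 4 == 255).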
1094 if (!isUInt<8>(AM.BaseOffs / 4))
1095 return false;
Tom Stellard5bfbae52018-07-11 20:59:01 +00001096 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001097 // On CI+, this can also be a 32-bit literal constant offset. If it fits
1098 // in 8-bits, it can use a smaller encoding.
1099 if (!isUInt<32>(AM.BaseOffs / 4))
1100 return false;
Tom Stellard5bfbae52018-07-11 20:59:01 +00001101 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001102 // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1103 if (!isUInt<20>(AM.BaseOffs))
1104 return false;
1105 } else
1106 llvm_unreachable("unhandled generation");
1107
1108 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1109 return true;
1110
1111 if (AM.Scale == 1 && AM.HasBaseReg)
1112 return true;
1113
1114 return false;
Matt Arsenault711b3902015-08-07 20:18:34 +00001115
Matt Arsenault0da63502018-08-31 05:49:54 +00001116 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001117 return isLegalMUBUFAddressingMode(AM);
Matt Arsenault0da63502018-08-31 05:49:54 +00001118 } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1119 AS == AMDGPUAS::REGION_ADDRESS) {
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001120 // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1121 // field.
1122 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1123 // an 8-bit dword offset but we don't know the alignment here.
1124 if (!isUInt<16>(AM.BaseOffs))
Matt Arsenault5015a892014-08-15 17:17:07 +00001125 return false;
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001126
1127 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1128 return true;
1129
1130 if (AM.Scale == 1 && AM.HasBaseReg)
1131 return true;
1132
Matt Arsenault5015a892014-08-15 17:17:07 +00001133 return false;
Matt Arsenault0da63502018-08-31 05:49:54 +00001134 } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1135 AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
Matt Arsenault7d1b6c82016-04-29 06:25:10 +00001136 // For an unknown address space, this usually means the value is for some
 1137 // reason being used for pure arithmetic, and not based on some addressing
1138 // computation. We don't have instructions that compute pointers with any
1139 // addressing modes, so treat them as having no offset like flat
1140 // instructions.
Tom Stellard70580f82015-07-20 14:28:41 +00001141 return isLegalFlatAddressingMode(AM);
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001142 } else {
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001143 llvm_unreachable("unhandled address space");
1144 }
Matt Arsenault5015a892014-08-15 17:17:07 +00001145}
1146
Nirav Dave4dcad5d2017-07-10 20:25:54 +00001147bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1148 const SelectionDAG &DAG) const {
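  // A brief summary of the limits enforced below (derived from this function
  // only): global/flat stores merge up to 128 bits, private up to
  // 8 * MaxPrivateElementSize bits, and LDS up to 64 bits.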
Matt Arsenault0da63502018-08-31 05:49:54 +00001149 if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001150 return (MemVT.getSizeInBits() <= 4 * 32);
Matt Arsenault0da63502018-08-31 05:49:54 +00001151 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001152 unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1153 return (MemVT.getSizeInBits() <= MaxPrivateBits);
Matt Arsenault0da63502018-08-31 05:49:54 +00001154 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001155 return (MemVT.getSizeInBits() <= 2 * 32);
1156 }
1157 return true;
1158}
1159
Simon Pilgrim4e0648a2019-06-12 17:14:03 +00001160bool SITargetLowering::allowsMisalignedMemoryAccesses(
1161 EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1162 bool *IsFast) const {
Matt Arsenault1018c892014-04-24 17:08:26 +00001163 if (IsFast)
1164 *IsFast = false;
1165
Matt Arsenault1018c892014-04-24 17:08:26 +00001166 // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1167 // which isn't a simple VT.
Alina Sbirlea6f937b12016-08-04 16:38:44 +00001168 // Until MVT is extended to handle this, simply check for the size and
1169 // rely on the condition below: allow accesses if the size is a multiple of 4.
1170 if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1171 VT.getStoreSize() > 16)) {
Tom Stellard81d871d2013-11-13 23:36:50 +00001172 return false;
Alina Sbirlea6f937b12016-08-04 16:38:44 +00001173 }
Matt Arsenault1018c892014-04-24 17:08:26 +00001174
Matt Arsenault0da63502018-08-31 05:49:54 +00001175 if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1176 AddrSpace == AMDGPUAS::REGION_ADDRESS) {
Matt Arsenault6f2a5262014-07-27 17:46:40 +00001177 // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1178 // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1179 // with adjacent offsets.
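    // So, for example, an 8-byte LDS access that is only 4-byte aligned is
    // still reported as fast here.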
Sanjay Patelce74db92015-09-03 15:03:19 +00001180 bool AlignedBy4 = (Align % 4 == 0);
1181 if (IsFast)
1182 *IsFast = AlignedBy4;
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001183
Sanjay Patelce74db92015-09-03 15:03:19 +00001184 return AlignedBy4;
Matt Arsenault6f2a5262014-07-27 17:46:40 +00001185 }
Matt Arsenault1018c892014-04-24 17:08:26 +00001186
Tom Stellard64a9d082016-10-14 18:10:39 +00001187 // FIXME: We have to be conservative here and assume that flat operations
1188 // will access scratch. If we had access to the IR function, then we
1189 // could determine if any private memory was used in the function.
1190 if (!Subtarget->hasUnalignedScratchAccess() &&
Matt Arsenault0da63502018-08-31 05:49:54 +00001191 (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1192 AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
Matt Arsenaultf4320112018-09-24 13:18:15 +00001193 bool AlignedBy4 = Align >= 4;
1194 if (IsFast)
1195 *IsFast = AlignedBy4;
1196
1197 return AlignedBy4;
Tom Stellard64a9d082016-10-14 18:10:39 +00001198 }
1199
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001200 if (Subtarget->hasUnalignedBufferAccess()) {
 1201 // If we have a uniform constant load, it still requires using a slow
1202 // buffer instruction if unaligned.
1203 if (IsFast) {
Matt Arsenault0da63502018-08-31 05:49:54 +00001204 *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1205 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001206 (Align % 4 == 0) : true;
1207 }
1208
1209 return true;
1210 }
1211
Tom Stellard33e64c62015-02-04 20:49:52 +00001212 // Values smaller than a dword must be aligned.
Tom Stellard33e64c62015-02-04 20:49:52 +00001213 if (VT.bitsLT(MVT::i32))
1214 return false;
1215
Matt Arsenault1018c892014-04-24 17:08:26 +00001216 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1217 // byte-address are ignored, thus forcing Dword alignment.
Tom Stellarde812f2f2014-07-21 15:45:06 +00001218 // This applies to private, global, and constant memory.
Matt Arsenault1018c892014-04-24 17:08:26 +00001219 if (IsFast)
1220 *IsFast = true;
Tom Stellardc6b299c2015-02-02 18:02:28 +00001221
1222 return VT.bitsGT(MVT::i32) && Align % 4 == 0;
Tom Stellard0125f2a2013-06-25 02:39:35 +00001223}
1224
Sjoerd Meijer180f1ae2019-04-30 08:38:12 +00001225EVT SITargetLowering::getOptimalMemOpType(
1226 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1227 bool ZeroMemset, bool MemcpyStrSrc,
1228 const AttributeList &FuncAttributes) const {
Matt Arsenault46645fa2014-07-28 17:49:26 +00001229 // FIXME: Should account for address space here.
1230
1231 // The default fallback uses the private pointer size as a guess for a type to
1232 // use. Make sure we switch these to 64-bit accesses.
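  // For instance, the checks below return v4i32 for a 32-byte copy to a
  // dword-aligned destination, and v2i32 for an 8 to 15 byte copy with the
  // same alignment.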
1233
1234 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1235 return MVT::v4i32;
1236
1237 if (Size >= 8 && DstAlign >= 4)
1238 return MVT::v2i32;
1239
1240 // Use the default.
1241 return MVT::Other;
1242}
1243
Matt Arsenault0da63502018-08-31 05:49:54 +00001244static bool isFlatGlobalAddrSpace(unsigned AS) {
1245 return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1246 AS == AMDGPUAS::FLAT_ADDRESS ||
Matt Arsenaulta8b43392019-02-08 02:40:47 +00001247 AS == AMDGPUAS::CONSTANT_ADDRESS ||
1248 AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001249}
1250
1251bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1252 unsigned DestAS) const {
Matt Arsenault0da63502018-08-31 05:49:54 +00001253 return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001254}
1255
Alexander Timofeev18009562016-12-08 17:28:47 +00001256bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1257 const MemSDNode *MemNode = cast<MemSDNode>(N);
1258 const Value *Ptr = MemNode->getMemOperand()->getValue();
Matt Arsenault0a0c8712018-03-27 18:39:45 +00001259 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
Alexander Timofeev18009562016-12-08 17:28:47 +00001260 return I && I->getMetadata("amdgpu.noclobber");
1261}
1262
Matt Arsenault8dbeb922019-06-03 18:41:34 +00001263bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1264 unsigned DestAS) const {
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001265 // Flat -> private/local is a simple truncate.
1266 // Flat -> global is no-op
Matt Arsenault0da63502018-08-31 05:49:54 +00001267 if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001268 return true;
1269
1270 return isNoopAddrSpaceCast(SrcAS, DestAS);
1271}
1272
Tom Stellarda6f24c62015-12-15 20:55:55 +00001273bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1274 const MemSDNode *MemNode = cast<MemSDNode>(N);
Tom Stellarda6f24c62015-12-15 20:55:55 +00001275
Matt Arsenaultbcf7bec2018-02-09 16:57:48 +00001276 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
Tom Stellarda6f24c62015-12-15 20:55:55 +00001277}
1278
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001279TargetLoweringBase::LegalizeTypeAction
Craig Topper0b5f8162018-11-05 23:26:13 +00001280SITargetLowering::getPreferredVectorAction(MVT VT) const {
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001281 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1282 return TypeSplitVector;
1283
1284 return TargetLoweringBase::getPreferredVectorAction(VT);
Tom Stellardd86003e2013-08-14 23:25:00 +00001285}
Tom Stellard0125f2a2013-06-25 02:39:35 +00001286
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001287bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1288 Type *Ty) const {
Matt Arsenault749035b2016-07-30 01:40:36 +00001289 // FIXME: Could be smarter if called for vector constants.
1290 return true;
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001291}
1292
Tom Stellard2e045bb2016-01-20 00:13:22 +00001293bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001294 if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1295 switch (Op) {
1296 case ISD::LOAD:
1297 case ISD::STORE:
Tom Stellard2e045bb2016-01-20 00:13:22 +00001298
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001299 // These operations are done with 32-bit instructions anyway.
1300 case ISD::AND:
1301 case ISD::OR:
1302 case ISD::XOR:
1303 case ISD::SELECT:
1304 // TODO: Extensions?
1305 return true;
1306 default:
1307 return false;
1308 }
1309 }
Konstantin Zhuravlyove14df4b2016-09-28 20:05:39 +00001310
Tom Stellard2e045bb2016-01-20 00:13:22 +00001311 // SimplifySetCC uses this function to determine whether or not it should
1312 // create setcc with i1 operands. We don't have instructions for i1 setcc.
1313 if (VT == MVT::i1 && Op == ISD::SETCC)
1314 return false;
1315
1316 return TargetLowering::isTypeDesirableForOp(Op, VT);
1317}
1318
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001319SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1320 const SDLoc &SL,
1321 SDValue Chain,
1322 uint64_t Offset) const {
Mehdi Aminia749f2a2015-07-09 02:09:52 +00001323 const DataLayout &DL = DAG.getDataLayout();
Tom Stellardec2e43c2014-09-22 15:35:29 +00001324 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001325 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1326
1327 const ArgDescriptor *InputPtrReg;
1328 const TargetRegisterClass *RC;
1329
1330 std::tie(InputPtrReg, RC)
1331 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Tom Stellard94593ee2013-06-03 17:40:18 +00001332
Matt Arsenault86033ca2014-07-28 17:31:39 +00001333 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
Matt Arsenault0da63502018-08-31 05:49:54 +00001334 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
Matt Arsenaulta0269b62015-06-01 21:58:24 +00001335 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001336 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1337
Matt Arsenault2fb9ccf2018-05-29 17:42:38 +00001338 return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
Jan Veselyfea814d2016-06-21 20:46:20 +00001339}
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001340
Matt Arsenault9166ce82017-07-28 15:52:08 +00001341SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1342 const SDLoc &SL) const {
Matt Arsenault75e71922018-06-28 10:18:55 +00001343 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1344 FIRST_IMPLICIT);
Matt Arsenault9166ce82017-07-28 15:52:08 +00001345 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1346}
1347
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001348SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1349 const SDLoc &SL, SDValue Val,
1350 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001351 const ISD::InputArg *Arg) const {
Tim Renouf361b5b22019-03-21 12:01:21 +00001352 // First, if it is a widened vector, narrow it.
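  // (For example, a 3-element vector argument whose in-memory type was
  // widened to 4 elements is narrowed back to 3 elements here.)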
1353 if (VT.isVector() &&
1354 VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1355 EVT NarrowedVT =
1356 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1357 VT.getVectorNumElements());
1358 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1359 DAG.getConstant(0, SL, MVT::i32));
1360 }
1361
1362 // Then convert the vector elements or scalar value.
Matt Arsenault6dca5422017-01-09 18:52:39 +00001363 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1364 VT.bitsLT(MemVT)) {
1365 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1366 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1367 }
1368
Tom Stellardbc6c5232016-10-17 16:21:45 +00001369 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001370 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001371 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001372 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001373 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001374 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001375
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001376 return Val;
1377}
1378
1379SDValue SITargetLowering::lowerKernargMemParameter(
1380 SelectionDAG &DAG, EVT VT, EVT MemVT,
1381 const SDLoc &SL, SDValue Chain,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001382 uint64_t Offset, unsigned Align, bool Signed,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001383 const ISD::InputArg *Arg) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001384 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00001385 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001386 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1387
Matt Arsenault90083d32018-06-07 09:54:49 +00001388 // Try to avoid using an extload by loading earlier than the argument address,
1389 // and extracting the relevant bits. The load should hopefully be merged with
1390 // the previous argument.
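  // Worked example: an i16 argument at byte offset 6 (so Align == 2) loads
  // the dword at offset 4 and shifts the loaded value right by
  // (6 - 4) * 8 == 16 bits before truncating back to 16 bits.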
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001391 if (MemVT.getStoreSize() < 4 && Align < 4) {
1392 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
Matt Arsenault90083d32018-06-07 09:54:49 +00001393 int64_t AlignDownOffset = alignDown(Offset, 4);
1394 int64_t OffsetDiff = Offset - AlignDownOffset;
1395
1396 EVT IntVT = MemVT.changeTypeToInteger();
1397
1398 // TODO: If we passed in the base kernel offset we could have a better
1399 // alignment than 4, but we don't really need it.
1400 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1401 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1402 MachineMemOperand::MODereferenceable |
1403 MachineMemOperand::MOInvariant);
1404
1405 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1406 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1407
1408 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1409 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1410 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1411
1412
1413 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1414 }
1415
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001416 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1417 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001418 MachineMemOperand::MODereferenceable |
1419 MachineMemOperand::MOInvariant);
1420
1421 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001422 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001423}
1424
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001425SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1426 const SDLoc &SL, SDValue Chain,
1427 const ISD::InputArg &Arg) const {
1428 MachineFunction &MF = DAG.getMachineFunction();
1429 MachineFrameInfo &MFI = MF.getFrameInfo();
1430
1431 if (Arg.Flags.isByVal()) {
1432 unsigned Size = Arg.Flags.getByValSize();
1433 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1434 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1435 }
1436
1437 unsigned ArgOffset = VA.getLocMemOffset();
1438 unsigned ArgSize = VA.getValVT().getStoreSize();
1439
1440 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1441
1442 // Create load nodes to retrieve arguments from the stack.
1443 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1444 SDValue ArgValue;
1445
 1446 // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
1447 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1448 MVT MemVT = VA.getValVT();
1449
1450 switch (VA.getLocInfo()) {
1451 default:
1452 break;
1453 case CCValAssign::BCvt:
1454 MemVT = VA.getLocVT();
1455 break;
1456 case CCValAssign::SExt:
1457 ExtType = ISD::SEXTLOAD;
1458 break;
1459 case CCValAssign::ZExt:
1460 ExtType = ISD::ZEXTLOAD;
1461 break;
1462 case CCValAssign::AExt:
1463 ExtType = ISD::EXTLOAD;
1464 break;
1465 }
1466
1467 ArgValue = DAG.getExtLoad(
1468 ExtType, SL, VA.getLocVT(), Chain, FIN,
1469 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1470 MemVT);
1471 return ArgValue;
1472}
1473
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001474SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1475 const SIMachineFunctionInfo &MFI,
1476 EVT VT,
1477 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1478 const ArgDescriptor *Reg;
1479 const TargetRegisterClass *RC;
1480
1481 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1482 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1483}
1484
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001485static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1486 CallingConv::ID CallConv,
1487 ArrayRef<ISD::InputArg> Ins,
1488 BitVector &Skipped,
1489 FunctionType *FType,
1490 SIMachineFunctionInfo *Info) {
1491 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001492 const ISD::InputArg *Arg = &Ins[I];
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001493
Matt Arsenault55ab9212018-08-01 19:57:34 +00001494 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1495 "vector type argument should have been split");
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001496
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001497 // First check if it's a PS input addr.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001498 if (CallConv == CallingConv::AMDGPU_PS &&
1499 !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001500
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001501 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1502
1503 // Inconveniently only the first part of the split is marked as isSplit,
1504 // so skip to the end. We only want to increment PSInputNum once for the
1505 // entire split argument.
1506 if (Arg->Flags.isSplit()) {
1507 while (!Arg->Flags.isSplitEnd()) {
1508 assert(!Arg->VT.isVector() &&
1509 "unexpected vector split in ps argument type");
1510 if (!SkipArg)
1511 Splits.push_back(*Arg);
1512 Arg = &Ins[++I];
1513 }
1514 }
1515
1516 if (SkipArg) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001517 // We can safely skip PS inputs.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001518 Skipped.set(Arg->getOrigArgIndex());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001519 ++PSInputNum;
1520 continue;
1521 }
1522
1523 Info->markPSInputAllocated(PSInputNum);
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001524 if (Arg->Used)
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001525 Info->markPSInputEnabled(PSInputNum);
1526
1527 ++PSInputNum;
1528 }
1529
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001530 Splits.push_back(*Arg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001531 }
1532}
1533
1534// Allocate special inputs passed in VGPRs.
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001535static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1536 MachineFunction &MF,
1537 const SIRegisterInfo &TRI,
1538 SIMachineFunctionInfo &Info) {
1539 if (Info.hasWorkItemIDX()) {
1540 unsigned Reg = AMDGPU::VGPR0;
1541 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001542
1543 CCInfo.AllocateReg(Reg);
1544 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1545 }
1546
1547 if (Info.hasWorkItemIDY()) {
1548 unsigned Reg = AMDGPU::VGPR1;
1549 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1550
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001551 CCInfo.AllocateReg(Reg);
1552 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1553 }
1554
1555 if (Info.hasWorkItemIDZ()) {
1556 unsigned Reg = AMDGPU::VGPR2;
1557 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1558
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001559 CCInfo.AllocateReg(Reg);
1560 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1561 }
1562}
1563
1564// Try to allocate a VGPR at the end of the argument list, or if no argument
1565// VGPRs are left allocating a stack slot.
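// (If all 32 argument VGPRs are already taken, the returned descriptor
// records a 4-byte, 4-aligned stack slot instead of a register.)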
1566static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1567 ArrayRef<MCPhysReg> ArgVGPRs
1568 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1569 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1570 if (RegIdx == ArgVGPRs.size()) {
1571 // Spill to stack required.
1572 int64_t Offset = CCInfo.AllocateStack(4, 4);
1573
1574 return ArgDescriptor::createStack(Offset);
1575 }
1576
1577 unsigned Reg = ArgVGPRs[RegIdx];
1578 Reg = CCInfo.AllocateReg(Reg);
1579 assert(Reg != AMDGPU::NoRegister);
1580
1581 MachineFunction &MF = CCInfo.getMachineFunction();
1582 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1583 return ArgDescriptor::createRegister(Reg);
1584}
1585
1586static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1587 const TargetRegisterClass *RC,
1588 unsigned NumArgRegs) {
1589 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1590 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1591 if (RegIdx == ArgSGPRs.size())
1592 report_fatal_error("ran out of SGPRs for arguments");
1593
1594 unsigned Reg = ArgSGPRs[RegIdx];
1595 Reg = CCInfo.AllocateReg(Reg);
1596 assert(Reg != AMDGPU::NoRegister);
1597
1598 MachineFunction &MF = CCInfo.getMachineFunction();
1599 MF.addLiveIn(Reg, RC);
1600 return ArgDescriptor::createRegister(Reg);
1601}
1602
1603static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1604 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1605}
1606
1607static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1608 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1609}
1610
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001611static void allocateSpecialInputVGPRs(CCState &CCInfo,
1612 MachineFunction &MF,
1613 const SIRegisterInfo &TRI,
1614 SIMachineFunctionInfo &Info) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001615 if (Info.hasWorkItemIDX())
1616 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001617
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001618 if (Info.hasWorkItemIDY())
1619 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001620
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001621 if (Info.hasWorkItemIDZ())
1622 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1623}
1624
1625static void allocateSpecialInputSGPRs(CCState &CCInfo,
1626 MachineFunction &MF,
1627 const SIRegisterInfo &TRI,
1628 SIMachineFunctionInfo &Info) {
1629 auto &ArgInfo = Info.getArgInfo();
1630
1631 // TODO: Unify handling with private memory pointers.
1632
1633 if (Info.hasDispatchPtr())
1634 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1635
1636 if (Info.hasQueuePtr())
1637 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1638
1639 if (Info.hasKernargSegmentPtr())
1640 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1641
1642 if (Info.hasDispatchID())
1643 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1644
1645 // flat_scratch_init is not applicable for non-kernel functions.
1646
1647 if (Info.hasWorkGroupIDX())
1648 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1649
1650 if (Info.hasWorkGroupIDY())
1651 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1652
1653 if (Info.hasWorkGroupIDZ())
1654 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
Matt Arsenault817c2532017-08-03 23:12:44 +00001655
1656 if (Info.hasImplicitArgPtr())
1657 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001658}
1659
1660// Allocate special inputs passed in user SGPRs.
1661static void allocateHSAUserSGPRs(CCState &CCInfo,
1662 MachineFunction &MF,
1663 const SIRegisterInfo &TRI,
1664 SIMachineFunctionInfo &Info) {
Matt Arsenault10fc0622017-06-26 03:01:31 +00001665 if (Info.hasImplicitBufferPtr()) {
1666 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1667 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1668 CCInfo.AllocateReg(ImplicitBufferPtrReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001669 }
1670
1671 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1672 if (Info.hasPrivateSegmentBuffer()) {
1673 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1674 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1675 CCInfo.AllocateReg(PrivateSegmentBufferReg);
1676 }
1677
1678 if (Info.hasDispatchPtr()) {
1679 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1680 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1681 CCInfo.AllocateReg(DispatchPtrReg);
1682 }
1683
1684 if (Info.hasQueuePtr()) {
1685 unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1686 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1687 CCInfo.AllocateReg(QueuePtrReg);
1688 }
1689
1690 if (Info.hasKernargSegmentPtr()) {
1691 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1692 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1693 CCInfo.AllocateReg(InputPtrReg);
1694 }
1695
1696 if (Info.hasDispatchID()) {
1697 unsigned DispatchIDReg = Info.addDispatchID(TRI);
1698 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1699 CCInfo.AllocateReg(DispatchIDReg);
1700 }
1701
1702 if (Info.hasFlatScratchInit()) {
1703 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1704 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1705 CCInfo.AllocateReg(FlatScratchInitReg);
1706 }
1707
1708 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1709 // these from the dispatch pointer.
1710}
1711
1712// Allocate special input registers that are initialized per-wave.
1713static void allocateSystemSGPRs(CCState &CCInfo,
1714 MachineFunction &MF,
1715 SIMachineFunctionInfo &Info,
Marek Olsak584d2c02017-05-04 22:25:20 +00001716 CallingConv::ID CallConv,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001717 bool IsShader) {
1718 if (Info.hasWorkGroupIDX()) {
1719 unsigned Reg = Info.addWorkGroupIDX();
1720 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1721 CCInfo.AllocateReg(Reg);
1722 }
1723
1724 if (Info.hasWorkGroupIDY()) {
1725 unsigned Reg = Info.addWorkGroupIDY();
1726 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1727 CCInfo.AllocateReg(Reg);
1728 }
1729
1730 if (Info.hasWorkGroupIDZ()) {
1731 unsigned Reg = Info.addWorkGroupIDZ();
1732 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1733 CCInfo.AllocateReg(Reg);
1734 }
1735
1736 if (Info.hasWorkGroupInfo()) {
1737 unsigned Reg = Info.addWorkGroupInfo();
1738 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1739 CCInfo.AllocateReg(Reg);
1740 }
1741
1742 if (Info.hasPrivateSegmentWaveByteOffset()) {
1743 // Scratch wave offset passed in system SGPR.
1744 unsigned PrivateSegmentWaveByteOffsetReg;
1745
1746 if (IsShader) {
Marek Olsak584d2c02017-05-04 22:25:20 +00001747 PrivateSegmentWaveByteOffsetReg =
1748 Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1749
1750 // This is true if the scratch wave byte offset doesn't have a fixed
1751 // location.
1752 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1753 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1754 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1755 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001756 } else
1757 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1758
1759 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1760 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1761 }
1762}
1763
1764static void reservePrivateMemoryRegs(const TargetMachine &TM,
1765 MachineFunction &MF,
1766 const SIRegisterInfo &TRI,
Matt Arsenault1cc47f82017-07-18 16:44:56 +00001767 SIMachineFunctionInfo &Info) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001768 // Now that we've figured out where the scratch register inputs are, see if
 1769 // we should reserve the arguments and use them directly.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001770 MachineFrameInfo &MFI = MF.getFrameInfo();
1771 bool HasStackObjects = MFI.hasStackObjects();
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001772 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001773
1774 // Record that we know we have non-spill stack objects so we don't need to
1775 // check all stack objects later.
1776 if (HasStackObjects)
1777 Info.setHasNonSpillStackObjects(true);
1778
1779 // Everything live out of a block is spilled with fast regalloc, so it's
1780 // almost certain that spilling will be required.
1781 if (TM.getOptLevel() == CodeGenOpt::None)
1782 HasStackObjects = true;
1783
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001784 // For now assume stack access is needed in any callee functions, so we need
1785 // the scratch registers to pass in.
1786 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1787
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001788 if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1789 // If we have stack objects, we unquestionably need the private buffer
1790 // resource. For the Code Object V2 ABI, this will be the first 4 user
1791 // SGPR inputs. We can reserve those and use them directly.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001792
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001793 unsigned PrivateSegmentBufferReg =
1794 Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1795 Info.setScratchRSrcReg(PrivateSegmentBufferReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001796 } else {
1797 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001798 // We tentatively reserve the last registers (skipping the last few
 1799 // registers, which may contain VCC, FLAT_SCR, and XNACK). After register
 1800 // allocation, we'll replace these with the ones immediately after those
 1801 // which were really allocated. In the prologue, copies will be inserted
 1802 // from the argument to these reserved registers.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001803
1804 // Without HSA, relocations are used for the scratch pointer and the
1805 // buffer resource setup is always inserted in the prologue. Scratch wave
1806 // offset is still in an input SGPR.
1807 Info.setScratchRSrcReg(ReservedBufferReg);
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001808 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001809
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001810 // This should be accurate for kernels even before the frame is finalized.
1811 const bool HasFP = ST.getFrameLowering()->hasFP(MF);
1812 if (HasFP) {
1813 unsigned ReservedOffsetReg =
1814 TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1815 MachineRegisterInfo &MRI = MF.getRegInfo();
1816
1817 // Try to use s32 as the SP, but move it if it would interfere with input
1818 // arguments. This won't work with calls though.
1819 //
1820 // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1821 // registers.
1822 if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1823 Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001824 } else {
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001825 assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1826
1827 if (MFI.hasCalls())
1828 report_fatal_error("call in graphics shader with too many input SGPRs");
1829
1830 for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1831 if (!MRI.isLiveIn(Reg)) {
1832 Info.setStackPtrOffsetReg(Reg);
1833 break;
1834 }
1835 }
1836
1837 if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1838 report_fatal_error("failed to find register for SP");
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001839 }
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001840
1841 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1842 Info.setFrameOffsetReg(ReservedOffsetReg);
1843 } else if (RequiresStackAccess) {
1844 assert(!MFI.hasCalls());
1845 // We know there are accesses and they will be done relative to SP, so just
1846 // pin it to the input.
1847 //
1848 // FIXME: Should not do this if inline asm is reading/writing these
1849 // registers.
1850 unsigned PreloadedSP = Info.getPreloadedReg(
1851 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1852
1853 Info.setStackPtrOffsetReg(PreloadedSP);
1854 Info.setScratchWaveOffsetReg(PreloadedSP);
1855 Info.setFrameOffsetReg(PreloadedSP);
1856 } else {
1857 assert(!MFI.hasCalls());
1858
1859 // There may not be stack access at all. There may still be spills, or
1860 // access of a constant pointer (in which cases an extra copy will be
1861 // emitted in the prolog).
1862 unsigned ReservedOffsetReg
1863 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1864 Info.setStackPtrOffsetReg(ReservedOffsetReg);
1865 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1866 Info.setFrameOffsetReg(ReservedOffsetReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001867 }
1868}
1869
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001870bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1871 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1872 return !Info->isEntryFunction();
1873}
1874
1875void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1876
1877}
1878
1879void SITargetLowering::insertCopiesSplitCSR(
1880 MachineBasicBlock *Entry,
1881 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1882 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1883
1884 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1885 if (!IStart)
1886 return;
1887
1888 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1889 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1890 MachineBasicBlock::iterator MBBI = Entry->begin();
1891 for (const MCPhysReg *I = IStart; *I; ++I) {
1892 const TargetRegisterClass *RC = nullptr;
1893 if (AMDGPU::SReg_64RegClass.contains(*I))
1894 RC = &AMDGPU::SGPR_64RegClass;
1895 else if (AMDGPU::SReg_32RegClass.contains(*I))
1896 RC = &AMDGPU::SGPR_32RegClass;
1897 else
1898 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1899
1900 unsigned NewVR = MRI->createVirtualRegister(RC);
1901 // Create copy from CSR to a virtual register.
1902 Entry->addLiveIn(*I);
1903 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1904 .addReg(*I);
1905
1906 // Insert the copy-back instructions right before the terminator.
1907 for (auto *Exit : Exits)
1908 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1909 TII->get(TargetOpcode::COPY), *I)
1910 .addReg(NewVR);
1911 }
1912}
1913
Christian Konig2c8f6d52013-03-07 09:03:52 +00001914SDValue SITargetLowering::LowerFormalArguments(
Eric Christopher7792e322015-01-30 23:24:40 +00001915 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001916 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1917 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001918 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001919
1920 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001921 const Function &Fn = MF.getFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00001922 FunctionType *FType = MF.getFunction().getFunctionType();
Christian Konig99ee0f42013-03-07 09:04:14 +00001923 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001924
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +00001925 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00001926 DiagnosticInfoUnsupported NoGraphicsHSA(
Matthias Braunf1caa282017-12-15 22:22:58 +00001927 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
Matt Arsenaultd48da142015-11-02 23:23:02 +00001928 DAG.getContext()->diagnose(NoGraphicsHSA);
Diana Picus81bc3172016-05-26 15:24:55 +00001929 return DAG.getEntryNode();
Matt Arsenaultd48da142015-11-02 23:23:02 +00001930 }
1931
Christian Konig2c8f6d52013-03-07 09:03:52 +00001932 SmallVector<ISD::InputArg, 16> Splits;
Christian Konig2c8f6d52013-03-07 09:03:52 +00001933 SmallVector<CCValAssign, 16> ArgLocs;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001934 BitVector Skipped(Ins.size());
Eric Christopherb5217502014-08-06 18:45:26 +00001935 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1936 *DAG.getContext());
Christian Konig2c8f6d52013-03-07 09:03:52 +00001937
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001938 bool IsShader = AMDGPU::isShader(CallConv);
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +00001939 bool IsKernel = AMDGPU::isKernel(CallConv);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001940 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
Christian Konig99ee0f42013-03-07 09:04:14 +00001941
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001942 if (IsShader) {
1943 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1944
1945 // At least one interpolation mode must be enabled or else the GPU will
1946 // hang.
1947 //
1948 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1949 // set PSInputAddr, the user wants to enable some bits after the compilation
1950 // based on run-time states. Since we can't know what the final PSInputEna
 1951 // will look like, we shouldn't do anything here and the user should take
1952 // responsibility for the correct programming.
1953 //
1954 // Otherwise, the following restrictions apply:
1955 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1956 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1957 // enabled too.
Tim Renoufc8ffffe2017-10-12 16:16:41 +00001958 if (CallConv == CallingConv::AMDGPU_PS) {
1959 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1960 ((Info->getPSInputAddr() & 0xF) == 0 &&
1961 Info->isPSInputAllocated(11))) {
1962 CCInfo.AllocateReg(AMDGPU::VGPR0);
1963 CCInfo.AllocateReg(AMDGPU::VGPR1);
1964 Info->markPSInputAllocated(0);
1965 Info->markPSInputEnabled(0);
1966 }
1967 if (Subtarget->isAmdPalOS()) {
1968 // For isAmdPalOS, the user does not enable some bits after compilation
1969 // based on run-time states; the register values being generated here are
1970 // the final ones set in hardware. Therefore we need to apply the
1971 // workaround to PSInputAddr and PSInputEnable together. (The case where
1972 // a bit is set in PSInputAddr but not PSInputEnable is where the
1973 // frontend set up an input arg for a particular interpolation mode, but
1974 // nothing uses that input arg. Really we should have an earlier pass
1975 // that removes such an arg.)
1976 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1977 if ((PsInputBits & 0x7F) == 0 ||
1978 ((PsInputBits & 0xF) == 0 &&
1979 (PsInputBits >> 11 & 1)))
1980 Info->markPSInputEnabled(
1981 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1982 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001983 }
1984
Tom Stellard2f3f9852017-01-25 01:25:13 +00001985 assert(!Info->hasDispatchPtr() &&
Tom Stellardf110f8f2016-04-14 16:27:03 +00001986 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1987 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1988 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1989 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1990 !Info->hasWorkItemIDZ());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001991 } else if (IsKernel) {
1992 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001993 } else {
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001994 Splits.append(Ins.begin(), Ins.end());
Tom Stellardaf775432013-10-23 00:44:32 +00001995 }
1996
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001997 if (IsEntryFunc) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001998 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001999 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
Tom Stellard2f3f9852017-01-25 01:25:13 +00002000 }
2001
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002002 if (IsKernel) {
Tom Stellardbbeb45a2016-09-16 21:53:00 +00002003 analyzeFormalArgumentsCompute(CCInfo, Ins);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002004 } else {
2005 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2006 CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2007 }
Christian Konig2c8f6d52013-03-07 09:03:52 +00002008
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002009 SmallVector<SDValue, 16> Chains;
2010
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002011 // FIXME: This is the minimum kernel argument alignment. We should improve
2012 // this to the maximum alignment of the arguments.
2013 //
 2014 // FIXME: Alignment of explicit arguments is totally broken with non-0 explicit
2015 // kern arg offset.
2016 const unsigned KernelArgBaseAlign = 16;
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002017
2018 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
Christian Konigb7be72d2013-05-17 09:46:48 +00002019 const ISD::InputArg &Arg = Ins[i];
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00002020 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
Christian Konigb7be72d2013-05-17 09:46:48 +00002021 InVals.push_back(DAG.getUNDEF(Arg.VT));
Christian Konig99ee0f42013-03-07 09:04:14 +00002022 continue;
2023 }
2024
Christian Konig2c8f6d52013-03-07 09:03:52 +00002025 CCValAssign &VA = ArgLocs[ArgIdx++];
Craig Topper7f416c82014-11-16 21:17:18 +00002026 MVT VT = VA.getLocVT();
Tom Stellarded882c22013-06-03 17:40:11 +00002027
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002028 if (IsEntryFunc && VA.isMemLoc()) {
Tom Stellardaf775432013-10-23 00:44:32 +00002029 VT = Ins[i].VT;
Tom Stellardbbeb45a2016-09-16 21:53:00 +00002030 EVT MemVT = VA.getLocVT();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002031
Matt Arsenault4bec7d42018-07-20 09:05:08 +00002032 const uint64_t Offset = VA.getLocMemOffset();
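      // The kernarg segment base is only assumed to be 16-byte aligned, so
      // e.g. an argument at byte offset 6 can only be given alignment
      // MinAlign(16, 6) == 2 here.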
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002033 unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002034
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002035 SDValue Arg = lowerKernargMemParameter(
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002036 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002037 Chains.push_back(Arg.getValue(1));
Tom Stellardca7ecf32014-08-22 18:49:31 +00002038
Craig Toppere3dcce92015-08-01 22:20:21 +00002039 auto *ParamTy =
Andrew Trick05938a52015-02-16 18:10:47 +00002040 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
Tom Stellard5bfbae52018-07-11 20:59:01 +00002041 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00002042 ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2043 ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
Tom Stellardca7ecf32014-08-22 18:49:31 +00002044 // On SI local pointers are just offsets into LDS, so they are always
2045 // less than 16-bits. On CI and newer they could potentially be
2046 // real pointers, so we can't guarantee their size.
2047 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2048 DAG.getValueType(MVT::i16));
2049 }
2050
Tom Stellarded882c22013-06-03 17:40:11 +00002051 InVals.push_back(Arg);
2052 continue;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002053 } else if (!IsEntryFunc && VA.isMemLoc()) {
2054 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2055 InVals.push_back(Val);
2056 if (!Arg.Flags.isByVal())
2057 Chains.push_back(Val.getValue(1));
2058 continue;
Tom Stellarded882c22013-06-03 17:40:11 +00002059 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002060
Christian Konig2c8f6d52013-03-07 09:03:52 +00002061 assert(VA.isRegLoc() && "Parameter must be in a register!");
2062
2063 unsigned Reg = VA.getLocReg();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002064 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
Matt Arsenaultb3463552017-07-15 05:52:59 +00002065 EVT ValVT = VA.getValVT();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002066
2067 Reg = MF.addLiveIn(Reg, RC);
2068 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2069
Matt Arsenault5c714cb2019-05-23 19:38:14 +00002070 if (Arg.Flags.isSRet()) {
Matt Arsenault45b98182017-11-15 00:45:43 +00002071 // The return object should be reasonably addressable.
2072
 2073 // FIXME: This helps when the return is a real sret. If it is an
2074 // automatically inserted sret (i.e. CanLowerReturn returns false), an
2075 // extra copy is inserted in SelectionDAGBuilder which obscures this.
Matt Arsenault5c714cb2019-05-23 19:38:14 +00002076 unsigned NumBits
2077 = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
Matt Arsenault45b98182017-11-15 00:45:43 +00002078 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2079 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2080 }
2081
Matt Arsenaultb3463552017-07-15 05:52:59 +00002082 // If this is an 8 or 16-bit value, it is really passed promoted
2083 // to 32 bits. Insert an assert[sz]ext to capture this, then
2084 // truncate to the right size.
2085 switch (VA.getLocInfo()) {
2086 case CCValAssign::Full:
2087 break;
2088 case CCValAssign::BCvt:
2089 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2090 break;
2091 case CCValAssign::SExt:
2092 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2093 DAG.getValueType(ValVT));
2094 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2095 break;
2096 case CCValAssign::ZExt:
2097 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2098 DAG.getValueType(ValVT));
2099 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2100 break;
2101 case CCValAssign::AExt:
2102 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2103 break;
2104 default:
2105 llvm_unreachable("Unknown loc info!");
2106 }
2107
Christian Konig2c8f6d52013-03-07 09:03:52 +00002108 InVals.push_back(Val);
2109 }
Tom Stellarde99fb652015-01-20 19:33:04 +00002110
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002111 if (!IsEntryFunc) {
2112 // Special inputs come after user arguments.
2113 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2114 }
2115
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002116 // Start adding system SGPRs.
2117 if (IsEntryFunc) {
2118 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002119 } else {
2120 CCInfo.AllocateReg(Info->getScratchRSrcReg());
2121 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2122 CCInfo.AllocateReg(Info->getFrameOffsetReg());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002123 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002124 }
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002125
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002126 auto &ArgUsageInfo =
2127 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00002128 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002129
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002130 unsigned StackArgSize = CCInfo.getNextStackOffset();
2131 Info->setBytesInStackArgArea(StackArgSize);
2132
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002133 return Chains.empty() ? Chain :
2134 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
Christian Konig2c8f6d52013-03-07 09:03:52 +00002135}
2136
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002137// TODO: If return values can't fit in registers, we should return as many as
2138// possible in registers before passing on stack.
2139bool SITargetLowering::CanLowerReturn(
2140 CallingConv::ID CallConv,
2141 MachineFunction &MF, bool IsVarArg,
2142 const SmallVectorImpl<ISD::OutputArg> &Outs,
2143 LLVMContext &Context) const {
2144 // Replacing returns with sret/stack usage doesn't make sense for shaders.
2145 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2146 // for shaders. Vector types should be explicitly handled by CC.
2147 if (AMDGPU::isEntryFunctionCC(CallConv))
2148 return true;
2149
2150 SmallVector<CCValAssign, 16> RVLocs;
2151 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2152 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2153}
2154
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002155SDValue
2156SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2157 bool isVarArg,
2158 const SmallVectorImpl<ISD::OutputArg> &Outs,
2159 const SmallVectorImpl<SDValue> &OutVals,
2160 const SDLoc &DL, SelectionDAG &DAG) const {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002161 MachineFunction &MF = DAG.getMachineFunction();
2162 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2163
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002164 if (AMDGPU::isKernel(CallConv)) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002165 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2166 OutVals, DL, DAG);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002167 }
2168
2169 bool IsShader = AMDGPU::isShader(CallConv);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002170
Matt Arsenault55ab9212018-08-01 19:57:34 +00002171 Info->setIfReturnsVoid(Outs.empty());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002172 bool IsWaveEnd = Info->returnsVoid() && IsShader;
Marek Olsak8e9cc632016-01-13 17:23:09 +00002173
Marek Olsak8a0f3352016-01-13 17:23:04 +00002174 // CCValAssign - represent the assignment of the return value to a location.
2175 SmallVector<CCValAssign, 48> RVLocs;
Matt Arsenault55ab9212018-08-01 19:57:34 +00002176 SmallVector<ISD::OutputArg, 48> Splits;
Marek Olsak8a0f3352016-01-13 17:23:04 +00002177
2178 // CCState - Info about the registers and stack slots.
2179 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2180 *DAG.getContext());
2181
2182 // Analyze outgoing return values.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002183 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
Marek Olsak8a0f3352016-01-13 17:23:04 +00002184
2185 SDValue Flag;
2186 SmallVector<SDValue, 48> RetOps;
2187 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2188
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002189 // Add return address for callable functions.
2190 if (!Info->isEntryFunction()) {
2191 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2192 SDValue ReturnAddrReg = CreateLiveInRegister(
2193 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2194
2195 // FIXME: Should be able to use a vreg here, but need a way to prevent it
 2196 // from being allocated to a CSR.
2197
2198 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2199 MVT::i64);
2200
2201 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2202 Flag = Chain.getValue(1);
2203
2204 RetOps.push_back(PhysReturnAddrReg);
2205 }
2206
Marek Olsak8a0f3352016-01-13 17:23:04 +00002207 // Copy the result values into the output registers.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002208 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2209 ++I, ++RealRVLocIdx) {
2210 CCValAssign &VA = RVLocs[I];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002211 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002212 // TODO: Partially return in registers if return values don't fit.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002213 SDValue Arg = OutVals[RealRVLocIdx];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002214
2215 // Copied from other backends.
2216 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002217 case CCValAssign::Full:
2218 break;
2219 case CCValAssign::BCvt:
2220 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2221 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002222 case CCValAssign::SExt:
2223 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2224 break;
2225 case CCValAssign::ZExt:
2226 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2227 break;
2228 case CCValAssign::AExt:
2229 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2230 break;
2231 default:
2232 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002233 }
2234
2235 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2236 Flag = Chain.getValue(1);
2237 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2238 }
2239
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002240 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002241 if (!Info->isEntryFunction()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002242 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002243 const MCPhysReg *I =
2244 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2245 if (I) {
2246 for (; *I; ++I) {
2247 if (AMDGPU::SReg_64RegClass.contains(*I))
2248 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2249 else if (AMDGPU::SReg_32RegClass.contains(*I))
2250 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2251 else
2252 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2253 }
2254 }
2255 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002256
Marek Olsak8a0f3352016-01-13 17:23:04 +00002257 // Update chain and glue.
2258 RetOps[0] = Chain;
2259 if (Flag.getNode())
2260 RetOps.push_back(Flag);
2261
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002262 unsigned Opc = AMDGPUISD::ENDPGM;
2263 if (!IsWaveEnd)
2264 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002265 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002266}
2267
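// Lower the values returned by a call: assign each return value to a location
// with the return calling convention, copy it out of its physical register,
// and undo any extension or bitcast applied on the return path.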
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002268SDValue SITargetLowering::LowerCallResult(
2269 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2270 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2271 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2272 SDValue ThisVal) const {
2273 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2274
2275 // Assign locations to each value returned by this call.
2276 SmallVector<CCValAssign, 16> RVLocs;
2277 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2278 *DAG.getContext());
2279 CCInfo.AnalyzeCallResult(Ins, RetCC);
2280
2281 // Copy all of the result registers out of their specified physreg.
2282 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2283 CCValAssign VA = RVLocs[i];
2284 SDValue Val;
2285
2286 if (VA.isRegLoc()) {
2287 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2288 Chain = Val.getValue(1);
2289 InFlag = Val.getValue(2);
2290 } else if (VA.isMemLoc()) {
2291 report_fatal_error("TODO: return values in memory");
2292 } else
2293 llvm_unreachable("unknown argument location type");
2294
2295 switch (VA.getLocInfo()) {
2296 case CCValAssign::Full:
2297 break;
2298 case CCValAssign::BCvt:
2299 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2300 break;
2301 case CCValAssign::ZExt:
2302 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2303 DAG.getValueType(VA.getValVT()));
2304 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2305 break;
2306 case CCValAssign::SExt:
2307 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2308 DAG.getValueType(VA.getValVT()));
2309 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2310 break;
2311 case CCValAssign::AExt:
2312 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2313 break;
2314 default:
2315 llvm_unreachable("Unknown loc info!");
2316 }
2317
2318 InVals.push_back(Val);
2319 }
2320
2321 return Chain;
2322}
2323
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002324// Add code to pass the special inputs required by the features in use,
2325// separately from the explicit user arguments present in the IR.
2326void SITargetLowering::passSpecialInputs(
2327 CallLoweringInfo &CLI,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002328 CCState &CCInfo,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002329 const SIMachineFunctionInfo &Info,
2330 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2331 SmallVectorImpl<SDValue> &MemOpChains,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002332 SDValue Chain) const {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002333 // If we don't have a call site, this was a call inserted by
2334 // legalization. These can never use special inputs.
2335 if (!CLI.CS)
2336 return;
2337
2338 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002339 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002340
2341 SelectionDAG &DAG = CLI.DAG;
2342 const SDLoc &DL = CLI.DL;
2343
Tom Stellardc5a154d2018-06-28 23:47:12 +00002344 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002345
2346 auto &ArgUsageInfo =
2347 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2348 const AMDGPUFunctionArgInfo &CalleeArgInfo
2349 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2350
2351 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2352
2353 // TODO: Unify with private memory register handling. This is complicated by
2354 // the fact that at least in kernels, the input argument is not necessarily
2355 // in the same location as the input.
2356 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2357 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2358 AMDGPUFunctionArgInfo::QUEUE_PTR,
2359 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2360 AMDGPUFunctionArgInfo::DISPATCH_ID,
2361 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2362 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2363 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2364 AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2365 AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
Matt Arsenault817c2532017-08-03 23:12:44 +00002366 AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2367 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002368 };
2369
2370 for (auto InputID : InputRegs) {
2371 const ArgDescriptor *OutgoingArg;
2372 const TargetRegisterClass *ArgRC;
2373
2374 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2375 if (!OutgoingArg)
2376 continue;
2377
2378 const ArgDescriptor *IncomingArg;
2379 const TargetRegisterClass *IncomingArgRC;
2380 std::tie(IncomingArg, IncomingArgRC)
2381 = CallerArgInfo.getPreloadedValue(InputID);
2382 assert(IncomingArgRC == ArgRC);
2383
2384 // All special arguments are ints for now.
2385 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
Matt Arsenault817c2532017-08-03 23:12:44 +00002386 SDValue InputReg;
2387
2388 if (IncomingArg) {
2389 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2390 } else {
2391 // The implicit arg ptr is special because it doesn't have a corresponding
2392 // input for kernels, and is computed from the kernarg segment pointer.
2393 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2394 InputReg = getImplicitArgPtr(DAG, DL);
2395 }
2396
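    // Pass the value in the register the callee expects when it has one;
    // otherwise reserve a stack slot for the input and store it there.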
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002397 if (OutgoingArg->isRegister()) {
2398 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2399 } else {
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002400 unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2401 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2402 SpecialArgOffset);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002403 MemOpChains.push_back(ArgStore);
2404 }
2405 }
2406}
2407
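/// Return true if the calling convention can guarantee tail-call optimization
/// whenever it is requested (currently only fastcc).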
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002408static bool canGuaranteeTCO(CallingConv::ID CC) {
2409 return CC == CallingConv::Fast;
2410}
2411
2412/// Return true if we might ever do TCO for calls with this calling convention.
2413static bool mayTailCallThisCC(CallingConv::ID CC) {
2414 switch (CC) {
2415 case CallingConv::C:
2416 return true;
2417 default:
2418 return canGuaranteeTCO(CC);
2419 }
2420}
2421
2422bool SITargetLowering::isEligibleForTailCallOptimization(
2423 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2424 const SmallVectorImpl<ISD::OutputArg> &Outs,
2425 const SmallVectorImpl<SDValue> &OutVals,
2426 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2427 if (!mayTailCallThisCC(CalleeCC))
2428 return false;
2429
2430 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002431 const Function &CallerF = MF.getFunction();
2432 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002433 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2434 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2435
2436 // Kernels aren't callable, and don't have a live-in return address, so it
2437 // doesn't make sense to do a tail call with entry functions.
2438 if (!CallerPreserved)
2439 return false;
2440
2441 bool CCMatch = CallerCC == CalleeCC;
2442
2443 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2444 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2445 return true;
2446 return false;
2447 }
2448
2449 // TODO: Can we handle var args?
2450 if (IsVarArg)
2451 return false;
2452
Matthias Braunf1caa282017-12-15 22:22:58 +00002453 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002454 if (Arg.hasByValAttr())
2455 return false;
2456 }
2457
2458 LLVMContext &Ctx = *DAG.getContext();
2459
2460 // Check that the call results are passed in the same way.
2461 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2462 CCAssignFnForCall(CalleeCC, IsVarArg),
2463 CCAssignFnForCall(CallerCC, IsVarArg)))
2464 return false;
2465
2466 // The callee has to preserve all registers the caller needs to preserve.
2467 if (!CCMatch) {
2468 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2469 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2470 return false;
2471 }
2472
2473 // Nothing more to check if the callee is taking no arguments.
2474 if (Outs.empty())
2475 return true;
2476
2477 SmallVector<CCValAssign, 16> ArgLocs;
2478 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2479
2480 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2481
2482 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2483 // If the stack arguments for this call do not fit into our own save area,
2484 // then the call cannot be made a tail call.
2485 // TODO: Is this really necessary?
2486 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2487 return false;
2488
2489 const MachineRegisterInfo &MRI = MF.getRegInfo();
2490 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2491}
2492
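// A call may only be emitted as a tail call if the IR call site is marked as a
// tail call, the caller is not an entry function, and tail calls have not been
// disabled with the "disable-tail-calls" function attribute.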
2493bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2494 if (!CI->isTailCall())
2495 return false;
2496
2497 const Function *ParentFn = CI->getParent()->getParent();
2498 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2499 return false;
2500
2501 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2502 return (Attr.getValueAsString() != "true");
2503}
2504
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002505// The wave scratch offset register is used as the global base pointer.
2506SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2507 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002508 SelectionDAG &DAG = CLI.DAG;
2509 const SDLoc &DL = CLI.DL;
2510 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2511 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2512 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2513 SDValue Chain = CLI.Chain;
2514 SDValue Callee = CLI.Callee;
2515 bool &IsTailCall = CLI.IsTailCall;
2516 CallingConv::ID CallConv = CLI.CallConv;
2517 bool IsVarArg = CLI.IsVarArg;
2518 bool IsSibCall = false;
2519 bool IsThisReturn = false;
2520 MachineFunction &MF = DAG.getMachineFunction();
2521
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002522 if (IsVarArg) {
2523 return lowerUnhandledCall(CLI, InVals,
2524 "unsupported call to variadic function ");
2525 }
2526
Matt Arsenault935f3b72018-08-08 16:58:39 +00002527 if (!CLI.CS.getInstruction())
2528 report_fatal_error("unsupported libcall legalization");
2529
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002530 if (!CLI.CS.getCalledFunction()) {
2531 return lowerUnhandledCall(CLI, InVals,
2532 "unsupported indirect call to function ");
2533 }
2534
2535 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2536 return lowerUnhandledCall(CLI, InVals,
2537 "unsupported required tail call to function ");
2538 }
2539
Matt Arsenault1fb90132018-06-28 10:18:36 +00002540 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2541 // Note the issue is with the CC of the calling function, not of the call
2542 // itself.
2543 return lowerUnhandledCall(CLI, InVals,
2544 "unsupported call from graphics shader of function ");
2545 }
2546
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002547 if (IsTailCall) {
2548 IsTailCall = isEligibleForTailCallOptimization(
2549 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2550 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2551 report_fatal_error("failed to perform tail call elimination on a call "
2552 "site marked musttail");
2553 }
2554
2555 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2556
2557 // A sibling call is one where we're under the usual C ABI and not planning
2558 // to change that but can still do a tail call:
2559 if (!TailCallOpt && IsTailCall)
2560 IsSibCall = true;
2561
2562 if (IsTailCall)
2563 ++NumTailCalls;
2564 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002565
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002566 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2567
2568 // Analyze operands of the call, assigning locations to each operand.
2569 SmallVector<CCValAssign, 16> ArgLocs;
2570 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2571 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002572
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002573 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2574
2575 // Get a count of how many bytes are to be pushed on the stack.
2576 unsigned NumBytes = CCInfo.getNextStackOffset();
2577
2578 if (IsSibCall) {
2579 // Since we're not changing the ABI to make this a tail call, the memory
2580 // operands are already available in the caller's incoming argument space.
2581 NumBytes = 0;
2582 }
2583
2584 // FPDiff is the byte offset of the call's argument area from the callee's.
2585 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2586 // by this amount for a tail call. In a sibling call it must be 0 because the
2587 // caller will deallocate the entire stack and the callee still expects its
2588 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002589 int32_t FPDiff = 0;
2590 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002591 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2592
Matt Arsenault6efd0822017-09-14 17:14:57 +00002593 SDValue CallerSavedFP;
2594
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002595 // Adjust the stack pointer for the new arguments...
2596 // These operations are automatically eliminated by the prolog/epilog pass
2597 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002598 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002599
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002600 SmallVector<SDValue, 4> CopyFromChains;
2601
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002602 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2603
2604 // In the HSA case, this should be an identity copy.
2605 SDValue ScratchRSrcReg
2606 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2607 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002608 CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002609
2610 // TODO: Don't hardcode these registers; get them from the callee function.
2611 SDValue ScratchWaveOffsetReg
2612 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2613 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002614 CopyFromChains.push_back(ScratchWaveOffsetReg.getValue(1));
Matt Arsenault6efd0822017-09-14 17:14:57 +00002615
2616 if (!Info->isEntryFunction()) {
2617 // Avoid clobbering this function's FP value. In the current convention
2618 // callee will overwrite this, so do save/restore around the call site.
2619 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2620 Info->getFrameOffsetReg(), MVT::i32);
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002621 CopyFromChains.push_back(CallerSavedFP.getValue(1));
Matt Arsenault6efd0822017-09-14 17:14:57 +00002622 }
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002623
2624 Chain = DAG.getTokenFactor(DL, CopyFromChains);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002625 }
2626
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002627 SmallVector<SDValue, 8> MemOpChains;
2628 MVT PtrVT = MVT::i32;
2629
2630 // Walk the register/memloc assignments, inserting copies/loads.
2631 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2632 ++i, ++realArgIdx) {
2633 CCValAssign &VA = ArgLocs[i];
2634 SDValue Arg = OutVals[realArgIdx];
2635
2636 // Promote the value if needed.
2637 switch (VA.getLocInfo()) {
2638 case CCValAssign::Full:
2639 break;
2640 case CCValAssign::BCvt:
2641 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2642 break;
2643 case CCValAssign::ZExt:
2644 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2645 break;
2646 case CCValAssign::SExt:
2647 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2648 break;
2649 case CCValAssign::AExt:
2650 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2651 break;
2652 case CCValAssign::FPExt:
2653 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2654 break;
2655 default:
2656 llvm_unreachable("Unknown loc info!");
2657 }
2658
2659 if (VA.isRegLoc()) {
2660 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2661 } else {
2662 assert(VA.isMemLoc());
2663
2664 SDValue DstAddr;
2665 MachinePointerInfo DstInfo;
2666
2667 unsigned LocMemOffset = VA.getLocMemOffset();
2668 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002669
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002670 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002671 unsigned Align = 0;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002672
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002673 if (IsTailCall) {
2674 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2675 unsigned OpSize = Flags.isByVal() ?
2676 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002677
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002678 // FIXME: We can have better than the minimum byval required alignment.
2679 Align = Flags.isByVal() ? Flags.getByValAlign() :
2680 MinAlign(Subtarget->getStackAlignment(), Offset);
2681
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002682 Offset = Offset + FPDiff;
2683 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2684
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002685 DstAddr = DAG.getFrameIndex(FI, PtrVT);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002686 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2687
2688 // Make sure any stack arguments overlapping with where we're storing
2689 // are loaded before this eventual operation. Otherwise they'll be
2690 // clobbered.
2691
2692 // FIXME: Why is this really necessary? This seems to just result in a
2693 // lot of code to copy the stack values and write them back to the same
2694 // locations, which are supposed to be immutable?
2695 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2696 } else {
2697 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002698 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002699 Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002700 }
2701
2702 if (Outs[i].Flags.isByVal()) {
2703 SDValue SizeNode =
2704 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2705 SDValue Cpy = DAG.getMemcpy(
2706 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2707 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002708 /*isTailCall = */ false, DstInfo,
2709 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
Matt Arsenault0da63502018-08-31 05:49:54 +00002710 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002711
2712 MemOpChains.push_back(Cpy);
2713 } else {
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002714 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002715 MemOpChains.push_back(Store);
2716 }
2717 }
2718 }
2719
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002720 // Copy special input registers after user input arguments.
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002721 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002722
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002723 if (!MemOpChains.empty())
2724 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2725
2726 // Build a sequence of copy-to-reg nodes chained together with token chain
2727 // and flag operands which copy the outgoing args into the appropriate regs.
2728 SDValue InFlag;
2729 for (auto &RegToPass : RegsToPass) {
2730 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2731 RegToPass.second, InFlag);
2732 InFlag = Chain.getValue(1);
2733 }
2734
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002735
2736 SDValue PhysReturnAddrReg;
2737 if (IsTailCall) {
2738 // Since the return is being combined with the call, we need to pass on the
2739 // return address.
2740
2741 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2742 SDValue ReturnAddrReg = CreateLiveInRegister(
2743 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2744
2745 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2746 MVT::i64);
2747 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2748 InFlag = Chain.getValue(1);
2749 }
2750
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002751 // We don't usually want to end the call-sequence here because we would tidy
2752 // the frame up *after* the call, however in the ABI-changing tail-call case
2753 // we've carefully laid out the parameters so that when sp is reset they'll be
2754 // in the correct location.
2755 if (IsTailCall && !IsSibCall) {
2756 Chain = DAG.getCALLSEQ_END(Chain,
2757 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2758 DAG.getTargetConstant(0, DL, MVT::i32),
2759 InFlag, DL);
2760 InFlag = Chain.getValue(1);
2761 }
2762
2763 std::vector<SDValue> Ops;
2764 Ops.push_back(Chain);
2765 Ops.push_back(Callee);
Scott Linderd19d1972019-02-04 20:00:07 +00002766 // Add a redundant copy of the callee global which will not be legalized, as
2767 // we need direct access to the callee later.
2768 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2769 const GlobalValue *GV = GSD->getGlobal();
2770 Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002771
2772 if (IsTailCall) {
2773 // Each tail call may have to adjust the stack by a different amount, so
2774 // this information must travel along with the operation for eventual
2775 // consumption by emitEpilogue.
2776 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002777
2778 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002779 }
2780
2781 // Add argument registers to the end of the list so that they are known live
2782 // into the call.
2783 for (auto &RegToPass : RegsToPass) {
2784 Ops.push_back(DAG.getRegister(RegToPass.first,
2785 RegToPass.second.getValueType()));
2786 }
2787
2788 // Add a register mask operand representing the call-preserved registers.
2789
Tom Stellardc5a154d2018-06-28 23:47:12 +00002790 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002791 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2792 assert(Mask && "Missing call preserved mask for calling convention");
2793 Ops.push_back(DAG.getRegisterMask(Mask));
2794
2795 if (InFlag.getNode())
2796 Ops.push_back(InFlag);
2797
2798 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2799
2800 // If we're doing a tail call, use a TC_RETURN here rather than an
2801 // actual call instruction.
2802 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002803 MFI.setHasTailCall();
2804 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002805 }
2806
2807 // Returns a chain and a flag for retval copy to use.
2808 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2809 Chain = Call.getValue(0);
2810 InFlag = Call.getValue(1);
2811
Matt Arsenault6efd0822017-09-14 17:14:57 +00002812 if (CallerSavedFP) {
2813 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2814 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2815 InFlag = Chain.getValue(1);
2816 }
2817
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002818 uint64_t CalleePopBytes = NumBytes;
2819 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002820 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2821 InFlag, DL);
2822 if (!Ins.empty())
2823 InFlag = Chain.getValue(1);
2824
2825 // Handle result values, copying them out of physregs into vregs that we
2826 // return.
2827 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2828 InVals, IsThisReturn,
2829 IsThisReturn ? OutVals[0] : SDValue());
2830}
2831
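// Resolve a named physical register (e.g. for the llvm.read_register and
// llvm.write_register intrinsics), rejecting names that are invalid for the
// requested type or for this subtarget.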
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002832unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2833 SelectionDAG &DAG) const {
2834 unsigned Reg = StringSwitch<unsigned>(RegName)
2835 .Case("m0", AMDGPU::M0)
2836 .Case("exec", AMDGPU::EXEC)
2837 .Case("exec_lo", AMDGPU::EXEC_LO)
2838 .Case("exec_hi", AMDGPU::EXEC_HI)
2839 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2840 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2841 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2842 .Default(AMDGPU::NoRegister);
2843
2844 if (Reg == AMDGPU::NoRegister) {
2845 report_fatal_error(Twine("invalid register name \""
2846 + StringRef(RegName) + "\"."));
2847
2848 }
2849
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00002850 if ((Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||
2851 Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) &&
2852 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002853 report_fatal_error(Twine("invalid register \""
2854 + StringRef(RegName) + "\" for subtarget."));
2855 }
2856
2857 switch (Reg) {
2858 case AMDGPU::M0:
2859 case AMDGPU::EXEC_LO:
2860 case AMDGPU::EXEC_HI:
2861 case AMDGPU::FLAT_SCR_LO:
2862 case AMDGPU::FLAT_SCR_HI:
2863 if (VT.getSizeInBits() == 32)
2864 return Reg;
2865 break;
2866 case AMDGPU::EXEC:
2867 case AMDGPU::FLAT_SCR:
2868 if (VT.getSizeInBits() == 64)
2869 return Reg;
2870 break;
2871 default:
2872 llvm_unreachable("missing register type checking");
2873 }
2874
2875 report_fatal_error(Twine("invalid type for register \""
2876 + StringRef(RegName) + "\"."));
2877}
2878
Matt Arsenault786724a2016-07-12 21:41:32 +00002879// If kill is not the last instruction, split the block so kill is always a
2880// proper terminator.
2881MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2882 MachineBasicBlock *BB) const {
2883 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2884
2885 MachineBasicBlock::iterator SplitPoint(&MI);
2886 ++SplitPoint;
2887
2888 if (SplitPoint == BB->end()) {
2889 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002890 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002891 return BB;
2892 }
2893
2894 MachineFunction *MF = BB->getParent();
2895 MachineBasicBlock *SplitBB
2896 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2897
Matt Arsenault786724a2016-07-12 21:41:32 +00002898 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2899 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2900
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002901 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002902 BB->addSuccessor(SplitBB);
2903
Marek Olsakce76ea02017-10-24 10:27:13 +00002904 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002905 return SplitBB;
2906}
2907
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002908// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2909// wavefront. If the value is uniform and just happens to be in a VGPR, this
2910// will only do one iteration. In the worst case, this will loop 64 times.
2911//
2912// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
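// As a rough sketch (not exact MIR), the loop emitted below looks like:
//   loop:
//     CurrentIdx = V_READFIRSTLANE_B32 IdxReg
//     CondReg    = V_CMP_EQ_U32 CurrentIdx, IdxReg
//     NewExec    = S_AND_SAVEEXEC CondReg
//     <set M0 or the GPR index from CurrentIdx (+ Offset)>
//     EXEC       = S_XOR_term EXEC, NewExec
//     S_CBRANCH_EXECNZ loop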
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002913static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2914 const SIInstrInfo *TII,
2915 MachineRegisterInfo &MRI,
2916 MachineBasicBlock &OrigBB,
2917 MachineBasicBlock &LoopBB,
2918 const DebugLoc &DL,
2919 const MachineOperand &IdxReg,
2920 unsigned InitReg,
2921 unsigned ResultReg,
2922 unsigned PhiReg,
2923 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002924 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002925 bool UseGPRIdxMode,
2926 bool IsIndirectSrc) {
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00002927 MachineFunction *MF = OrigBB.getParent();
2928 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2929 const SIRegisterInfo *TRI = ST.getRegisterInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002930 MachineBasicBlock::iterator I = LoopBB.begin();
2931
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00002932 const TargetRegisterClass *BoolRC = TRI->getBoolRC();
2933 unsigned PhiExec = MRI.createVirtualRegister(BoolRC);
2934 unsigned NewExec = MRI.createVirtualRegister(BoolRC);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002935 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00002936 unsigned CondReg = MRI.createVirtualRegister(BoolRC);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002937
2938 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2939 .addReg(InitReg)
2940 .addMBB(&OrigBB)
2941 .addReg(ResultReg)
2942 .addMBB(&LoopBB);
2943
2944 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2945 .addReg(InitSaveExecReg)
2946 .addMBB(&OrigBB)
2947 .addReg(NewExec)
2948 .addMBB(&LoopBB);
2949
2950 // Read the next variant; this is also the loop branch target.
2951 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2952 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2953
2954 // Compare the just read M0 value to all possible Idx values.
2955 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2956 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002957 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002958
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002959 // Update EXEC, saving the original EXEC value to NewExec.
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00002960 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
2961 : AMDGPU::S_AND_SAVEEXEC_B64),
2962 NewExec)
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002963 .addReg(CondReg, RegState::Kill);
2964
2965 MRI.setSimpleHint(NewExec, CondReg);
2966
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002967 if (UseGPRIdxMode) {
2968 unsigned IdxReg;
2969 if (Offset == 0) {
2970 IdxReg = CurrentIdxReg;
2971 } else {
2972 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2973 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2974 .addReg(CurrentIdxReg, RegState::Kill)
2975 .addImm(Offset);
2976 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002977 unsigned IdxMode = IsIndirectSrc ?
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00002978 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002979 MachineInstr *SetOn =
2980 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2981 .addReg(IdxReg, RegState::Kill)
2982 .addImm(IdxMode);
2983 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002984 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002985 // Move the current index into M0.
2986 if (Offset == 0) {
2987 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2988 .addReg(CurrentIdxReg, RegState::Kill);
2989 } else {
2990 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2991 .addReg(CurrentIdxReg, RegState::Kill)
2992 .addImm(Offset);
2993 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002994 }
2995
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002996 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00002997 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002998 MachineInstr *InsertPt =
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00002999 BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term
3000 : AMDGPU::S_XOR_B64_term), Exec)
3001 .addReg(Exec)
3002 .addReg(NewExec);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003003
3004 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
3005 // s_cbranch_scc0?
3006
3007 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3008 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3009 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003010
3011 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003012}
3013
3014// This has slightly sub-optimal regalloc when the source vector is killed by
3015// the read. The register allocator does not understand that the kill is
3016// per-workitem, so the vector is kept alive for the whole loop and we end up
3017// not re-using a subregister from it, using one more VGPR than necessary. This
3018// extra register was avoided when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003019static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3020 MachineBasicBlock &MBB,
3021 MachineInstr &MI,
3022 unsigned InitResultReg,
3023 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003024 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003025 bool UseGPRIdxMode,
3026 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003027 MachineFunction *MF = MBB.getParent();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003028 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3029 const SIRegisterInfo *TRI = ST.getRegisterInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003030 MachineRegisterInfo &MRI = MF->getRegInfo();
3031 const DebugLoc &DL = MI.getDebugLoc();
3032 MachineBasicBlock::iterator I(&MI);
3033
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003034 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003035 unsigned DstReg = MI.getOperand(0).getReg();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003036 unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);
3037 unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC);
3038 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3039 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003040
3041 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3042
3043 // Save the EXEC mask
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003044 BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec)
3045 .addReg(Exec);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003046
3047 // To insert the loop we need to split the block. Move everything after this
3048 // point to a new block, and insert a new empty block between the two.
3049 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3050 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3051 MachineFunction::iterator MBBI(MBB);
3052 ++MBBI;
3053
3054 MF->insert(MBBI, LoopBB);
3055 MF->insert(MBBI, RemainderBB);
3056
3057 LoopBB->addSuccessor(LoopBB);
3058 LoopBB->addSuccessor(RemainderBB);
3059
3060 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00003061 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003062 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3063
3064 MBB.addSuccessor(LoopBB);
3065
3066 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3067
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003068 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3069 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003070 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003071
3072 MachineBasicBlock::iterator First = RemainderBB->begin();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003073 BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003074 .addReg(SaveExec);
3075
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003076 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003077}
3078
3079// Returns subreg index, offset
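// If the constant offset selects an in-bounds element, it is folded into the
// subregister index and the returned offset is zero; out-of-bounds offsets are
// returned unchanged relative to sub0.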
3080static std::pair<unsigned, int>
3081computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3082 const TargetRegisterClass *SuperRC,
3083 unsigned VecReg,
3084 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003085 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003086
3087 // Skip out of bounds offsets, or else we would end up using an undefined
3088 // register.
3089 if (Offset >= NumElts || Offset < 0)
3090 return std::make_pair(AMDGPU::sub0, Offset);
3091
3092 return std::make_pair(AMDGPU::sub0 + Offset, 0);
3093}
3094
3095// Return true if the index is an SGPR and was set.
3096static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3097 MachineRegisterInfo &MRI,
3098 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003099 int Offset,
3100 bool UseGPRIdxMode,
3101 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003102 MachineBasicBlock *MBB = MI.getParent();
3103 const DebugLoc &DL = MI.getDebugLoc();
3104 MachineBasicBlock::iterator I(&MI);
3105
3106 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3107 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3108
3109 assert(Idx->getReg() != AMDGPU::NoRegister);
3110
3111 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3112 return false;
3113
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003114 if (UseGPRIdxMode) {
3115 unsigned IdxMode = IsIndirectSrc ?
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00003116 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003117 if (Offset == 0) {
3118 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00003119 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3120 .add(*Idx)
3121 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003122
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003123 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003124 } else {
3125 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3126 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00003127 .add(*Idx)
3128 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003129 MachineInstr *SetOn =
3130 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3131 .addReg(Tmp, RegState::Kill)
3132 .addImm(IdxMode);
3133
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003134 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003135 }
3136
3137 return true;
3138 }
3139
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003140 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003141 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3142 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003143 } else {
3144 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003145 .add(*Idx)
3146 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003147 }
3148
3149 return true;
3150}
3151
3152// Control flow needs to be inserted if indexing with a VGPR.
3153static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3154 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003155 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003156 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003157 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3158 MachineFunction *MF = MBB.getParent();
3159 MachineRegisterInfo &MRI = MF->getRegInfo();
3160
3161 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003162 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003163 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3164
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003165 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003166
3167 unsigned SubReg;
3168 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003169 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003170
Marek Olsake22fdb92017-03-21 17:00:32 +00003171 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003172
3173 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003174 MachineBasicBlock::iterator I(&MI);
3175 const DebugLoc &DL = MI.getDebugLoc();
3176
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003177 if (UseGPRIdxMode) {
3178 // TODO: Look at the uses to avoid the copy. This may require rescheduling
3179 // to avoid interfering with other uses, so probably requires a new
3180 // optimization pass.
3181 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003182 .addReg(SrcReg, RegState::Undef, SubReg)
3183 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003184 .addReg(AMDGPU::M0, RegState::Implicit);
3185 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3186 } else {
3187 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003188 .addReg(SrcReg, RegState::Undef, SubReg)
3189 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003190 }
3191
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003192 MI.eraseFromParent();
3193
3194 return &MBB;
3195 }
3196
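  // The index lives in a VGPR, so emit the waterfall loop over its unique
  // values (see emitLoadM0FromVGPRLoop above).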
3197 const DebugLoc &DL = MI.getDebugLoc();
3198 MachineBasicBlock::iterator I(&MI);
3199
3200 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3201 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3202
3203 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3204
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003205 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3206 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003207 MachineBasicBlock *LoopBB = InsPt->getParent();
3208
3209 if (UseGPRIdxMode) {
3210 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003211 .addReg(SrcReg, RegState::Undef, SubReg)
3212 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003213 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003214 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003215 } else {
3216 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003217 .addReg(SrcReg, RegState::Undef, SubReg)
3218 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003219 }
3220
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003221 MI.eraseFromParent();
3222
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003223 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003224}
3225
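// Select the V_MOVRELD_B32 pseudo whose register footprint matches the width
// of the vector register class being written.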
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003226static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3227 const TargetRegisterClass *VecRC) {
3228 switch (TRI.getRegSizeInBits(*VecRC)) {
3229 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003230 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003231 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003232 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003233 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003234 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003235 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003236 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003237 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003238 return AMDGPU::V_MOVRELD_B32_V16;
3239 default:
3240 llvm_unreachable("unsupported size for MOVRELD pseudos");
3241 }
3242}
3243
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003244static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3245 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003246 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003247 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003248 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3249 MachineFunction *MF = MBB.getParent();
3250 MachineRegisterInfo &MRI = MF->getRegInfo();
3251
3252 unsigned Dst = MI.getOperand(0).getReg();
3253 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3254 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3255 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3256 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3257 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3258
3259 // This can be an immediate, but will be folded later.
3260 assert(Val->getReg());
3261
3262 unsigned SubReg;
3263 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3264 SrcVec->getReg(),
3265 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00003266 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003267
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003268 if (Idx->getReg() == AMDGPU::NoRegister) {
3269 MachineBasicBlock::iterator I(&MI);
3270 const DebugLoc &DL = MI.getDebugLoc();
3271
3272 assert(Offset == 0);
3273
3274 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00003275 .add(*SrcVec)
3276 .add(*Val)
3277 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003278
3279 MI.eraseFromParent();
3280 return &MBB;
3281 }
3282
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003283 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003284 MachineBasicBlock::iterator I(&MI);
3285 const DebugLoc &DL = MI.getDebugLoc();
3286
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003287 if (UseGPRIdxMode) {
3288 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003289 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3290 .add(*Val)
3291 .addReg(Dst, RegState::ImplicitDefine)
3292 .addReg(SrcVec->getReg(), RegState::Implicit)
3293 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003294
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003295 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3296 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003297 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003298
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003299 BuildMI(MBB, I, DL, MovRelDesc)
3300 .addReg(Dst, RegState::Define)
3301 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00003302 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003303 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003304 }
3305
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003306 MI.eraseFromParent();
3307 return &MBB;
3308 }
3309
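  // The index lives in a VGPR, so fall back to the waterfall loop. The value
  // register is re-read on every iteration, so its kill flags are cleared when
  // it is a register.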
3310 if (Val->isReg())
3311 MRI.clearKillFlags(Val->getReg());
3312
3313 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003314
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003315 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3316
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003317 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003318 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003319 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003320
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003321 if (UseGPRIdxMode) {
3322 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003323 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3324 .add(*Val) // src0
3325 .addReg(Dst, RegState::ImplicitDefine)
3326 .addReg(PhiReg, RegState::Implicit)
3327 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003328 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003329 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003330 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003331
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003332 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3333 .addReg(Dst, RegState::Define)
3334 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003335 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003336 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003337 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003338
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003339 MI.eraseFromParent();
3340
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003341 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003342}
3343
Matt Arsenault786724a2016-07-12 21:41:32 +00003344MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3345 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00003346
3347 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3348 MachineFunction *MF = BB->getParent();
3349 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3350
3351 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00003352 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3353 report_fatal_error("missing mem operand from MIMG instruction");
3354 }
Tom Stellard244891d2016-12-20 15:52:17 +00003355 // MIMG instructions must carry a memoperand so that they aren't assumed to
3356 // be ordered memory instructions.
3357
Tom Stellard244891d2016-12-20 15:52:17 +00003358 return BB;
3359 }
3360
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003361 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003362 case AMDGPU::S_ADD_U64_PSEUDO:
3363 case AMDGPU::S_SUB_U64_PSEUDO: {
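    // Expand the 64-bit scalar add/sub into 32-bit halves: a low S_ADD_U32 /
    // S_SUB_U32 followed by the carry-consuming S_ADDC_U32 / S_SUBB_U32, then
    // recombine the halves with a REG_SEQUENCE.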
3364 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003365 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3366 const SIRegisterInfo *TRI = ST.getRegisterInfo();
3367 const TargetRegisterClass *BoolRC = TRI->getBoolRC();
Matt Arsenault301162c2017-11-15 21:51:43 +00003368 const DebugLoc &DL = MI.getDebugLoc();
3369
3370 MachineOperand &Dest = MI.getOperand(0);
3371 MachineOperand &Src0 = MI.getOperand(1);
3372 MachineOperand &Src1 = MI.getOperand(2);
3373
3374 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3375 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3376
3377 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003378 Src0, BoolRC, AMDGPU::sub0,
Matt Arsenault301162c2017-11-15 21:51:43 +00003379 &AMDGPU::SReg_32_XM0RegClass);
3380 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003381 Src0, BoolRC, AMDGPU::sub1,
Matt Arsenault301162c2017-11-15 21:51:43 +00003382 &AMDGPU::SReg_32_XM0RegClass);
3383
3384 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003385 Src1, BoolRC, AMDGPU::sub0,
Matt Arsenault301162c2017-11-15 21:51:43 +00003386 &AMDGPU::SReg_32_XM0RegClass);
3387 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003388 Src1, BoolRC, AMDGPU::sub1,
Matt Arsenault301162c2017-11-15 21:51:43 +00003389 &AMDGPU::SReg_32_XM0RegClass);
3390
3391 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3392
3393 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3394 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3395 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3396 .add(Src0Sub0)
3397 .add(Src1Sub0);
3398 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3399 .add(Src0Sub1)
3400 .add(Src1Sub1);
3401 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3402 .addReg(DestSub0)
3403 .addImm(AMDGPU::sub0)
3404 .addReg(DestSub1)
3405 .addImm(AMDGPU::sub1);
3406 MI.eraseFromParent();
3407 return BB;
3408 }
3409 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003410 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003411 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003412 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003413 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003414 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003415 }
Marek Olsak2d825902017-04-28 20:21:58 +00003416 case AMDGPU::SI_INIT_EXEC:
3417 // This should be before all vector instructions.
3418 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3419 AMDGPU::EXEC)
3420 .addImm(MI.getOperand(0).getImm());
3421 MI.eraseFromParent();
3422 return BB;
3423
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003424 case AMDGPU::SI_INIT_EXEC_LO:
3425 // This should be before all vector instructions.
3426 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
3427 AMDGPU::EXEC_LO)
3428 .addImm(MI.getOperand(0).getImm());
3429 MI.eraseFromParent();
3430 return BB;
3431
Marek Olsak2d825902017-04-28 20:21:58 +00003432 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3433 // Extract the thread count from an SGPR input and set EXEC accordingly.
3434 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3435 //
3436 // S_BFE_U32 count, input, {shift, 7}
3437 // S_BFM_B64 exec, count, 0
3438 // S_CMP_EQ_U32 count, 64
3439 // S_CMOV_B64 exec, -1
3440 MachineInstr *FirstMI = &*BB->begin();
3441 MachineRegisterInfo &MRI = MF->getRegInfo();
3442 unsigned InputReg = MI.getOperand(0).getReg();
3443 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3444 bool Found = false;
3445
3446 // Move the COPY of the input reg to the beginning, so that we can use it.
3447 for (auto I = BB->begin(); I != &MI; I++) {
3448 if (I->getOpcode() != TargetOpcode::COPY ||
3449 I->getOperand(0).getReg() != InputReg)
3450 continue;
3451
3452 if (I == FirstMI) {
3453 FirstMI = &*++BB->begin();
3454 } else {
3455 I->removeFromParent();
3456 BB->insert(FirstMI, &*I);
3457 }
3458 Found = true;
3459 break;
3460 }
3461 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003462 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003463
3464 // This should be before all vector instructions.
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003465 unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1;
3466 bool isWave32 = getSubtarget()->isWave32();
3467 unsigned Exec = isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
Marek Olsak2d825902017-04-28 20:21:58 +00003468 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3469 .addReg(InputReg)
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003470 .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
3471 BuildMI(*BB, FirstMI, DebugLoc(),
3472 TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64),
3473 Exec)
Marek Olsak2d825902017-04-28 20:21:58 +00003474 .addReg(CountReg)
3475 .addImm(0);
3476 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3477 .addReg(CountReg, RegState::Kill)
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003478 .addImm(getSubtarget()->getWavefrontSize());
3479 BuildMI(*BB, FirstMI, DebugLoc(),
3480 TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
3481 Exec)
Marek Olsak2d825902017-04-28 20:21:58 +00003482 .addImm(-1);
3483 MI.eraseFromParent();
3484 return BB;
3485 }
3486
Changpeng Fang01f60622016-03-15 17:28:44 +00003487 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003488 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003489 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003490 .add(MI.getOperand(0))
3491 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003492 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003493 return BB;
3494 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003495 case AMDGPU::SI_INDIRECT_SRC_V1:
3496 case AMDGPU::SI_INDIRECT_SRC_V2:
3497 case AMDGPU::SI_INDIRECT_SRC_V4:
3498 case AMDGPU::SI_INDIRECT_SRC_V8:
3499 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003500 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003501 case AMDGPU::SI_INDIRECT_DST_V1:
3502 case AMDGPU::SI_INDIRECT_DST_V2:
3503 case AMDGPU::SI_INDIRECT_DST_V4:
3504 case AMDGPU::SI_INDIRECT_DST_V8:
3505 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003506 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003507 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3508 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003509 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003510 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
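    // Expand the 64-bit select into two 32-bit V_CNDMASK_B32_e64 operations on
    // the sub0/sub1 halves that share one copy of the condition, then rejoin
    // the halves with a REG_SEQUENCE.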
3511 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003512 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3513 const SIRegisterInfo *TRI = ST.getRegisterInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003514
3515 unsigned Dst = MI.getOperand(0).getReg();
3516 unsigned Src0 = MI.getOperand(1).getReg();
3517 unsigned Src1 = MI.getOperand(2).getReg();
3518 const DebugLoc &DL = MI.getDebugLoc();
3519 unsigned SrcCond = MI.getOperand(3).getReg();
3520
3521 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3522 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003523 const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
3524 unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC);
Matt Arsenault22e41792016-08-27 01:00:37 +00003525
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003526 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3527 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003528 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003529 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003530 .addReg(Src0, 0, AMDGPU::sub0)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003531 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003532 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003533 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003534 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003535 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003536 .addReg(Src0, 0, AMDGPU::sub1)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003537 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003538 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003539 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003540
3541 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3542 .addReg(DstLo)
3543 .addImm(AMDGPU::sub0)
3544 .addReg(DstHi)
3545 .addImm(AMDGPU::sub1);
3546 MI.eraseFromParent();
3547 return BB;
3548 }
Matt Arsenault327188a2016-12-15 21:57:11 +00003549 case AMDGPU::SI_BR_UNDEF: {
3550 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3551 const DebugLoc &DL = MI.getDebugLoc();
3552 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003553 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003554 Br->getOperand(1).setIsUndef(true); // read undef SCC
3555 MI.eraseFromParent();
3556 return BB;
3557 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003558 case AMDGPU::ADJCALLSTACKUP:
3559 case AMDGPU::ADJCALLSTACKDOWN: {
3560 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3561 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003562
3563 // Add an implicit use of the frame offset reg to prevent the restore copy
3564    // inserted after the call from being reordered after stack operations in
3565 // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003566 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003567 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3568 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003569 return BB;
3570 }
Scott Linderd19d1972019-02-04 20:00:07 +00003571 case AMDGPU::SI_CALL_ISEL: {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003572 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3573 const DebugLoc &DL = MI.getDebugLoc();
Scott Linderd19d1972019-02-04 20:00:07 +00003574
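    // Rewrite the pseudo call into a real SI_CALL that defines the return
    // address register, carrying over the original operands and memory
    // references.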
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003575 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003576
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003577 MachineInstrBuilder MIB;
Scott Linderd19d1972019-02-04 20:00:07 +00003578 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003579
Scott Linderd19d1972019-02-04 20:00:07 +00003580 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003581 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003582
Chandler Carruthc73c0302018-08-16 21:30:05 +00003583 MIB.cloneMemRefs(MI);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003584 MI.eraseFromParent();
3585 return BB;
3586 }
Stanislav Mekhanoshin64399da2019-05-02 04:26:35 +00003587 case AMDGPU::V_ADD_I32_e32:
3588 case AMDGPU::V_SUB_I32_e32:
3589 case AMDGPU::V_SUBREV_I32_e32: {
3590 // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3591 const DebugLoc &DL = MI.getDebugLoc();
3592 unsigned Opc = MI.getOpcode();
3593
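    // If the e32 form has no valid encoding on this subtarget, switch to the
    // e64 (VOP3) form, which takes an explicit carry-out register and a clamp
    // operand.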
3594 bool NeedClampOperand = false;
3595 if (TII->pseudoToMCOpcode(Opc) == -1) {
3596 Opc = AMDGPU::getVOPe64(Opc);
3597 NeedClampOperand = true;
3598 }
3599
3600 auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3601 if (TII->isVOP3(*I)) {
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00003602 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3603 const SIRegisterInfo *TRI = ST.getRegisterInfo();
3604 I.addReg(TRI->getVCC(), RegState::Define);
Stanislav Mekhanoshin64399da2019-05-02 04:26:35 +00003605 }
3606 I.add(MI.getOperand(1))
3607 .add(MI.getOperand(2));
3608 if (NeedClampOperand)
3609 I.addImm(0); // clamp bit for e64 encoding
3610
3611 TII->legalizeOperands(*I);
3612
3613 MI.eraseFromParent();
3614 return BB;
3615 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003616 default:
3617 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003618 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003619}
3620
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003621bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3622 return isTypeLegal(VT.getScalarType());
3623}
3624
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003625bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3626 // This currently forces unfolding various combinations of fsub into fma with
3627 // free fneg'd operands. As long as we have fast FMA (controlled by
3628 // isFMAFasterThanFMulAndFAdd), we should perform these.
3629
3630 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3631 // most of these combines appear to be cycle neutral but save on instruction
3632 // count / code size.
3633 return true;
3634}
3635
Mehdi Amini44ede332015-07-09 02:09:04 +00003636EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3637 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003638 if (!VT.isVector()) {
3639 return MVT::i1;
3640 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003641 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003642}
3643
Matt Arsenault94163282016-12-22 16:36:25 +00003644MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3645 // TODO: Should i16 be used always if legal? For now it would force VALU
3646 // shifts.
3647 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003648}
3649
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003650// Answering this is somewhat tricky and depends on the specific device, since
3651// different devices have different rates for fma or for all f64 operations.
3652//
3653// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3654// regardless of which device (although the number of cycles differs between
3655// devices), so it is always profitable for f64.
3656//
3657// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3658// only on full rate devices. Normally, we should prefer selecting v_mad_f32
3659// which we can always do even without fused FP ops since it returns the same
3660// result as the separate operations and since it is always full
3661// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3662// however does not support denormals, so we do report fma as faster if we have
3663// a fast fma device and require denormals.
3664//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003665bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3666 VT = VT.getScalarType();
3667
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003668 switch (VT.getSimpleVT().SimpleTy) {
Matt Arsenault0084adc2018-04-30 19:08:16 +00003669 case MVT::f32: {
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003670    // This is as fast on some subtargets. However, full rate f32 mad is always
3671    // available and returns the same result as the separate operations, so we
Matt Arsenault8d630032015-02-20 22:10:41 +00003672    // should prefer it over fma. We can't use mad if we want to support
3673    // denormals, so only report fma as faster in that case.
Matt Arsenault0084adc2018-04-30 19:08:16 +00003674 if (Subtarget->hasFP32Denormals())
3675 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3676
3677 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3678 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3679 }
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003680 case MVT::f64:
3681 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003682 case MVT::f16:
3683 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003684 default:
3685 break;
3686 }
3687
3688 return false;
3689}
3690
Tom Stellard75aadc22012-12-11 21:25:42 +00003691//===----------------------------------------------------------------------===//
3692// Custom DAG Lowering Operations
3693//===----------------------------------------------------------------------===//
3694
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003695// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3696// wider vector type is legal.
3697SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3698 SelectionDAG &DAG) const {
3699 unsigned Opc = Op.getOpcode();
3700 EVT VT = Op.getValueType();
3701 assert(VT == MVT::v4f16);
3702
3703 SDValue Lo, Hi;
3704 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3705
3706 SDLoc SL(Op);
3707 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3708 Op->getFlags());
3709 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3710 Op->getFlags());
3711
3712 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3713}
3714
3715// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3716// wider vector type is legal.
3717SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3718 SelectionDAG &DAG) const {
3719 unsigned Opc = Op.getOpcode();
3720 EVT VT = Op.getValueType();
3721 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3722
3723 SDValue Lo0, Hi0;
3724 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3725 SDValue Lo1, Hi1;
3726 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3727
3728 SDLoc SL(Op);
3729
3730 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3731 Op->getFlags());
3732 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3733 Op->getFlags());
3734
3735 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3736}
3737
Tom Stellard75aadc22012-12-11 21:25:42 +00003738SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3739 switch (Op.getOpcode()) {
3740 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003741 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Aakanksha Patild5443f82019-05-29 18:20:11 +00003742 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003743 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003744 SDValue Result = LowerLOAD(Op, DAG);
3745 assert((!Result.getNode() ||
3746 Result.getNode()->getNumValues() == 2) &&
3747 "Load should return a value and a chain");
3748 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003749 }
Tom Stellardaf775432013-10-23 00:44:32 +00003750
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003751 case ISD::FSIN:
3752 case ISD::FCOS:
3753 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003754 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003755 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003756 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003757 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003758 case ISD::GlobalAddress: {
3759 MachineFunction &MF = DAG.getMachineFunction();
3760 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3761 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003762 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003763 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003764 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003765 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003766 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003767 case ISD::INSERT_VECTOR_ELT:
3768 return lowerINSERT_VECTOR_ELT(Op, DAG);
3769 case ISD::EXTRACT_VECTOR_ELT:
3770 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Matt Arsenault67a98152018-05-16 11:47:30 +00003771 case ISD::BUILD_VECTOR:
3772 return lowerBUILD_VECTOR(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003773 case ISD::FP_ROUND:
3774 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003775 case ISD::TRAP:
Matt Arsenault3e025382017-04-24 17:49:13 +00003776 return lowerTRAP(Op, DAG);
Tony Tye43259df2018-05-16 16:19:34 +00003777 case ISD::DEBUGTRAP:
3778 return lowerDEBUGTRAP(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003779 case ISD::FABS:
3780 case ISD::FNEG:
Matt Arsenault36cdcfa2018-08-02 13:43:42 +00003781 case ISD::FCANONICALIZE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003782 return splitUnaryVectorOp(Op, DAG);
Matt Arsenault687ec752018-10-22 16:27:27 +00003783 case ISD::FMINNUM:
3784 case ISD::FMAXNUM:
3785 return lowerFMINNUM_FMAXNUM(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003786 case ISD::SHL:
3787 case ISD::SRA:
3788 case ISD::SRL:
3789 case ISD::ADD:
3790 case ISD::SUB:
3791 case ISD::MUL:
3792 case ISD::SMIN:
3793 case ISD::SMAX:
3794 case ISD::UMIN:
3795 case ISD::UMAX:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003796 case ISD::FADD:
3797 case ISD::FMUL:
Matt Arsenault687ec752018-10-22 16:27:27 +00003798 case ISD::FMINNUM_IEEE:
3799 case ISD::FMAXNUM_IEEE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003800 return splitBinaryVectorOp(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003801 }
3802 return SDValue();
3803}
3804
Matt Arsenault1349a042018-05-22 06:32:10 +00003805static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3806 const SDLoc &DL,
3807 SelectionDAG &DAG, bool Unpacked) {
3808 if (!LoadVT.isVector())
3809 return Result;
3810
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003811 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3812 // Truncate to v2i16/v4i16.
3813 EVT IntLoadVT = LoadVT.changeTypeToInteger();
Matt Arsenault1349a042018-05-22 06:32:10 +00003814
3815    // Work around the legalizer not scalarizing truncate after vector op
3816    // legalization by not creating an intermediate vector trunc.
3817 SmallVector<SDValue, 4> Elts;
3818 DAG.ExtractVectorElements(Result, Elts);
3819 for (SDValue &Elt : Elts)
3820 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3821
3822 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3823
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003824 // Bitcast to original type (v2f16/v4f16).
Matt Arsenault1349a042018-05-22 06:32:10 +00003825 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003826 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003827
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003828 // Cast back to the original packed type.
3829 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3830}
3831
Matt Arsenault1349a042018-05-22 06:32:10 +00003832SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3833 MemSDNode *M,
3834 SelectionDAG &DAG,
Tim Renouf366a49d2018-08-02 23:33:01 +00003835 ArrayRef<SDValue> Ops,
Matt Arsenault1349a042018-05-22 06:32:10 +00003836 bool IsIntrinsic) const {
3837 SDLoc DL(M);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003838
3839 bool Unpacked = Subtarget->hasUnpackedD16VMem();
Matt Arsenault1349a042018-05-22 06:32:10 +00003840 EVT LoadVT = M->getValueType(0);
3841
Matt Arsenault1349a042018-05-22 06:32:10 +00003842 EVT EquivLoadVT = LoadVT;
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003843 if (Unpacked && LoadVT.isVector()) {
3844 EquivLoadVT = LoadVT.isVector() ?
3845 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3846 LoadVT.getVectorNumElements()) : LoadVT;
Matt Arsenault1349a042018-05-22 06:32:10 +00003847 }
3848
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003849 // Change from v4f16/v2f16 to EquivLoadVT.
3850 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3851
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003852 SDValue Load
3853 = DAG.getMemIntrinsicNode(
3854 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3855 VTList, Ops, M->getMemoryVT(),
3856 M->getMemOperand());
3857 if (!Unpacked) // Just adjusted the opcode.
3858 return Load;
Changpeng Fang4737e892018-01-18 22:08:53 +00003859
Matt Arsenault1349a042018-05-22 06:32:10 +00003860 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
Changpeng Fang4737e892018-01-18 22:08:53 +00003861
Matt Arsenault1349a042018-05-22 06:32:10 +00003862 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003863}
3864
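// Lower an integer compare intrinsic: return UNDEF for an out-of-range
// predicate, promote illegal i16 operands to i32, and emit an AMDGPUISD::SETCC
// whose wavefront-sized result is adjusted to the requested return type.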
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003865static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
3866 SDNode *N, SelectionDAG &DAG) {
3867 EVT VT = N->getValueType(0);
Matt Arsenaultcaf13162019-03-12 21:02:54 +00003868 const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003869 int CondCode = CD->getSExtValue();
3870 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
3871 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
3872 return DAG.getUNDEF(VT);
3873
3874 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
3875
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003876 SDValue LHS = N->getOperand(1);
3877 SDValue RHS = N->getOperand(2);
3878
3879 SDLoc DL(N);
3880
3881 EVT CmpVT = LHS.getValueType();
3882 if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
3883 unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
3884 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3885 LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
3886 RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
3887 }
3888
3889 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
3890
Stanislav Mekhanoshin68a2fef2019-06-13 23:47:36 +00003891 unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
3892 EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
3893
3894 SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
3895 DAG.getCondCode(CCOpcode));
3896 if (VT.bitsEq(CCVT))
3897 return SetCC;
3898 return DAG.getZExtOrTrunc(SetCC, DL, VT);
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003899}
3900
3901static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
3902 SDNode *N, SelectionDAG &DAG) {
3903 EVT VT = N->getValueType(0);
Matt Arsenaultcaf13162019-03-12 21:02:54 +00003904 const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003905
3906 int CondCode = CD->getSExtValue();
3907 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
3908 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
3909 return DAG.getUNDEF(VT);
3910 }
3911
3912 SDValue Src0 = N->getOperand(1);
3913 SDValue Src1 = N->getOperand(2);
3914 EVT CmpVT = Src0.getValueType();
3915 SDLoc SL(N);
3916
3917 if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
3918 Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3919 Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3920 }
3921
3922 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
3923 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
Stanislav Mekhanoshin68a2fef2019-06-13 23:47:36 +00003924 unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
3925 EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
3926 SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
3927 Src1, DAG.getCondCode(CCOpcode));
3928 if (VT.bitsEq(CCVT))
3929 return SetCC;
3930 return DAG.getZExtOrTrunc(SetCC, SL, VT);
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003931}
3932
Matt Arsenault3aef8092017-01-23 23:09:58 +00003933void SITargetLowering::ReplaceNodeResults(SDNode *N,
3934 SmallVectorImpl<SDValue> &Results,
3935 SelectionDAG &DAG) const {
3936 switch (N->getOpcode()) {
3937 case ISD::INSERT_VECTOR_ELT: {
3938 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3939 Results.push_back(Res);
3940 return;
3941 }
3942 case ISD::EXTRACT_VECTOR_ELT: {
3943 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3944 Results.push_back(Res);
3945 return;
3946 }
Matt Arsenault1f17c662017-02-22 00:27:34 +00003947 case ISD::INTRINSIC_WO_CHAIN: {
3948 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Marek Olsak13e47412018-01-31 20:18:04 +00003949 switch (IID) {
3950 case Intrinsic::amdgcn_cvt_pkrtz: {
Matt Arsenault1f17c662017-02-22 00:27:34 +00003951 SDValue Src0 = N->getOperand(1);
3952 SDValue Src1 = N->getOperand(2);
3953 SDLoc SL(N);
3954 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3955 Src0, Src1);
Matt Arsenault1f17c662017-02-22 00:27:34 +00003956 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3957 return;
3958 }
Marek Olsak13e47412018-01-31 20:18:04 +00003959 case Intrinsic::amdgcn_cvt_pknorm_i16:
3960 case Intrinsic::amdgcn_cvt_pknorm_u16:
3961 case Intrinsic::amdgcn_cvt_pk_i16:
3962 case Intrinsic::amdgcn_cvt_pk_u16: {
3963 SDValue Src0 = N->getOperand(1);
3964 SDValue Src1 = N->getOperand(2);
3965 SDLoc SL(N);
3966 unsigned Opcode;
3967
3968 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3969 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3970 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3971 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3972 else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3973 Opcode = AMDGPUISD::CVT_PK_I16_I32;
3974 else
3975 Opcode = AMDGPUISD::CVT_PK_U16_U32;
3976
Matt Arsenault709374d2018-08-01 20:13:58 +00003977 EVT VT = N->getValueType(0);
3978 if (isTypeLegal(VT))
3979 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3980 else {
3981 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3982 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3983 }
Marek Olsak13e47412018-01-31 20:18:04 +00003984 return;
3985 }
3986 }
Simon Pilgrimd362d272017-07-08 19:50:03 +00003987 break;
Matt Arsenault1f17c662017-02-22 00:27:34 +00003988 }
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003989 case ISD::INTRINSIC_W_CHAIN: {
Matt Arsenault1349a042018-05-22 06:32:10 +00003990 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003991 Results.push_back(Res);
Matt Arsenault1349a042018-05-22 06:32:10 +00003992 Results.push_back(Res.getValue(1));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003993 return;
3994 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003995
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003996 break;
3997 }
Matt Arsenault4a486232017-04-19 20:53:07 +00003998 case ISD::SELECT: {
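    // Bitcast the operands to an equivalent integer type (widened to at least
    // i32 if necessary), perform the select there, and cast the result back to
    // the original type.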
3999 SDLoc SL(N);
4000 EVT VT = N->getValueType(0);
4001 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
4002 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
4003 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
4004
4005 EVT SelectVT = NewVT;
4006 if (NewVT.bitsLT(MVT::i32)) {
4007 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
4008 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
4009 SelectVT = MVT::i32;
4010 }
4011
4012 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
4013 N->getOperand(0), LHS, RHS);
4014
4015 if (NewVT != SelectVT)
4016 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
4017 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
4018 return;
4019 }
Matt Arsenaulte9524f12018-06-06 21:28:11 +00004020 case ISD::FNEG: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004021 if (N->getValueType(0) != MVT::v2f16)
4022 break;
4023
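    // Negate both f16 halves at once by toggling their sign bits with a
    // single 32-bit XOR.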
Matt Arsenaulte9524f12018-06-06 21:28:11 +00004024 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00004025 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4026
4027 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
4028 BC,
4029 DAG.getConstant(0x80008000, SL, MVT::i32));
4030 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4031 return;
4032 }
4033 case ISD::FABS: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004034 if (N->getValueType(0) != MVT::v2f16)
4035 break;
4036
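    // Take the absolute value of both f16 halves at once by clearing their
    // sign bits with a single 32-bit AND.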
Matt Arsenaulte9524f12018-06-06 21:28:11 +00004037 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00004038 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
4039
4040 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
4041 BC,
4042 DAG.getConstant(0x7fff7fff, SL, MVT::i32));
4043 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4044 return;
4045 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004046 default:
4047 break;
4048 }
4049}
4050
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00004051/// Helper function for LowerBRCOND
Tom Stellardf8794352012-12-19 22:10:31 +00004052static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00004053
Tom Stellardf8794352012-12-19 22:10:31 +00004054 SDNode *Parent = Value.getNode();
4055 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
4056 I != E; ++I) {
4057
4058 if (I.getUse().get() != Value)
4059 continue;
4060
4061 if (I->getOpcode() == Opcode)
4062 return *I;
4063 }
Craig Topper062a2ba2014-04-25 05:30:21 +00004064 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00004065}
4066
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004067unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
Matt Arsenault6408c912016-09-16 22:11:18 +00004068 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4069 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004070 case Intrinsic::amdgcn_if:
4071 return AMDGPUISD::IF;
4072 case Intrinsic::amdgcn_else:
4073 return AMDGPUISD::ELSE;
4074 case Intrinsic::amdgcn_loop:
4075 return AMDGPUISD::LOOP;
4076 case Intrinsic::amdgcn_end_cf:
4077 llvm_unreachable("should not occur");
Matt Arsenault6408c912016-09-16 22:11:18 +00004078 default:
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004079 return 0;
Matt Arsenault6408c912016-09-16 22:11:18 +00004080 }
Tom Stellardbc4497b2016-02-12 23:45:29 +00004081 }
Matt Arsenault6408c912016-09-16 22:11:18 +00004082
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004083 // break, if_break, else_break are all only used as inputs to loop, not
4084 // directly as branch conditions.
4085 return 0;
Tom Stellardbc4497b2016-02-12 23:45:29 +00004086}
4087
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004088bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4089 const Triple &TT = getTargetMachine().getTargetTriple();
Matt Arsenault0da63502018-08-31 05:49:54 +00004090 return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4091 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004092 AMDGPU::shouldEmitConstantsToTextSection(TT);
4093}
4094
4095bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
Scott Linderd19d1972019-02-04 20:00:07 +00004096 // FIXME: Either avoid relying on address space here or change the default
4097 // address space for functions to avoid the explicit check.
4098 return (GV->getValueType()->isFunctionTy() ||
4099 GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
Matt Arsenault0da63502018-08-31 05:49:54 +00004100 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4101 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004102 !shouldEmitFixup(GV) &&
4103 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4104}
4105
4106bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4107 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4108}
4109
Tom Stellardf8794352012-12-19 22:10:31 +00004110/// This transforms the control flow intrinsics to get the branch destination as
4111/// the last parameter, and also switches the branch target with BR if the need arises
4112SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4113 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00004114 SDLoc DL(BRCOND);
Tom Stellardf8794352012-12-19 22:10:31 +00004115
4116 SDNode *Intr = BRCOND.getOperand(1).getNode();
4117 SDValue Target = BRCOND.getOperand(2);
Craig Topper062a2ba2014-04-25 05:30:21 +00004118 SDNode *BR = nullptr;
Tom Stellardbc4497b2016-02-12 23:45:29 +00004119 SDNode *SetCC = nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00004120
4121 if (Intr->getOpcode() == ISD::SETCC) {
4122 // As long as we negate the condition everything is fine
Tom Stellardbc4497b2016-02-12 23:45:29 +00004123 SetCC = Intr;
Tom Stellardf8794352012-12-19 22:10:31 +00004124 Intr = SetCC->getOperand(0).getNode();
4125
4126 } else {
4127 // Get the target from BR if we don't negate the condition
4128 BR = findUser(BRCOND, ISD::BR);
4129 Target = BR->getOperand(1);
4130 }
4131
Matt Arsenault6408c912016-09-16 22:11:18 +00004132 // FIXME: This changes the types of the intrinsics instead of introducing new
4133 // nodes with the correct types.
4134 // e.g. llvm.amdgcn.loop
4135
4136  // e.g.: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4137 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4138
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004139 unsigned CFNode = isCFIntrinsic(Intr);
4140 if (CFNode == 0) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00004141 // This is a uniform branch so we don't need to legalize.
4142 return BRCOND;
4143 }
4144
Matt Arsenault6408c912016-09-16 22:11:18 +00004145 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4146 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4147
Tom Stellardbc4497b2016-02-12 23:45:29 +00004148 assert(!SetCC ||
4149 (SetCC->getConstantOperandVal(1) == 1 &&
Tom Stellardbc4497b2016-02-12 23:45:29 +00004150 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4151 ISD::SETNE));
Tom Stellardf8794352012-12-19 22:10:31 +00004152
Tom Stellardf8794352012-12-19 22:10:31 +00004153 // operands of the new intrinsic call
4154 SmallVector<SDValue, 4> Ops;
Matt Arsenault6408c912016-09-16 22:11:18 +00004155 if (HaveChain)
4156 Ops.push_back(BRCOND.getOperand(0));
4157
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004158 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Tom Stellardf8794352012-12-19 22:10:31 +00004159 Ops.push_back(Target);
4160
Matt Arsenault6408c912016-09-16 22:11:18 +00004161 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4162
Tom Stellardf8794352012-12-19 22:10:31 +00004163 // build the new intrinsic call
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004164 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004165
Matt Arsenault6408c912016-09-16 22:11:18 +00004166 if (!HaveChain) {
4167 SDValue Ops[] = {
4168 SDValue(Result, 0),
4169 BRCOND.getOperand(0)
4170 };
4171
4172 Result = DAG.getMergeValues(Ops, DL).getNode();
4173 }
4174
Tom Stellardf8794352012-12-19 22:10:31 +00004175 if (BR) {
4176 // Give the branch instruction our target
4177 SDValue Ops[] = {
4178 BR->getOperand(0),
4179 BRCOND.getOperand(2)
4180 };
Chandler Carruth356665a2014-08-01 22:09:43 +00004181 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4182 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4183 BR = NewBR.getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004184 }
4185
4186 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4187
4188 // Copy the intrinsic results to registers
4189 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4190 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4191 if (!CopyToReg)
4192 continue;
4193
4194 Chain = DAG.getCopyToReg(
4195 Chain, DL,
4196 CopyToReg->getOperand(1),
4197 SDValue(Result, i - 1),
4198 SDValue());
4199
4200 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4201 }
4202
4203 // Remove the old intrinsic from the chain
4204 DAG.ReplaceAllUsesOfValueWith(
4205 SDValue(Intr, Intr->getNumValues() - 1),
4206 Intr->getOperand(0));
4207
4208 return Chain;
Tom Stellard75aadc22012-12-11 21:25:42 +00004209}
4210
Aakanksha Patild5443f82019-05-29 18:20:11 +00004211SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4212 SelectionDAG &DAG) const {
4213 MVT VT = Op.getSimpleValueType();
4214 SDLoc DL(Op);
4215 // Checking the depth
4216 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
4217 return DAG.getConstant(0, DL, VT);
4218
4219 MachineFunction &MF = DAG.getMachineFunction();
4220 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4221 // Check for kernel and shader functions
4222 if (Info->isEntryFunction())
4223 return DAG.getConstant(0, DL, VT);
4224
4225 MachineFrameInfo &MFI = MF.getFrameInfo();
4226 // There is a call to @llvm.returnaddress in this function
4227 MFI.setReturnAddressIsTaken(true);
4228
4229 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
4230 // Get the return address reg and mark it as an implicit live-in
4231  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                  getRegClassFor(VT, Op.getNode()->isDivergent()));
4232
4233 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4234}
4235
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004236SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4237 SDValue Op,
4238 const SDLoc &DL,
4239 EVT VT) const {
4240 return Op.getValueType().bitsLE(VT) ?
4241 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4242 DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4243}
4244
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004245SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004246 assert(Op.getValueType() == MVT::f16 &&
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004247 "Do not know how to custom lower FP_ROUND for non-f16 type");
4248
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004249 SDValue Src = Op.getOperand(0);
4250 EVT SrcVT = Src.getValueType();
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004251 if (SrcVT != MVT::f64)
4252 return Op;
4253
4254 SDLoc DL(Op);
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004255
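  // f64 -> f16 is lowered by converting to the f16 bit pattern in an i32 via
  // FP_TO_FP16, then truncating to i16 and bitcasting to f16.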
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004256 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4257 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
Mandeep Singh Grang5e1697e2017-06-06 05:08:36 +00004258 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004259}
4260
Matt Arsenault687ec752018-10-22 16:27:27 +00004261SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4262 SelectionDAG &DAG) const {
4263 EVT VT = Op.getValueType();
Matt Arsenault055e4dc2019-03-29 19:14:54 +00004264 const MachineFunction &MF = DAG.getMachineFunction();
4265 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4266 bool IsIEEEMode = Info->getMode().IEEE;
Matt Arsenault687ec752018-10-22 16:27:27 +00004267
4268  // FIXME: Assert during selection that this is only selected for
4269  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4270  // mode functions, but this happens to be OK since it's only done in cases
4271  // where it is known there are no sNaNs.
4272 if (IsIEEEMode)
4273 return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4274
4275 if (VT == MVT::v4f16)
4276 return splitBinaryVectorOp(Op, DAG);
4277 return Op;
4278}
4279
Matt Arsenault3e025382017-04-24 17:49:13 +00004280SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4281 SDLoc SL(Op);
Matt Arsenault3e025382017-04-24 17:49:13 +00004282 SDValue Chain = Op.getOperand(0);
4283
Tom Stellard5bfbae52018-07-11 20:59:01 +00004284 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004285 !Subtarget->isTrapHandlerEnabled())
Matt Arsenault3e025382017-04-24 17:49:13 +00004286 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
Tony Tye43259df2018-05-16 16:19:34 +00004287
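  // The HSA trap handler is in use: pass the queue pointer to the handler in
  // SGPR0/SGPR1 and emit a trap with the LLVM trap ID.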
4288 MachineFunction &MF = DAG.getMachineFunction();
4289 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4290 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4291 assert(UserSGPR != AMDGPU::NoRegister);
4292 SDValue QueuePtr = CreateLiveInRegister(
4293 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4294 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4295 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4296 QueuePtr, SDValue());
4297 SDValue Ops[] = {
4298 ToReg,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004299 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
Tony Tye43259df2018-05-16 16:19:34 +00004300 SGPR01,
4301 ToReg.getValue(1)
4302 };
4303 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4304}
4305
4306SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4307 SDLoc SL(Op);
4308 SDValue Chain = Op.getOperand(0);
4309 MachineFunction &MF = DAG.getMachineFunction();
4310
Tom Stellard5bfbae52018-07-11 20:59:01 +00004311 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004312 !Subtarget->isTrapHandlerEnabled()) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004313 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
Matt Arsenault3e025382017-04-24 17:49:13 +00004314 "debugtrap handler not supported",
4315 Op.getDebugLoc(),
4316 DS_Warning);
Matthias Braunf1caa282017-12-15 22:22:58 +00004317 LLVMContext &Ctx = MF.getFunction().getContext();
Matt Arsenault3e025382017-04-24 17:49:13 +00004318 Ctx.diagnose(NoTrap);
4319 return Chain;
4320 }
Matt Arsenault3e025382017-04-24 17:49:13 +00004321
Tony Tye43259df2018-05-16 16:19:34 +00004322 SDValue Ops[] = {
4323 Chain,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004324 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
Tony Tye43259df2018-05-16 16:19:34 +00004325 };
4326 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
Matt Arsenault3e025382017-04-24 17:49:13 +00004327}
4328
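// Return the 32-bit value used as the high half of a flat pointer into the
// LOCAL or PRIVATE segment, taken either from the aperture hardware registers
// or from the aperture fields of the amd_queue_t behind the queue pointer.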
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004329SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004330 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004331 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4332 if (Subtarget->hasApertureRegs()) {
Matt Arsenault0da63502018-08-31 05:49:54 +00004333 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004334 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4335 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
Matt Arsenault0da63502018-08-31 05:49:54 +00004336 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004337 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4338 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4339 unsigned Encoding =
4340 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4341 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4342 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
Matt Arsenaulte823d922017-02-18 18:29:53 +00004343
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004344 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4345 SDValue ApertureReg = SDValue(
4346 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4347 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4348 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004349 }
4350
Matt Arsenault99c14522016-04-25 19:27:24 +00004351 MachineFunction &MF = DAG.getMachineFunction();
4352 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004353 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4354 assert(UserSGPR != AMDGPU::NoRegister);
4355
Matt Arsenault99c14522016-04-25 19:27:24 +00004356 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004357 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004358
4359 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4360 // private_segment_aperture_base_hi.
Matt Arsenault0da63502018-08-31 05:49:54 +00004361 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004362
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004363 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004364
4365 // TODO: Use custom target PseudoSourceValue.
4366 // TODO: We should use the value from the IR intrinsic call, but it might not
4367  // be available, and it is unclear how we would get it.
4368 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Matt Arsenault0da63502018-08-31 05:49:54 +00004369 AMDGPUAS::CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004370
4371 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004372 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004373 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004374 MachineMemOperand::MODereferenceable |
4375 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004376}
4377
4378SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4379 SelectionDAG &DAG) const {
4380 SDLoc SL(Op);
4381 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4382
4383 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004384 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4385
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004386 const AMDGPUTargetMachine &TM =
4387 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4388
Matt Arsenault99c14522016-04-25 19:27:24 +00004389 // flat -> local/private
Matt Arsenault0da63502018-08-31 05:49:54 +00004390 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004391 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004392
Matt Arsenault0da63502018-08-31 05:49:54 +00004393 if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4394 DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004395 unsigned NullVal = TM.getNullPointerValue(DestAS);
4396 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004397 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4398 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4399
4400 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4401 NonNull, Ptr, SegmentNullPtr);
4402 }
4403 }
4404
4405 // local/private -> flat
Matt Arsenault0da63502018-08-31 05:49:54 +00004406 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004407 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004408
Matt Arsenault0da63502018-08-31 05:49:54 +00004409 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4410 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004411 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4412 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004413
Matt Arsenault99c14522016-04-25 19:27:24 +00004414 SDValue NonNull
4415 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4416
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004417 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004418 SDValue CvtPtr
4419 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4420
4421 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4422 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4423 FlatNullPtr);
4424 }
4425 }
4426
4427 // global <-> flat are no-ops and never emitted.
4428
4429 const MachineFunction &MF = DAG.getMachineFunction();
4430 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004431 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004432 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4433
4434 return DAG.getUNDEF(ASC->getValueType(0));
4435}
4436
Matt Arsenault3aef8092017-01-23 23:09:58 +00004437SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4438 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004439 SDValue Vec = Op.getOperand(0);
4440 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004441 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004442 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004443 EVT EltVT = VecVT.getVectorElementType();
4444 unsigned VecSize = VecVT.getSizeInBits();
4445 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004446
Matt Arsenault9224c002018-06-05 19:52:46 +00004447
4448 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004449
4450 unsigned NumElts = VecVT.getVectorNumElements();
4451 SDLoc SL(Op);
4452 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4453
Matt Arsenault9224c002018-06-05 19:52:46 +00004454 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004455 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4456
4457 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4458 DAG.getConstant(0, SL, MVT::i32));
4459 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4460 DAG.getConstant(1, SL, MVT::i32));
4461
4462 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4463 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4464
4465 unsigned Idx = KIdx->getZExtValue();
4466 bool InsertLo = Idx < 2;
4467 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4468 InsertLo ? LoVec : HiVec,
4469 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4470 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4471
4472 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4473
4474 SDValue Concat = InsertLo ?
4475 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4476 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4477
4478 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4479 }
4480
Matt Arsenault3aef8092017-01-23 23:09:58 +00004481 if (isa<ConstantSDNode>(Idx))
4482 return SDValue();
4483
Matt Arsenault9224c002018-06-05 19:52:46 +00004484 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004485
Matt Arsenault3aef8092017-01-23 23:09:58 +00004486 // Avoid stack access for dynamic indexing.
Matt Arsenault3aef8092017-01-23 23:09:58 +00004487 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Tim Corringhamfa3e4e52019-02-01 16:51:09 +00004488
4489 // Create a congruent vector with the target value in each element so that
4490 // the required element can be masked and ORed into the target vector.
4491 SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4492 DAG.getSplatBuildVector(VecVT, SL, InsVal));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004493
Matt Arsenault9224c002018-06-05 19:52:46 +00004494 assert(isPowerOf2_32(EltSize));
4495 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4496
Matt Arsenault3aef8092017-01-23 23:09:58 +00004497 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004498 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004499
Matt Arsenault67a98152018-05-16 11:47:30 +00004500 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4501 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4502 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004503 ScaledIdx);
4504
Matt Arsenault67a98152018-05-16 11:47:30 +00004505 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4506 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4507 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004508
Matt Arsenault67a98152018-05-16 11:47:30 +00004509 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4510 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004511}
4512
4513SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4514 SelectionDAG &DAG) const {
4515 SDLoc SL(Op);
4516
4517 EVT ResultVT = Op.getValueType();
4518 SDValue Vec = Op.getOperand(0);
4519 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004520 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004521 unsigned VecSize = VecVT.getSizeInBits();
4522 EVT EltVT = VecVT.getVectorElementType();
4523 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004524
Matt Arsenault98f29462017-05-17 20:30:58 +00004525 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4526
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004527 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004528  // source modifiers before obscuring the source value with bit operations.
4529
4530 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4531 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4532 return Combined;
4533
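  // Otherwise extract by bitcasting the (at most 64-bit) vector to an
  // integer, shifting the selected element down to bit 0, and truncating or
  // extending to the result type.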
Matt Arsenault9224c002018-06-05 19:52:46 +00004534 unsigned EltSize = EltVT.getSizeInBits();
4535 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004536
Matt Arsenault9224c002018-06-05 19:52:46 +00004537 MVT IntVT = MVT::getIntegerVT(VecSize);
4538 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4539
4540 // Convert vector index to bit-index (* EltSize)
4541 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004542
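  // Shift the selected element down to bit 0 of the integer-typed vector; it
  // is truncated or extended to the result type below.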
Matt Arsenault67a98152018-05-16 11:47:30 +00004543 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4544 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004545
Matt Arsenault67a98152018-05-16 11:47:30 +00004546 if (ResultVT == MVT::f16) {
4547 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4548 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4549 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004550
Matt Arsenault67a98152018-05-16 11:47:30 +00004551 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4552}
4553
4554SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4555 SelectionDAG &DAG) const {
4556 SDLoc SL(Op);
4557 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004558
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004559 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4560 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4561
4562 // Turn into pair of packed build_vectors.
4563 // TODO: Special case for constants that can be materialized with s_mov_b64.
4564 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4565 { Op.getOperand(0), Op.getOperand(1) });
4566 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4567 { Op.getOperand(2), Op.getOperand(3) });
4568
4569 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4570 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4571
4572 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4573 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4574 }
4575
Matt Arsenault1349a042018-05-22 06:32:10 +00004576 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004577 assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
Matt Arsenault67a98152018-05-16 11:47:30 +00004578
Matt Arsenault1349a042018-05-22 06:32:10 +00004579 SDValue Lo = Op.getOperand(0);
4580 SDValue Hi = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004581
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004582 // Avoid adding defined bits with the zero_extend.
4583 if (Hi.isUndef()) {
4584 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4585 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4586 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4587 }
Matt Arsenault67a98152018-05-16 11:47:30 +00004588
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004589 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004590 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4591
4592 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4593 DAG.getConstant(16, SL, MVT::i32));
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004594 if (Lo.isUndef())
4595 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4596
4597 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4598 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
Matt Arsenault1349a042018-05-22 06:32:10 +00004599
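  // Combine the halves: Lo occupies bits [15:0] and ShlHi bits [31:16].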
4600 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004601 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004602}
4603
Tom Stellard418beb72016-07-13 14:23:33 +00004604bool
4605SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4606 // We can fold offsets for anything that doesn't require a GOT relocation.
Matt Arsenault0da63502018-08-31 05:49:54 +00004607 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4608 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4609 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004610 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004611}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004612
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004613static SDValue
4614buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4615 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4616 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004617 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4618 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004619 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004620 // For constant address space:
4621 // s_getpc_b64 s[0:1]
4622 // s_add_u32 s0, s0, $symbol
4623 // s_addc_u32 s1, s1, 0
4624 //
4625 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4626 // a fixup or relocation is emitted to replace $symbol with a literal
4627 // constant, which is a pc-relative offset from the encoding of the $symbol
4628 // operand to the global variable.
4629 //
4630 // For global address space:
4631 // s_getpc_b64 s[0:1]
4632 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4633 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4634 //
4635 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4636 // fixups or relocations are emitted to replace $symbol@*@lo and
4637 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4638 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4639 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004640 //
4641 // What we want here is an offset from the value returned by s_getpc
4642 // (which is the address of the s_add_u32 instruction) to the global
4643 // variable, but since the encoding of $symbol starts 4 bytes after the start
4644 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4645 // small. This requires us to add 4 to the global variable offset in order to
4646 // compute the correct address.
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004647 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4648 GAFlags);
4649 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4650 GAFlags == SIInstrInfo::MO_NONE ?
4651 GAFlags : GAFlags + 1);
4652 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004653}
4654
Tom Stellard418beb72016-07-13 14:23:33 +00004655SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4656 SDValue Op,
4657 SelectionDAG &DAG) const {
4658 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004659 const GlobalValue *GV = GSD->getGlobal();
Matt Arsenaultd1f45712018-09-10 12:16:11 +00004660 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
4661 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
4662 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
Tom Stellard418beb72016-07-13 14:23:33 +00004663 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4664
4665 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00004666 EVT PtrVT = Op.getValueType();
4667
Matt Arsenaultd1f45712018-09-10 12:16:11 +00004668 // FIXME: Should not make address space based decisions here.
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004669 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00004670 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004671 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004672 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4673 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004674
4675 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004676 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004677
4678 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00004679 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00004680 const DataLayout &DataLayout = DAG.getDataLayout();
4681 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
Matt Arsenaultd77fcc22018-09-10 02:23:39 +00004682 MachinePointerInfo PtrInfo
4683 = MachinePointerInfo::getGOT(DAG.getMachineFunction());
Tom Stellard418beb72016-07-13 14:23:33 +00004684
Justin Lebar9c375812016-07-15 18:27:10 +00004685 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00004686 MachineMemOperand::MODereferenceable |
4687 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00004688}
4689
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004690SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4691 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004692 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4693 // the destination register.
4694 //
Tom Stellardfc92e772015-05-12 14:18:14 +00004695 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4696 // so we will end up with redundant moves to m0.
4697 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004698 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4699
4700 // A Null SDValue creates a glue result.
4701 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4702 V, Chain);
4703 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00004704}
4705
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004706SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4707 SDValue Op,
4708 MVT VT,
4709 unsigned Offset) const {
4710 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004711 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004712 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004713 // The local size values will have the hi 16-bits as zero.
4714 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4715 DAG.getValueType(VT));
4716}
4717
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004718static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4719 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004720 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004721 "non-hsa intrinsic with hsa target",
4722 DL.getDebugLoc());
4723 DAG.getContext()->diagnose(BadIntrin);
4724 return DAG.getUNDEF(VT);
4725}
4726
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004727static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4728 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004729 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004730 "intrinsic not supported on subtarget",
4731 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00004732 DAG.getContext()->diagnose(BadIntrin);
4733 return DAG.getUNDEF(VT);
4734}
4735
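// Pack the given dword-sized values into the smallest suitable f32 vector type
// (1, 2, 4, 8 or 16 elements), bitcasting non-f32 elements to f32 and padding
// the tail with undef.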
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004736static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
4737 ArrayRef<SDValue> Elts) {
4738 assert(!Elts.empty());
4739 MVT Type;
4740 unsigned NumElts;
4741
4742 if (Elts.size() == 1) {
4743 Type = MVT::f32;
4744 NumElts = 1;
4745 } else if (Elts.size() == 2) {
4746 Type = MVT::v2f32;
4747 NumElts = 2;
4748 } else if (Elts.size() <= 4) {
4749 Type = MVT::v4f32;
4750 NumElts = 4;
4751 } else if (Elts.size() <= 8) {
4752 Type = MVT::v8f32;
4753 NumElts = 8;
4754 } else {
4755 assert(Elts.size() <= 16);
4756 Type = MVT::v16f32;
4757 NumElts = 16;
4758 }
4759
4760 SmallVector<SDValue, 16> VecElts(NumElts);
4761 for (unsigned i = 0; i < Elts.size(); ++i) {
4762 SDValue Elt = Elts[i];
4763 if (Elt.getValueType() != MVT::f32)
4764 Elt = DAG.getBitcast(MVT::f32, Elt);
4765 VecElts[i] = Elt;
4766 }
4767 for (unsigned i = Elts.size(); i < NumElts; ++i)
4768 VecElts[i] = DAG.getUNDEF(MVT::f32);
4769
4770 if (NumElts == 1)
4771 return VecElts[0];
4772 return DAG.getBuildVector(Type, DL, VecElts);
4773}
4774
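// Split the packed cachepolicy immediate into glc (bit 0), slc (bit 1) and
// dlc (bit 2) target constants. Returns true if no bits other than the
// requested ones are set.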
4775static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004776 SDValue *GLC, SDValue *SLC, SDValue *DLC) {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00004777 auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004778
4779 uint64_t Value = CachePolicyConst->getZExtValue();
4780 SDLoc DL(CachePolicy);
4781 if (GLC) {
4782 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4783 Value &= ~(uint64_t)0x1;
4784 }
4785 if (SLC) {
4786 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4787 Value &= ~(uint64_t)0x2;
4788 }
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004789 if (DLC) {
4790 *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
4791 Value &= ~(uint64_t)0x4;
4792 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004793
4794 return Value == 0;
4795}
4796
David Stuttardf77079f2019-01-14 11:55:24 +00004797// Reconstruct the required return value for an image load intrinsic.
4798// This is more complicated due to the optional use of TexFailCtrl, which means
4799// the required return type is an aggregate.
4800static SDValue constructRetValue(SelectionDAG &DAG,
4801 MachineSDNode *Result,
4802 ArrayRef<EVT> ResultTypes,
4803 bool IsTexFail, bool Unpacked, bool IsD16,
4804 int DMaskPop, int NumVDataDwords,
4805 const SDLoc &DL, LLVMContext &Context) {
4806 // Determine the required return type. This is the same regardless of the IsTexFail flag.
4807 EVT ReqRetVT = ResultTypes[0];
4808 EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
4809 int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
4810 EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
4811 EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
4812 : AdjEltVT
4813 : ReqRetVT;
4814
4815 // Extract data part of the result
4816 // Bitcast the result to the same type as the required return type
4817 int NumElts;
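  // Packed D16 results hold two half-sized elements per dword.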
4818 if (IsD16 && !Unpacked)
4819 NumElts = NumVDataDwords << 1;
4820 else
4821 NumElts = NumVDataDwords;
4822
4823 EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
4824 : AdjEltVT;
4825
Tim Renouf6f0191a2019-03-22 15:21:11 +00004826 // Special case for v6f16. Rather than add support for this, use v3i32 to
David Stuttardf77079f2019-01-14 11:55:24 +00004827 // extract the data elements.
Tim Renouf6f0191a2019-03-22 15:21:11 +00004828 bool V6F16Special = false;
4829 if (NumElts == 6) {
4830 CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2);
David Stuttardf77079f2019-01-14 11:55:24 +00004831 DMaskPop >>= 1;
4832 ReqRetNumElts >>= 1;
Tim Renouf6f0191a2019-03-22 15:21:11 +00004833 V6F16Special = true;
David Stuttardf77079f2019-01-14 11:55:24 +00004834 AdjVT = MVT::v2i32;
4835 }
4836
4837 SDValue N = SDValue(Result, 0);
4838 SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
4839
4840 // Iterate over the result
4841 SmallVector<SDValue, 4> BVElts;
4842
4843 if (CastVT.isVector()) {
4844 DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
4845 } else {
4846 BVElts.push_back(CastRes);
4847 }
4848 int ExtraElts = ReqRetNumElts - DMaskPop;
4849 while(ExtraElts--)
4850 BVElts.push_back(DAG.getUNDEF(AdjEltVT));
4851
4852 SDValue PreTFCRes;
4853 if (ReqRetNumElts > 1) {
4854 SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
4855 if (IsD16 && Unpacked)
4856 PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
4857 else
4858 PreTFCRes = NewVec;
4859 } else {
4860 PreTFCRes = BVElts[0];
4861 }
4862
Tim Renouf6f0191a2019-03-22 15:21:11 +00004863 if (V6F16Special)
David Stuttardf77079f2019-01-14 11:55:24 +00004864 PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
4865
4866 if (!IsTexFail) {
4867 if (Result->getNumValues() > 1)
4868 return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
4869 else
4870 return PreTFCRes;
4871 }
4872
4873 // Extract the TexFail result and insert into aggregate return
4874 SmallVector<SDValue, 1> TFCElt;
4875 DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
4876 SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
4877 return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
4878}
4879
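// Split the texfailctrl immediate into tfe (bit 0) and lwe (bit 1) target
// constants, recording in IsTexFail whether any texfail handling was
// requested. Returns true if no other bits are set.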
4880static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
4881 SDValue *LWE, bool &IsTexFail) {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00004882 auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
David Stuttardf77079f2019-01-14 11:55:24 +00004883
4884 uint64_t Value = TexFailCtrlConst->getZExtValue();
4885 if (Value) {
4886 IsTexFail = true;
4887 }
4888
4889 SDLoc DL(TexFailCtrlConst);
4890 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4891 Value &= ~(uint64_t)0x1;
4892 *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4893 Value &= ~(uint64_t)0x2;
4894
4895 return Value == 0;
4896}
4897
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004898SDValue SITargetLowering::lowerImage(SDValue Op,
4899 const AMDGPU::ImageDimIntrinsicInfo *Intr,
4900 SelectionDAG &DAG) const {
4901 SDLoc DL(Op);
Ryan Taylor1f334d02018-08-28 15:07:30 +00004902 MachineFunction &MF = DAG.getMachineFunction();
4903 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004904 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4905 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
4906 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004907 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
4908 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
Piotr Sobczak9b11e932019-06-10 15:58:51 +00004909 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
4910 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004911 unsigned IntrOpcode = Intr->BaseOpcode;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004912 bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004913
David Stuttardf77079f2019-01-14 11:55:24 +00004914 SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
4915 SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004916 bool IsD16 = false;
Ryan Taylor1f334d02018-08-28 15:07:30 +00004917 bool IsA16 = false;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004918 SDValue VData;
4919 int NumVDataDwords;
David Stuttardf77079f2019-01-14 11:55:24 +00004920 bool AdjustRetType = false;
4921
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004922 unsigned AddrIdx; // Index of first address argument
4923 unsigned DMask;
David Stuttardf77079f2019-01-14 11:55:24 +00004924 unsigned DMaskLanes = 0;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004925
4926 if (BaseOpcode->Atomic) {
4927 VData = Op.getOperand(2);
4928
4929 bool Is64Bit = VData.getValueType() == MVT::i64;
4930 if (BaseOpcode->AtomicX2) {
4931 SDValue VData2 = Op.getOperand(3);
4932 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
4933 {VData, VData2});
4934 if (Is64Bit)
4935 VData = DAG.getBitcast(MVT::v4i32, VData);
4936
4937 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
4938 DMask = Is64Bit ? 0xf : 0x3;
4939 NumVDataDwords = Is64Bit ? 4 : 2;
4940 AddrIdx = 4;
4941 } else {
4942 DMask = Is64Bit ? 0x3 : 0x1;
4943 NumVDataDwords = Is64Bit ? 2 : 1;
4944 AddrIdx = 3;
4945 }
4946 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00004947 unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
Matt Arsenaultcaf13162019-03-12 21:02:54 +00004948 auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
David Stuttardf77079f2019-01-14 11:55:24 +00004949 DMask = DMaskConst->getZExtValue();
4950 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004951
4952 if (BaseOpcode->Store) {
4953 VData = Op.getOperand(2);
4954
4955 MVT StoreVT = VData.getSimpleValueType();
4956 if (StoreVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004957 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004958 !BaseOpcode->HasD16)
4959 return Op; // D16 is unsupported for this instruction
4960
4961 IsD16 = true;
4962 VData = handleD16VData(VData, DAG);
4963 }
4964
4965 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004966 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00004967 // Work out the number of dwords based on the dmask popcount, the underlying
4968 // type, and whether packing is supported.
4969 MVT LoadVT = ResultTypes[0].getSimpleVT();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004970 if (LoadVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004971 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004972 !BaseOpcode->HasD16)
4973 return Op; // D16 is unsupported for this instruction
4974
4975 IsD16 = true;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004976 }
4977
David Stuttardf77079f2019-01-14 11:55:24 +00004978 // Confirm that the return type is large enough for the specified dmask.
4979 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
4980 (!LoadVT.isVector() && DMaskLanes > 1))
4981 return Op;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004982
David Stuttardf77079f2019-01-14 11:55:24 +00004983 if (IsD16 && !Subtarget->hasUnpackedD16VMem())
4984 NumVDataDwords = (DMaskLanes + 1) / 2;
4985 else
4986 NumVDataDwords = DMaskLanes;
4987
4988 AdjustRetType = true;
4989 }
David Stuttardc6603862018-11-29 20:14:17 +00004990
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004991 AddrIdx = DMaskIdx + 1;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004992 }
4993
Ryan Taylor1f334d02018-08-28 15:07:30 +00004994 unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
4995 unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
4996 unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
4997 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
4998 NumCoords + NumLCM;
4999 unsigned NumMIVAddrs = NumVAddrs;
5000
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005001 SmallVector<SDValue, 4> VAddrs;
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005002
5003 // Optimize _L to _LZ when the lod argument is known to be zero or negative.
5004 if (LZMappingInfo) {
5005 if (auto ConstantLod =
Ryan Taylor1f334d02018-08-28 15:07:30 +00005006 dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005007 if (ConstantLod->isZero() || ConstantLod->isNegative()) {
5008 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
Ryan Taylor1f334d02018-08-28 15:07:30 +00005009 NumMIVAddrs--; // remove 'lod'
Ryan Taylor894c8fd2018-08-01 12:12:01 +00005010 }
5011 }
5012 }
5013
Piotr Sobczak9b11e932019-06-10 15:58:51 +00005014 // Optimize _mip away when 'lod' is zero.
5015 if (MIPMappingInfo) {
5016 if (auto ConstantLod =
5017 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
5018 if (ConstantLod->isNullValue()) {
5019 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
5020 NumMIVAddrs--; // remove 'lod'
5021 }
5022 }
5023 }
5024
Ryan Taylor1f334d02018-08-28 15:07:30 +00005025 // Check for 16-bit addresses; if present, pack pairs of them into dwords.
5026 unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
5027 MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
Neil Henning63718b22018-10-31 10:34:48 +00005028 const MVT VAddrScalarVT = VAddrVT.getScalarType();
5029 if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
Ryan Taylor1f334d02018-08-28 15:07:30 +00005030 ST->hasFeature(AMDGPU::FeatureR128A16)) {
5031 IsA16 = true;
Neil Henning63718b22018-10-31 10:34:48 +00005032 const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
Ryan Taylor1f334d02018-08-28 15:07:30 +00005033 for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
5034 SDValue AddrLo, AddrHi;
5035 // Push back extra arguments.
5036 if (i < DimIdx) {
5037 AddrLo = Op.getOperand(i);
5038 } else {
5039 AddrLo = Op.getOperand(i);
5040 // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
5041 // in 1D, derivatives dx/dh and dx/dv are packed with undef.
5042 if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
Matt Arsenault0da63502018-08-31 05:49:54 +00005043 ((NumGradients / 2) % 2 == 1 &&
5044 (i == DimIdx + (NumGradients / 2) - 1 ||
Ryan Taylor1f334d02018-08-28 15:07:30 +00005045 i == DimIdx + NumGradients - 1))) {
5046 AddrHi = DAG.getUNDEF(MVT::f16);
5047 } else {
5048 AddrHi = Op.getOperand(i + 1);
5049 i++;
5050 }
Neil Henning63718b22018-10-31 10:34:48 +00005051 AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
Ryan Taylor1f334d02018-08-28 15:07:30 +00005052 {AddrLo, AddrHi});
5053 AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
5054 }
5055 VAddrs.push_back(AddrLo);
5056 }
5057 } else {
5058 for (unsigned i = 0; i < NumMIVAddrs; ++i)
5059 VAddrs.push_back(Op.getOperand(AddrIdx + i));
5060 }
5061
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005062 // If the register allocator cannot place the address registers contiguously
5063 // without introducing moves, then using the non-sequential address encoding
5064 // is always preferable, since it saves VALU instructions and is usually a
5065 // wash in terms of code size or even better.
5066 //
5067 // However, we currently have no way of hinting to the register allocator that
5068 // MIMG addresses should be placed contiguously when it is possible to do so,
5069 // so force non-NSA for the common 2-address case as a heuristic.
5070 //
5071 // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5072 // allocation when possible.
5073 bool UseNSA =
5074 ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5075 SDValue VAddr;
5076 if (!UseNSA)
5077 VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005078
5079 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5080 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5081 unsigned CtrlIdx; // Index of texfailctrl argument
5082 SDValue Unorm;
5083 if (!BaseOpcode->Sampler) {
5084 Unorm = True;
5085 CtrlIdx = AddrIdx + NumVAddrs + 1;
5086 } else {
5087 auto UnormConst =
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005088 cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005089
5090 Unorm = UnormConst->getZExtValue() ? True : False;
5091 CtrlIdx = AddrIdx + NumVAddrs + 3;
5092 }
5093
David Stuttardf77079f2019-01-14 11:55:24 +00005094 SDValue TFE;
5095 SDValue LWE;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005096 SDValue TexFail = Op.getOperand(CtrlIdx);
David Stuttardf77079f2019-01-14 11:55:24 +00005097 bool IsTexFail = false;
5098 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005099 return Op;
5100
David Stuttardf77079f2019-01-14 11:55:24 +00005101 if (IsTexFail) {
5102 if (!DMaskLanes) {
5103 // Expecting to get an error flag since TFC is on and dmask is 0.
5104 // Force dmask to be at least 1, otherwise the instruction will fail.
5105 DMask = 0x1;
5106 DMaskLanes = 1;
5107 NumVDataDwords = 1;
5108 }
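    // Reserve one extra result dword for the texfail (TFE/LWE) status value.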
5109 NumVDataDwords += 1;
5110 AdjustRetType = true;
5111 }
5112
5113 // Something earlier tagged that the return type needs adjusting.
5114 // This happens if the instruction is a load or has set TexFailCtrl flags.
5115 if (AdjustRetType) {
5116 // NumVDataDwords reflects the true number of dwords required in the return type
5117 if (DMaskLanes == 0 && !BaseOpcode->Store) {
5118 // This is a no-op load; it can be eliminated.
5119 SDValue Undef = DAG.getUNDEF(Op.getValueType());
5120 if (isa<MemSDNode>(Op))
5121 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5122 return Undef;
5123 }
5124
David Stuttardf77079f2019-01-14 11:55:24 +00005125 EVT NewVT = NumVDataDwords > 1 ?
5126 EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
5127 : MVT::f32;
5128
5129 ResultTypes[0] = NewVT;
5130 if (ResultTypes.size() == 3) {
5131 // The original result was an aggregate type used for the TexFailCtrl result.
5132 // The actual instruction returns as a vector type, which has now been
5133 // created. Remove the aggregate result.
5134 ResultTypes.erase(&ResultTypes[1]);
5135 }
5136 }
5137
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005138 SDValue GLC;
5139 SDValue SLC;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005140 SDValue DLC;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005141 if (BaseOpcode->Atomic) {
5142 GLC = True; // TODO no-return optimization
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005143 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5144 IsGFX10 ? &DLC : nullptr))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005145 return Op;
5146 } else {
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005147 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5148 IsGFX10 ? &DLC : nullptr))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005149 return Op;
5150 }
5151
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005152 SmallVector<SDValue, 26> Ops;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005153 if (BaseOpcode->Store || BaseOpcode->Atomic)
5154 Ops.push_back(VData); // vdata
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005155 if (UseNSA) {
5156 for (const SDValue &Addr : VAddrs)
5157 Ops.push_back(Addr);
5158 } else {
5159 Ops.push_back(VAddr);
5160 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005161 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5162 if (BaseOpcode->Sampler)
5163 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5164 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005165 if (IsGFX10)
5166 Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005167 Ops.push_back(Unorm);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005168 if (IsGFX10)
5169 Ops.push_back(DLC);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005170 Ops.push_back(GLC);
5171 Ops.push_back(SLC);
Ryan Taylor1f334d02018-08-28 15:07:30 +00005172 Ops.push_back(IsA16 && // a16 or r128
5173 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
David Stuttardf77079f2019-01-14 11:55:24 +00005174 Ops.push_back(TFE); // tfe
5175 Ops.push_back(LWE); // lwe
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005176 if (!IsGFX10)
5177 Ops.push_back(DimInfo->DA ? True : False);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005178 if (BaseOpcode->HasD16)
5179 Ops.push_back(IsD16 ? True : False);
5180 if (isa<MemSDNode>(Op))
5181 Ops.push_back(Op.getOperand(0)); // chain
5182
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005183 int NumVAddrDwords =
5184 UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005185 int Opcode = -1;
5186
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005187 if (IsGFX10) {
5188 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5189 UseNSA ? AMDGPU::MIMGEncGfx10NSA
5190 : AMDGPU::MIMGEncGfx10Default,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005191 NumVDataDwords, NumVAddrDwords);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005192 } else {
5193 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5194 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5195 NumVDataDwords, NumVAddrDwords);
5196 if (Opcode == -1)
5197 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5198 NumVDataDwords, NumVAddrDwords);
5199 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005200 assert(Opcode != -1);
5201
5202 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5203 if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
Chandler Carruth66654b72018-08-14 23:30:32 +00005204 MachineMemOperand *MemRef = MemOp->getMemOperand();
5205 DAG.setNodeMemRefs(NewNode, {MemRef});
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005206 }
5207
5208 if (BaseOpcode->AtomicX2) {
5209 SmallVector<SDValue, 1> Elt;
5210 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5211 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
David Stuttardf77079f2019-01-14 11:55:24 +00005212 } else if (!BaseOpcode->Store) {
5213 return constructRetValue(DAG, NewNode,
5214 OrigResultTypes, IsTexFail,
5215 Subtarget->hasUnpackedD16VMem(), IsD16,
5216 DMaskLanes, NumVDataDwords, DL,
5217 *DAG.getContext());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005218 }
5219
5220 return SDValue(NewNode, 0);
5221}
5222
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005223SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5224 SDValue Offset, SDValue GLC,
5225 SelectionDAG &DAG) const {
5226 MachineFunction &MF = DAG.getMachineFunction();
5227 MachineMemOperand *MMO = MF.getMachineMemOperand(
5228 MachinePointerInfo(),
5229 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5230 MachineMemOperand::MOInvariant,
5231 VT.getStoreSize(), VT.getStoreSize());
5232
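  // A uniform offset can be selected directly as a scalar buffer load.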
5233 if (!Offset->isDivergent()) {
5234 SDValue Ops[] = {
5235 Rsrc,
5236 Offset, // Offset
5237 GLC // glc
5238 };
5239 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5240 DAG.getVTList(VT), Ops, VT, MMO);
5241 }
5242
5243 // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5244 // assume that the buffer is unswizzled.
5245 SmallVector<SDValue, 4> Loads;
5246 unsigned NumLoads = 1;
5247 MVT LoadVT = VT.getSimpleVT();
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005248 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
Simon Pilgrim44dfd812018-12-07 21:44:25 +00005249 assert((LoadVT.getScalarType() == MVT::i32 ||
5250 LoadVT.getScalarType() == MVT::f32) &&
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005251 isPowerOf2_32(NumElts));
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005252
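  // Wide results are split into multiple 4-dword MUBUF loads and concatenated
  // again below.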
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005253 if (NumElts == 8 || NumElts == 16) {
5254 NumLoads = NumElts == 16 ? 4 : 2;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005255 LoadVT = MVT::v4i32;
5256 }
5257
5258 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5259 unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5260 SDValue Ops[] = {
5261 DAG.getEntryNode(), // Chain
5262 Rsrc, // rsrc
5263 DAG.getConstant(0, DL, MVT::i32), // vindex
5264 {}, // voffset
5265 {}, // soffset
5266 {}, // offset
5267 DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5268 DAG.getConstant(0, DL, MVT::i1), // idxen
5269 };
5270
5271 // Use the alignment to ensure that the required offsets will fit into the
5272 // immediate offsets.
5273 setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5274
5275 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5276 for (unsigned i = 0; i < NumLoads; ++i) {
5277 Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5278 Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5279 Ops, LoadVT, MMO));
5280 }
5281
5282 if (VT == MVT::v8i32 || VT == MVT::v16i32)
5283 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5284
5285 return Loads[0];
5286}
5287
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005288SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5289 SelectionDAG &DAG) const {
5290 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00005291 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005292
5293 EVT VT = Op.getValueType();
5294 SDLoc DL(Op);
5295 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5296
Sanjay Patela2607012015-09-16 16:31:21 +00005297 // TODO: Should this propagate fast-math-flags?
5298
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005299 switch (IntrinsicID) {
Tom Stellard2f3f9852017-01-25 01:25:13 +00005300 case Intrinsic::amdgcn_implicit_buffer_ptr: {
Konstantin Zhuravlyovaa067cb2018-10-04 21:02:16 +00005301 if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
Matt Arsenault10fc0622017-06-26 03:01:31 +00005302 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005303 return getPreloadedValue(DAG, *MFI, VT,
5304 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
Tom Stellard2f3f9852017-01-25 01:25:13 +00005305 }
Tom Stellard48f29f22015-11-26 00:43:29 +00005306 case Intrinsic::amdgcn_dispatch_ptr:
Matt Arsenault48ab5262016-04-25 19:27:18 +00005307 case Intrinsic::amdgcn_queue_ptr: {
Konstantin Zhuravlyovaa067cb2018-10-04 21:02:16 +00005308 if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00005309 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00005310 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
Oliver Stannard7e7d9832016-02-02 13:52:43 +00005311 DL.getDebugLoc());
Matt Arsenault800fecf2016-01-11 21:18:33 +00005312 DAG.getContext()->diagnose(BadIntrin);
5313 return DAG.getUNDEF(VT);
5314 }
5315
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005316 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5317 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5318 return getPreloadedValue(DAG, *MFI, VT, RegID);
Matt Arsenault48ab5262016-04-25 19:27:18 +00005319 }
Jan Veselyfea814d2016-06-21 20:46:20 +00005320 case Intrinsic::amdgcn_implicitarg_ptr: {
Matt Arsenault9166ce82017-07-28 15:52:08 +00005321 if (MFI->isEntryFunction())
5322 return getImplicitArgPtr(DAG, DL);
Matt Arsenault817c2532017-08-03 23:12:44 +00005323 return getPreloadedValue(DAG, *MFI, VT,
5324 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
Jan Veselyfea814d2016-06-21 20:46:20 +00005325 }
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00005326 case Intrinsic::amdgcn_kernarg_segment_ptr: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005327 return getPreloadedValue(DAG, *MFI, VT,
5328 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00005329 }
Matt Arsenault8d718dc2016-07-22 17:01:30 +00005330 case Intrinsic::amdgcn_dispatch_id: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005331 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
Matt Arsenault8d718dc2016-07-22 17:01:30 +00005332 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005333 case Intrinsic::amdgcn_rcp:
5334 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5335 case Intrinsic::amdgcn_rsq:
5336 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00005337 case Intrinsic::amdgcn_rsq_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00005338 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005339 return emitRemovedIntrinsicError(DAG, DL, VT);
5340
5341 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00005342 case Intrinsic::amdgcn_rcp_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00005343 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault32fc5272016-07-26 16:45:45 +00005344 return emitRemovedIntrinsicError(DAG, DL, VT);
5345 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
Matt Arsenault09b2c4a2016-07-15 21:26:52 +00005346 case Intrinsic::amdgcn_rsq_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005347 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault79963e82016-02-13 01:03:00 +00005348 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Tom Stellard48f29f22015-11-26 00:43:29 +00005349
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005350 Type *Type = VT.getTypeForEVT(*DAG.getContext());
5351 APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5352 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5353
5354 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5355 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5356 DAG.getConstantFP(Max, DL, VT));
5357 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5358 DAG.getConstantFP(Min, DL, VT));
5359 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005360 case Intrinsic::r600_read_ngroups_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005361 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005362 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005363
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005364 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005365 SI::KernelInputOffsets::NGROUPS_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005366 case Intrinsic::r600_read_ngroups_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005367 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005368 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005369
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005370 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005371 SI::KernelInputOffsets::NGROUPS_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005372 case Intrinsic::r600_read_ngroups_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005373 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005374 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005375
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005376 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005377 SI::KernelInputOffsets::NGROUPS_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005378 case Intrinsic::r600_read_global_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005379 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005380 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005381
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005382 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005383 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005384 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005385 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005386 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005387
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005388 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005389 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005390 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005391 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005392 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005393
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005394 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005395 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005396 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005397 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005398 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005399
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005400 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5401 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005402 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005403 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005404 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005405
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005406 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5407 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005408 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005409 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005410 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005411
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005412 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5413 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00005414 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005415 case Intrinsic::r600_read_tgid_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005416 return getPreloadedValue(DAG, *MFI, VT,
5417 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Matt Arsenault43976df2016-01-30 04:25:19 +00005418 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005419 case Intrinsic::r600_read_tgid_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005420 return getPreloadedValue(DAG, *MFI, VT,
5421 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Matt Arsenault43976df2016-01-30 04:25:19 +00005422 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005423 case Intrinsic::r600_read_tgid_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005424 return getPreloadedValue(DAG, *MFI, VT,
5425 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
Reid Kleckner4dc0b1a2018-11-01 19:54:45 +00005426 case Intrinsic::amdgcn_workitem_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005427 case Intrinsic::r600_read_tidig_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005428 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5429 SDLoc(DAG.getEntryNode()),
5430 MFI->getArgInfo().WorkItemIDX);
Matt Arsenault43976df2016-01-30 04:25:19 +00005431 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005432 case Intrinsic::r600_read_tidig_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005433 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5434 SDLoc(DAG.getEntryNode()),
5435 MFI->getArgInfo().WorkItemIDY);
Matt Arsenault43976df2016-01-30 04:25:19 +00005436 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005437 case Intrinsic::r600_read_tidig_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005438 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5439 SDLoc(DAG.getEntryNode()),
5440 MFI->getArgInfo().WorkItemIDZ);
Stanislav Mekhanoshin68a2fef2019-06-13 23:47:36 +00005441 case Intrinsic::amdgcn_wavefrontsize:
5442 return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
5443 SDLoc(Op), MVT::i32);
Tim Renouf904343f2018-08-25 14:53:17 +00005444 case Intrinsic::amdgcn_s_buffer_load: {
5445 unsigned Cache = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005446 return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2),
5447 DAG.getTargetConstant(Cache & 1, DL, MVT::i1), DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005448 }
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00005449 case Intrinsic::amdgcn_fdiv_fast:
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005450 return lowerFDIV_FAST(Op, DAG);
Tom Stellard2187bb82016-12-06 23:52:13 +00005451 case Intrinsic::amdgcn_interp_mov: {
5452 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5453 SDValue Glue = M0.getValue(1);
5454 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5455 Op.getOperand(2), Op.getOperand(3), Glue);
5456 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00005457 case Intrinsic::amdgcn_interp_p1: {
5458 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5459 SDValue Glue = M0.getValue(1);
5460 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5461 Op.getOperand(2), Op.getOperand(3), Glue);
5462 }
5463 case Intrinsic::amdgcn_interp_p2: {
5464 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5465 SDValue Glue = SDValue(M0.getNode(), 1);
5466 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5467 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5468 Glue);
5469 }
Tim Corringham824ca3f2019-01-28 13:48:59 +00005470 case Intrinsic::amdgcn_interp_p1_f16: {
5471 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5472 SDValue Glue = M0.getValue(1);
5473 if (getSubtarget()->getLDSBankCount() == 16) {
5474 // 16 bank LDS
5475 SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
5476 DAG.getConstant(2, DL, MVT::i32), // P0
5477 Op.getOperand(2), // Attrchan
5478 Op.getOperand(3), // Attr
5479 Glue);
5480 SDValue Ops[] = {
5481 Op.getOperand(1), // Src0
5482 Op.getOperand(2), // Attrchan
5483 Op.getOperand(3), // Attr
5484 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5485 S, // Src2 - holds two f16 values selected by high
5486 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5487 Op.getOperand(4), // high
5488 DAG.getConstant(0, DL, MVT::i1), // $clamp
5489 DAG.getConstant(0, DL, MVT::i32) // $omod
5490 };
5491 return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
5492 } else {
5493 // 32 bank LDS
5494 SDValue Ops[] = {
5495 Op.getOperand(1), // Src0
5496 Op.getOperand(2), // Attrchan
5497 Op.getOperand(3), // Attr
5498 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5499 Op.getOperand(4), // high
5500 DAG.getConstant(0, DL, MVT::i1), // $clamp
5501 DAG.getConstant(0, DL, MVT::i32), // $omod
5502 Glue
5503 };
5504 return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
5505 }
5506 }
5507 case Intrinsic::amdgcn_interp_p2_f16: {
5508 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
5509 SDValue Glue = SDValue(M0.getNode(), 1);
5510 SDValue Ops[] = {
5511 Op.getOperand(2), // Src0
5512 Op.getOperand(3), // Attrchan
5513 Op.getOperand(4), // Attr
5514 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5515 Op.getOperand(1), // Src2
5516 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5517 Op.getOperand(5), // high
5518 DAG.getConstant(0, DL, MVT::i1), // $clamp
5519 Glue
5520 };
5521 return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
5522 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005523 case Intrinsic::amdgcn_sin:
5524 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5525
5526 case Intrinsic::amdgcn_cos:
5527 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5528
5529 case Intrinsic::amdgcn_log_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005530 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005531 return SDValue();
5532
5533 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00005534 MF.getFunction(), "intrinsic not supported on subtarget",
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005535 DL.getDebugLoc());
5536 DAG.getContext()->diagnose(BadIntrin);
5537 return DAG.getUNDEF(VT);
5538 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005539 case Intrinsic::amdgcn_ldexp:
5540 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5541 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00005542
5543 case Intrinsic::amdgcn_fract:
5544 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5545
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005546 case Intrinsic::amdgcn_class:
5547 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5548 Op.getOperand(1), Op.getOperand(2));
5549 case Intrinsic::amdgcn_div_fmas:
5550 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5551 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5552 Op.getOperand(4));
5553
5554 case Intrinsic::amdgcn_div_fixup:
5555 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5556 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5557
5558 case Intrinsic::amdgcn_trig_preop:
5559 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5560 Op.getOperand(1), Op.getOperand(2));
5561 case Intrinsic::amdgcn_div_scale: {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005562 const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005563
5564 // Translate to the operands expected by the machine instruction. The
5565 // first parameter must be the same as the first instruction.
5566 SDValue Numerator = Op.getOperand(1);
5567 SDValue Denominator = Op.getOperand(2);
5568
5569 // Note this order is the opposite of the machine instruction's operand order,
5570 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5571 // intrinsic has the numerator as the first operand to match a normal
5572 // division operation.
5573
5574 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5575
5576 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5577 Denominator, Numerator);
5578 }
Wei Ding07e03712016-07-28 16:42:13 +00005579 case Intrinsic::amdgcn_icmp: {
Marek Olsak33eb4d92019-01-15 02:13:18 +00005580 // There is a Pat that handles this variant, so return it as-is.
5581 if (Op.getOperand(1).getValueType() == MVT::i1 &&
5582 Op.getConstantOperandVal(2) == 0 &&
5583 Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5584 return Op;
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00005585 return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
Wei Ding07e03712016-07-28 16:42:13 +00005586 }
5587 case Intrinsic::amdgcn_fcmp: {
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00005588 return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
Wei Ding07e03712016-07-28 16:42:13 +00005589 }
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00005590 case Intrinsic::amdgcn_fmed3:
5591 return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5592 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Farhana Aleenc370d7b2018-07-16 18:19:59 +00005593 case Intrinsic::amdgcn_fdot2:
5594 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00005595 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5596 Op.getOperand(4));
Matt Arsenault32fc5272016-07-26 16:45:45 +00005597 case Intrinsic::amdgcn_fmul_legacy:
5598 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5599 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00005600 case Intrinsic::amdgcn_sffbh:
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00005601 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaultf5262252017-02-22 23:04:58 +00005602 case Intrinsic::amdgcn_sbfe:
5603 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5604 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5605 case Intrinsic::amdgcn_ubfe:
5606 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5607 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Marek Olsak13e47412018-01-31 20:18:04 +00005608 case Intrinsic::amdgcn_cvt_pkrtz:
5609 case Intrinsic::amdgcn_cvt_pknorm_i16:
5610 case Intrinsic::amdgcn_cvt_pknorm_u16:
5611 case Intrinsic::amdgcn_cvt_pk_i16:
5612 case Intrinsic::amdgcn_cvt_pk_u16: {
5613 // FIXME: Stop adding cast if v2f16/v2i16 are legal.
Matt Arsenault1f17c662017-02-22 00:27:34 +00005614 EVT VT = Op.getValueType();
Marek Olsak13e47412018-01-31 20:18:04 +00005615 unsigned Opcode;
5616
5617 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5618 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5619 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5620 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5621 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5622 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5623 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5624 Opcode = AMDGPUISD::CVT_PK_I16_I32;
5625 else
5626 Opcode = AMDGPUISD::CVT_PK_U16_U32;
5627
Matt Arsenault709374d2018-08-01 20:13:58 +00005628 if (isTypeLegal(VT))
5629 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5630
Marek Olsak13e47412018-01-31 20:18:04 +00005631 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Matt Arsenault1f17c662017-02-22 00:27:34 +00005632 Op.getOperand(1), Op.getOperand(2));
5633 return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5634 }
Connor Abbott8c217d02017-08-04 18:36:49 +00005635 case Intrinsic::amdgcn_wqm: {
5636 SDValue Src = Op.getOperand(1);
5637 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
5638 0);
5639 }
Connor Abbott92638ab2017-08-04 18:36:52 +00005640 case Intrinsic::amdgcn_wwm: {
5641 SDValue Src = Op.getOperand(1);
5642 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
5643 0);
5644 }
Stanislav Mekhanoshindacda792018-06-26 20:04:19 +00005645 case Intrinsic::amdgcn_fmad_ftz:
5646 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5647 Op.getOperand(2), Op.getOperand(3));
Stanislav Mekhanoshin68a2fef2019-06-13 23:47:36 +00005648
5649 case Intrinsic::amdgcn_if_break:
5650 return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT,
5651 Op->getOperand(1), Op->getOperand(2)), 0);
5652
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005653 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005654 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5655 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5656 return lowerImage(Op, ImageDimIntr, DAG);
5657
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005658 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005659 }
5660}
5661
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005662SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5663 SelectionDAG &DAG) const {
5664 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00005665 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00005666
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005667 switch (IntrID) {
Marek Olsakc5cec5e2019-01-16 15:43:53 +00005668 case Intrinsic::amdgcn_ds_ordered_add:
5669 case Intrinsic::amdgcn_ds_ordered_swap: {
5670 MemSDNode *M = cast<MemSDNode>(Op);
5671 SDValue Chain = M->getOperand(0);
5672 SDValue M0 = M->getOperand(2);
5673 SDValue Value = M->getOperand(3);
5674 unsigned OrderedCountIndex = M->getConstantOperandVal(7);
5675 unsigned WaveRelease = M->getConstantOperandVal(8);
5676 unsigned WaveDone = M->getConstantOperandVal(9);
5677 unsigned ShaderType;
5678 unsigned Instruction;
5679
5680 switch (IntrID) {
5681 case Intrinsic::amdgcn_ds_ordered_add:
5682 Instruction = 0;
5683 break;
5684 case Intrinsic::amdgcn_ds_ordered_swap:
5685 Instruction = 1;
5686 break;
5687 }
5688
5689 if (WaveDone && !WaveRelease)
5690 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
5691
5692 switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
5693 case CallingConv::AMDGPU_CS:
5694 case CallingConv::AMDGPU_KERNEL:
5695 ShaderType = 0;
5696 break;
5697 case CallingConv::AMDGPU_PS:
5698 ShaderType = 1;
5699 break;
5700 case CallingConv::AMDGPU_VS:
5701 ShaderType = 2;
5702 break;
5703 case CallingConv::AMDGPU_GS:
5704 ShaderType = 3;
5705 break;
5706 default:
5707 report_fatal_error("ds_ordered_count unsupported for this calling conv");
5708 }
5709
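    // Pack the ds_ordered_count immediate: the low byte holds the ordered count
    // index scaled to a dword byte offset, and the high byte packs wave_release
    // (bit 0), wave_done (bit 1), the shader type (bits 3:2) and the instruction
    // kind (bits 5:4).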
5710 unsigned Offset0 = OrderedCountIndex << 2;
5711 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
5712 (Instruction << 4);
5713 unsigned Offset = Offset0 | (Offset1 << 8);
5714
5715 SDValue Ops[] = {
5716 Chain,
5717 Value,
5718 DAG.getTargetConstant(Offset, DL, MVT::i16),
5719 copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
5720 };
5721 return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
5722 M->getVTList(), Ops, M->getMemoryVT(),
5723 M->getMemOperand());
5724 }
Matt Arsenaulta5840c32019-01-22 18:36:06 +00005725 case Intrinsic::amdgcn_ds_fadd: {
5726 MemSDNode *M = cast<MemSDNode>(Op);
5727 unsigned Opc;
5728 switch (IntrID) {
5729 case Intrinsic::amdgcn_ds_fadd:
5730 Opc = ISD::ATOMIC_LOAD_FADD;
5731 break;
5732 }
5733
5734 return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
5735 M->getOperand(0), M->getOperand(2), M->getOperand(3),
5736 M->getMemOperand());
5737 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005738 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005739 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005740 case Intrinsic::amdgcn_ds_fmin:
5741 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005742 MemSDNode *M = cast<MemSDNode>(Op);
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005743 unsigned Opc;
5744 switch (IntrID) {
5745 case Intrinsic::amdgcn_atomic_inc:
5746 Opc = AMDGPUISD::ATOMIC_INC;
5747 break;
5748 case Intrinsic::amdgcn_atomic_dec:
5749 Opc = AMDGPUISD::ATOMIC_DEC;
5750 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005751 case Intrinsic::amdgcn_ds_fmin:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005752 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
5753 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005754 case Intrinsic::amdgcn_ds_fmax:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005755 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
5756 break;
5757 default:
5758 llvm_unreachable("Unknown intrinsic!");
5759 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005760 SDValue Ops[] = {
5761 M->getOperand(0), // Chain
5762 M->getOperand(2), // Ptr
5763 M->getOperand(3) // Value
5764 };
5765
5766 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
5767 M->getMemoryVT(), M->getMemOperand());
5768 }
Tom Stellard6f9ef142016-12-20 17:19:44 +00005769 case Intrinsic::amdgcn_buffer_load:
5770 case Intrinsic::amdgcn_buffer_load_format: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005771 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
5772 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5773 unsigned IdxEn = 1;
5774 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5775 IdxEn = Idx->getZExtValue() != 0;
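    // The intrinsic's separate glc/slc arguments are packed into the cachepolicy
    // operand (glc in bit 0, slc in bit 1); idxen is only cleared when vindex is
    // known to be the constant zero.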
Tom Stellard6f9ef142016-12-20 17:19:44 +00005776 SDValue Ops[] = {
5777 Op.getOperand(0), // Chain
5778 Op.getOperand(2), // rsrc
5779 Op.getOperand(3), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005780 SDValue(), // voffset -- will be set by setBufferOffsets
5781 SDValue(), // soffset -- will be set by setBufferOffsets
5782 SDValue(), // offset -- will be set by setBufferOffsets
5783 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5784 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Tom Stellard6f9ef142016-12-20 17:19:44 +00005785 };
Tom Stellard6f9ef142016-12-20 17:19:44 +00005786
Tim Renouf4f703f52018-08-21 11:07:10 +00005787 setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
Tom Stellard6f9ef142016-12-20 17:19:44 +00005788 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
5789 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
Tim Renouf4f703f52018-08-21 11:07:10 +00005790
5791 EVT VT = Op.getValueType();
5792 EVT IntVT = VT.changeTypeToInteger();
5793 auto *M = cast<MemSDNode>(Op);
5794 EVT LoadVT = Op.getValueType();
5795
5796 if (LoadVT.getScalarType() == MVT::f16)
5797 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5798 M, DAG, Ops);
Ryan Taylor00e063a2019-03-19 16:07:00 +00005799
5800 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
5801 if (LoadVT.getScalarType() == MVT::i8 ||
5802 LoadVT.getScalarType() == MVT::i16)
5803 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
5804
Tim Renouf677387d2019-03-22 14:58:02 +00005805 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5806 M->getMemOperand(), DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00005807 }
5808 case Intrinsic::amdgcn_raw_buffer_load:
5809 case Intrinsic::amdgcn_raw_buffer_load_format: {
5810 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5811 SDValue Ops[] = {
5812 Op.getOperand(0), // Chain
5813 Op.getOperand(2), // rsrc
5814 DAG.getConstant(0, DL, MVT::i32), // vindex
5815 Offsets.first, // voffset
5816 Op.getOperand(4), // soffset
5817 Offsets.second, // offset
5818 Op.getOperand(5), // cachepolicy
5819 DAG.getConstant(0, DL, MVT::i1), // idxen
5820 };
5821
5822 unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
5823 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5824
5825 EVT VT = Op.getValueType();
5826 EVT IntVT = VT.changeTypeToInteger();
5827 auto *M = cast<MemSDNode>(Op);
5828 EVT LoadVT = Op.getValueType();
5829
5830 if (LoadVT.getScalarType() == MVT::f16)
5831 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5832 M, DAG, Ops);
Ryan Taylor00e063a2019-03-19 16:07:00 +00005833
5834 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
5835 if (LoadVT.getScalarType() == MVT::i8 ||
5836 LoadVT.getScalarType() == MVT::i16)
5837 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
5838
Tim Renouf677387d2019-03-22 14:58:02 +00005839 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5840 M->getMemOperand(), DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00005841 }
5842 case Intrinsic::amdgcn_struct_buffer_load:
5843 case Intrinsic::amdgcn_struct_buffer_load_format: {
5844 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5845 SDValue Ops[] = {
5846 Op.getOperand(0), // Chain
5847 Op.getOperand(2), // rsrc
5848 Op.getOperand(3), // vindex
5849 Offsets.first, // voffset
5850 Op.getOperand(5), // soffset
5851 Offsets.second, // offset
5852 Op.getOperand(6), // cachepolicy
5853 DAG.getConstant(1, DL, MVT::i1), // idxen
5854 };
5855
5856 unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
5857 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5858
Tom Stellard6f9ef142016-12-20 17:19:44 +00005859 EVT VT = Op.getValueType();
5860 EVT IntVT = VT.changeTypeToInteger();
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005861 auto *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005862 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005863
Tim Renouf366a49d2018-08-02 23:33:01 +00005864 if (LoadVT.getScalarType() == MVT::f16)
5865 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5866 M, DAG, Ops);
Ryan Taylor00e063a2019-03-19 16:07:00 +00005867
5868 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
5869 if (LoadVT.getScalarType() == MVT::i8 ||
5870 LoadVT.getScalarType() == MVT::i16)
5871 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
5872
Tim Renouf677387d2019-03-22 14:58:02 +00005873 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5874 M->getMemOperand(), DAG);
Tom Stellard6f9ef142016-12-20 17:19:44 +00005875 }
David Stuttard70e8bc12017-06-22 16:29:22 +00005876 case Intrinsic::amdgcn_tbuffer_load: {
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005877 MemSDNode *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005878 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005879
Tim Renouf35484c92018-08-21 11:06:05 +00005880 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
5881 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
5882 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
5883 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
5884 unsigned IdxEn = 1;
5885 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5886 IdxEn = Idx->getZExtValue() != 0;
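    // The separate dfmt/nfmt arguments are packed into a single format operand
    // (dfmt in the low 4 bits, nfmt shifted up by 4).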
David Stuttard70e8bc12017-06-22 16:29:22 +00005887 SDValue Ops[] = {
5888 Op.getOperand(0), // Chain
5889 Op.getOperand(2), // rsrc
5890 Op.getOperand(3), // vindex
5891 Op.getOperand(4), // voffset
5892 Op.getOperand(5), // soffset
5893 Op.getOperand(6), // offset
Tim Renouf35484c92018-08-21 11:06:05 +00005894 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
5895 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5896 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5897 };
5898
5899 if (LoadVT.getScalarType() == MVT::f16)
5900 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5901 M, DAG, Ops);
Tim Renouf677387d2019-03-22 14:58:02 +00005902 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5903 Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
5904 DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00005905 }
5906 case Intrinsic::amdgcn_raw_tbuffer_load: {
5907 MemSDNode *M = cast<MemSDNode>(Op);
5908 EVT LoadVT = Op.getValueType();
5909 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5910
5911 SDValue Ops[] = {
5912 Op.getOperand(0), // Chain
5913 Op.getOperand(2), // rsrc
5914 DAG.getConstant(0, DL, MVT::i32), // vindex
5915 Offsets.first, // voffset
5916 Op.getOperand(4), // soffset
5917 Offsets.second, // offset
5918 Op.getOperand(5), // format
5919 Op.getOperand(6), // cachepolicy
5920 DAG.getConstant(0, DL, MVT::i1), // idxen
5921 };
5922
5923 if (LoadVT.getScalarType() == MVT::f16)
5924 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5925 M, DAG, Ops);
Tim Renouf677387d2019-03-22 14:58:02 +00005926 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5927 Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
5928 DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00005929 }
5930 case Intrinsic::amdgcn_struct_tbuffer_load: {
5931 MemSDNode *M = cast<MemSDNode>(Op);
5932 EVT LoadVT = Op.getValueType();
5933 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5934
5935 SDValue Ops[] = {
5936 Op.getOperand(0), // Chain
5937 Op.getOperand(2), // rsrc
5938 Op.getOperand(3), // vindex
5939 Offsets.first, // voffset
5940 Op.getOperand(5), // soffset
5941 Offsets.second, // offset
5942 Op.getOperand(6), // format
5943 Op.getOperand(7), // cachepolicy
5944 DAG.getConstant(1, DL, MVT::i1), // idxen
David Stuttard70e8bc12017-06-22 16:29:22 +00005945 };
5946
Tim Renouf366a49d2018-08-02 23:33:01 +00005947 if (LoadVT.getScalarType() == MVT::f16)
5948 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5949 M, DAG, Ops);
Tim Renouf677387d2019-03-22 14:58:02 +00005950 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5951 Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
5952 DAG);
David Stuttard70e8bc12017-06-22 16:29:22 +00005953 }
Marek Olsak5cec6412017-11-09 01:52:48 +00005954 case Intrinsic::amdgcn_buffer_atomic_swap:
5955 case Intrinsic::amdgcn_buffer_atomic_add:
5956 case Intrinsic::amdgcn_buffer_atomic_sub:
5957 case Intrinsic::amdgcn_buffer_atomic_smin:
5958 case Intrinsic::amdgcn_buffer_atomic_umin:
5959 case Intrinsic::amdgcn_buffer_atomic_smax:
5960 case Intrinsic::amdgcn_buffer_atomic_umax:
5961 case Intrinsic::amdgcn_buffer_atomic_and:
5962 case Intrinsic::amdgcn_buffer_atomic_or:
5963 case Intrinsic::amdgcn_buffer_atomic_xor: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005964 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5965 unsigned IdxEn = 1;
5966 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
5967 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00005968 SDValue Ops[] = {
5969 Op.getOperand(0), // Chain
5970 Op.getOperand(2), // vdata
5971 Op.getOperand(3), // rsrc
5972 Op.getOperand(4), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005973 SDValue(), // voffset -- will be set by setBufferOffsets
5974 SDValue(), // soffset -- will be set by setBufferOffsets
5975 SDValue(), // offset -- will be set by setBufferOffsets
5976 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
5977 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00005978 };
Tim Renouf4f703f52018-08-21 11:07:10 +00005979 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005980 EVT VT = Op.getValueType();
5981
5982 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005983 unsigned Opcode = 0;
5984
5985 switch (IntrID) {
5986 case Intrinsic::amdgcn_buffer_atomic_swap:
5987 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5988 break;
5989 case Intrinsic::amdgcn_buffer_atomic_add:
5990 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5991 break;
5992 case Intrinsic::amdgcn_buffer_atomic_sub:
5993 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5994 break;
5995 case Intrinsic::amdgcn_buffer_atomic_smin:
5996 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5997 break;
5998 case Intrinsic::amdgcn_buffer_atomic_umin:
5999 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6000 break;
6001 case Intrinsic::amdgcn_buffer_atomic_smax:
6002 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6003 break;
6004 case Intrinsic::amdgcn_buffer_atomic_umax:
6005 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6006 break;
6007 case Intrinsic::amdgcn_buffer_atomic_and:
6008 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6009 break;
6010 case Intrinsic::amdgcn_buffer_atomic_or:
6011 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6012 break;
6013 case Intrinsic::amdgcn_buffer_atomic_xor:
6014 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6015 break;
6016 default:
6017 llvm_unreachable("unhandled atomic opcode");
6018 }
6019
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00006020 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6021 M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00006022 }
Tim Renouf4f703f52018-08-21 11:07:10 +00006023 case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6024 case Intrinsic::amdgcn_raw_buffer_atomic_add:
6025 case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6026 case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6027 case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6028 case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6029 case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6030 case Intrinsic::amdgcn_raw_buffer_atomic_and:
6031 case Intrinsic::amdgcn_raw_buffer_atomic_or:
6032 case Intrinsic::amdgcn_raw_buffer_atomic_xor: {
6033 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6034 SDValue Ops[] = {
6035 Op.getOperand(0), // Chain
6036 Op.getOperand(2), // vdata
6037 Op.getOperand(3), // rsrc
6038 DAG.getConstant(0, DL, MVT::i32), // vindex
6039 Offsets.first, // voffset
6040 Op.getOperand(5), // soffset
6041 Offsets.second, // offset
6042 Op.getOperand(6), // cachepolicy
6043 DAG.getConstant(0, DL, MVT::i1), // idxen
6044 };
6045 EVT VT = Op.getValueType();
Marek Olsak5cec6412017-11-09 01:52:48 +00006046
Tim Renouf4f703f52018-08-21 11:07:10 +00006047 auto *M = cast<MemSDNode>(Op);
6048 unsigned Opcode = 0;
6049
6050 switch (IntrID) {
6051 case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6052 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6053 break;
6054 case Intrinsic::amdgcn_raw_buffer_atomic_add:
6055 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6056 break;
6057 case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6058 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6059 break;
6060 case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6061 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6062 break;
6063 case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6064 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6065 break;
6066 case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6067 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6068 break;
6069 case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6070 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6071 break;
6072 case Intrinsic::amdgcn_raw_buffer_atomic_and:
6073 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6074 break;
6075 case Intrinsic::amdgcn_raw_buffer_atomic_or:
6076 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6077 break;
6078 case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6079 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6080 break;
6081 default:
6082 llvm_unreachable("unhandled atomic opcode");
6083 }
6084
6085 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6086 M->getMemOperand());
6087 }
6088 case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6089 case Intrinsic::amdgcn_struct_buffer_atomic_add:
6090 case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6091 case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6092 case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6093 case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6094 case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6095 case Intrinsic::amdgcn_struct_buffer_atomic_and:
6096 case Intrinsic::amdgcn_struct_buffer_atomic_or:
6097 case Intrinsic::amdgcn_struct_buffer_atomic_xor: {
6098 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6099 SDValue Ops[] = {
6100 Op.getOperand(0), // Chain
6101 Op.getOperand(2), // vdata
6102 Op.getOperand(3), // rsrc
6103 Op.getOperand(4), // vindex
6104 Offsets.first, // voffset
6105 Op.getOperand(6), // soffset
6106 Offsets.second, // offset
6107 Op.getOperand(7), // cachepolicy
6108 DAG.getConstant(1, DL, MVT::i1), // idxen
6109 };
6110 EVT VT = Op.getValueType();
6111
6112 auto *M = cast<MemSDNode>(Op);
6113 unsigned Opcode = 0;
6114
6115 switch (IntrID) {
6116 case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6117 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6118 break;
6119 case Intrinsic::amdgcn_struct_buffer_atomic_add:
6120 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6121 break;
6122 case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6123 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6124 break;
6125 case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6126 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6127 break;
6128 case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6129 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6130 break;
6131 case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6132 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6133 break;
6134 case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6135 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6136 break;
6137 case Intrinsic::amdgcn_struct_buffer_atomic_and:
6138 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6139 break;
6140 case Intrinsic::amdgcn_struct_buffer_atomic_or:
6141 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6142 break;
6143 case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6144 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6145 break;
6146 default:
6147 llvm_unreachable("unhandled atomic opcode");
6148 }
6149
6150 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6151 M->getMemOperand());
6152 }
Marek Olsak5cec6412017-11-09 01:52:48 +00006153 case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
Tim Renouf4f703f52018-08-21 11:07:10 +00006154 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6155 unsigned IdxEn = 1;
6156 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6157 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00006158 SDValue Ops[] = {
6159 Op.getOperand(0), // Chain
6160 Op.getOperand(2), // src
6161 Op.getOperand(3), // cmp
6162 Op.getOperand(4), // rsrc
6163 Op.getOperand(5), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00006164 SDValue(), // voffset -- will be set by setBufferOffsets
6165 SDValue(), // soffset -- will be set by setBufferOffsets
6166 SDValue(), // offset -- will be set by setBufferOffsets
6167 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6168 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6169 };
6170 setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6171 EVT VT = Op.getValueType();
6172 auto *M = cast<MemSDNode>(Op);
6173
6174 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6175 Op->getVTList(), Ops, VT, M->getMemOperand());
6176 }
6177 case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6178 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6179 SDValue Ops[] = {
6180 Op.getOperand(0), // Chain
6181 Op.getOperand(2), // src
6182 Op.getOperand(3), // cmp
6183 Op.getOperand(4), // rsrc
6184 DAG.getConstant(0, DL, MVT::i32), // vindex
6185 Offsets.first, // voffset
6186 Op.getOperand(6), // soffset
6187 Offsets.second, // offset
6188 Op.getOperand(7), // cachepolicy
6189 DAG.getConstant(0, DL, MVT::i1), // idxen
6190 };
6191 EVT VT = Op.getValueType();
6192 auto *M = cast<MemSDNode>(Op);
6193
6194 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6195 Op->getVTList(), Ops, VT, M->getMemOperand());
6196 }
6197 case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6198 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6199 SDValue Ops[] = {
6200 Op.getOperand(0), // Chain
6201 Op.getOperand(2), // src
6202 Op.getOperand(3), // cmp
6203 Op.getOperand(4), // rsrc
6204 Op.getOperand(5), // vindex
6205 Offsets.first, // voffset
6206 Op.getOperand(7), // soffset
6207 Offsets.second, // offset
6208 Op.getOperand(8), // cachepolicy
6209 DAG.getConstant(1, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00006210 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00006211 EVT VT = Op.getValueType();
6212 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00006213
6214 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00006215 Op->getVTList(), Ops, VT, M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00006216 }
6217
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00006218 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00006219 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6220 AMDGPU::getImageDimIntrinsicInfo(IntrID))
6221 return lowerImage(Op, ImageDimIntr, DAG);
Matt Arsenault1349a042018-05-22 06:32:10 +00006222
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00006223 return SDValue();
6224 }
6225}
6226
Tim Renouf677387d2019-03-22 14:58:02 +00006227// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
6228// dwordx4 if on SI.
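// For example, a v3i32 result is widened to v4i32 (and the memory operand to 16
// bytes); the original three lanes are then recovered from the widened load with
// an EXTRACT_SUBVECTOR.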
6229SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6230 SDVTList VTList,
6231 ArrayRef<SDValue> Ops, EVT MemVT,
6232 MachineMemOperand *MMO,
6233 SelectionDAG &DAG) const {
6234 EVT VT = VTList.VTs[0];
6235 EVT WidenedVT = VT;
6236 EVT WidenedMemVT = MemVT;
6237 if (!Subtarget->hasDwordx3LoadStores() &&
6238 (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6239 WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6240 WidenedVT.getVectorElementType(), 4);
6241 WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6242 WidenedMemVT.getVectorElementType(), 4);
6243 MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6244 }
6245
6246 assert(VTList.NumVTs == 2);
6247 SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6248
6249 auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6250 WidenedMemVT, MMO);
6251 if (WidenedVT != VT) {
6252 auto Extract = DAG.getNode(
6253 ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6254 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
6255 NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6256 }
6257 return NewOp;
6258}
6259
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006260SDValue SITargetLowering::handleD16VData(SDValue VData,
6261 SelectionDAG &DAG) const {
6262 EVT StoreVT = VData.getValueType();
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006263
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006264 // No change for f16 and legal vector D16 types.
Matt Arsenault1349a042018-05-22 06:32:10 +00006265 if (!StoreVT.isVector())
6266 return VData;
6267
6268 SDLoc DL(VData);
6269 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6270
6271 if (Subtarget->hasUnpackedD16VMem()) {
6272 // We need to unpack the packed data to store.
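    // For example, a v2f16 value is bitcast to v2i16 and zero-extended to v2i32
    // so that each half occupies its own dword, as the unpacked-D16 memory
    // instructions expect.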
6273 EVT IntStoreVT = StoreVT.changeTypeToInteger();
6274 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6275
6276 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6277 StoreVT.getVectorNumElements());
6278 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6279 return DAG.UnrollVectorOp(ZExt.getNode());
6280 }
6281
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006282 assert(isTypeLegal(StoreVT));
6283 return VData;
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006284}
6285
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006286SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6287 SelectionDAG &DAG) const {
Tom Stellardfc92e772015-05-12 14:18:14 +00006288 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006289 SDValue Chain = Op.getOperand(0);
6290 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
David Stuttard70e8bc12017-06-22 16:29:22 +00006291 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006292
6293 switch (IntrinsicID) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00006294 case Intrinsic::amdgcn_exp: {
Matt Arsenault4165efd2017-01-17 07:26:53 +00006295 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6296 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6297 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6298 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6299
6300 const SDValue Ops[] = {
6301 Chain,
6302 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6303 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
6304 Op.getOperand(4), // src0
6305 Op.getOperand(5), // src1
6306 Op.getOperand(6), // src2
6307 Op.getOperand(7), // src3
6308 DAG.getTargetConstant(0, DL, MVT::i1), // compr
6309 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6310 };
6311
6312 unsigned Opc = Done->isNullValue() ?
6313 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6314 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6315 }
6316 case Intrinsic::amdgcn_exp_compr: {
6317 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6318 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6319 SDValue Src0 = Op.getOperand(4);
6320 SDValue Src1 = Op.getOperand(5);
6321 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6322 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6323
6324 SDValue Undef = DAG.getUNDEF(MVT::f32);
6325 const SDValue Ops[] = {
6326 Chain,
6327 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6328 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
6329 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6330 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6331 Undef, // src2
6332 Undef, // src3
6333 DAG.getTargetConstant(1, DL, MVT::i1), // compr
6334 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6335 };
6336
6337 unsigned Opc = Done->isNullValue() ?
6338 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6339 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6340 }
6341 case Intrinsic::amdgcn_s_sendmsg:
Matt Arsenaultd3e5cb72017-02-16 02:01:17 +00006342 case Intrinsic::amdgcn_s_sendmsghalt: {
6343 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
6344 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Tom Stellardfc92e772015-05-12 14:18:14 +00006345 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
6346 SDValue Glue = Chain.getValue(1);
Matt Arsenaulta78ca622017-02-15 22:17:09 +00006347 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Jan Veselyd48445d2017-01-04 18:06:55 +00006348 Op.getOperand(2), Glue);
6349 }
Marek Olsak2d825902017-04-28 20:21:58 +00006350 case Intrinsic::amdgcn_init_exec: {
6351 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
6352 Op.getOperand(2));
6353 }
6354 case Intrinsic::amdgcn_init_exec_from_input: {
6355 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
6356 Op.getOperand(2), Op.getOperand(3));
6357 }
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00006358 case Intrinsic::amdgcn_s_barrier: {
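    // A workgroup that fits in a single wave runs in lockstep, so it does not
    // need an ISA barrier; relax s_barrier to the WAVE_BARRIER scheduling pseudo.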
6359 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00006360 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matthias Braunf1caa282017-12-15 22:22:58 +00006361 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00006362 if (WGSize <= ST.getWavefrontSize())
6363 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6364 Op.getOperand(0)), 0);
6365 }
6366 return SDValue();
6367 }
David Stuttard70e8bc12017-06-22 16:29:22 +00006368 case Intrinsic::amdgcn_tbuffer_store: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006369 SDValue VData = Op.getOperand(2);
6370 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6371 if (IsD16)
6372 VData = handleD16VData(VData, DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00006373 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6374 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6375 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6376 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6377 unsigned IdxEn = 1;
6378 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6379 IdxEn = Idx->getZExtValue() != 0;
David Stuttard70e8bc12017-06-22 16:29:22 +00006380 SDValue Ops[] = {
6381 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006382 VData, // vdata
David Stuttard70e8bc12017-06-22 16:29:22 +00006383 Op.getOperand(3), // rsrc
6384 Op.getOperand(4), // vindex
6385 Op.getOperand(5), // voffset
6386 Op.getOperand(6), // soffset
6387 Op.getOperand(7), // offset
Tim Renouf35484c92018-08-21 11:06:05 +00006388 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6389 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6390 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6391 };
6392 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6393 AMDGPUISD::TBUFFER_STORE_FORMAT;
6394 MemSDNode *M = cast<MemSDNode>(Op);
6395 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6396 M->getMemoryVT(), M->getMemOperand());
6397 }
6398
6399 case Intrinsic::amdgcn_struct_tbuffer_store: {
6400 SDValue VData = Op.getOperand(2);
6401 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6402 if (IsD16)
6403 VData = handleD16VData(VData, DAG);
6404 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6405 SDValue Ops[] = {
6406 Chain,
6407 VData, // vdata
6408 Op.getOperand(3), // rsrc
6409 Op.getOperand(4), // vindex
6410 Offsets.first, // voffset
6411 Op.getOperand(6), // soffset
6412 Offsets.second, // offset
6413 Op.getOperand(7), // format
6414 Op.getOperand(8), // cachepolicy
6415 DAG.getConstant(1, DL, MVT::i1), // idxen
6416 };
6417 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6418 AMDGPUISD::TBUFFER_STORE_FORMAT;
6419 MemSDNode *M = cast<MemSDNode>(Op);
6420 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6421 M->getMemoryVT(), M->getMemOperand());
6422 }
6423
6424 case Intrinsic::amdgcn_raw_tbuffer_store: {
6425 SDValue VData = Op.getOperand(2);
6426 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6427 if (IsD16)
6428 VData = handleD16VData(VData, DAG);
6429 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6430 SDValue Ops[] = {
6431 Chain,
6432 VData, // vdata
6433 Op.getOperand(3), // rsrc
6434 DAG.getConstant(0, DL, MVT::i32), // vindex
6435 Offsets.first, // voffset
6436 Op.getOperand(5), // soffset
6437 Offsets.second, // offset
6438 Op.getOperand(6), // format
6439 Op.getOperand(7), // cachepolicy
6440 DAG.getConstant(0, DL, MVT::i1), // idxen
David Stuttard70e8bc12017-06-22 16:29:22 +00006441 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006442 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6443 AMDGPUISD::TBUFFER_STORE_FORMAT;
6444 MemSDNode *M = cast<MemSDNode>(Op);
6445 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6446 M->getMemoryVT(), M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00006447 }
6448
Marek Olsak5cec6412017-11-09 01:52:48 +00006449 case Intrinsic::amdgcn_buffer_store:
6450 case Intrinsic::amdgcn_buffer_store_format: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006451 SDValue VData = Op.getOperand(2);
6452 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6453 if (IsD16)
6454 VData = handleD16VData(VData, DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00006455 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6456 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6457 unsigned IdxEn = 1;
6458 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6459 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00006460 SDValue Ops[] = {
6461 Chain,
Tim Renouf4f703f52018-08-21 11:07:10 +00006462 VData,
Marek Olsak5cec6412017-11-09 01:52:48 +00006463 Op.getOperand(3), // rsrc
6464 Op.getOperand(4), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00006465 SDValue(), // voffset -- will be set by setBufferOffsets
6466 SDValue(), // soffset -- will be set by setBufferOffsets
6467 SDValue(), // offset -- will be set by setBufferOffsets
6468 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6469 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00006470 };
Tim Renouf4f703f52018-08-21 11:07:10 +00006471 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006472 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6473 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6474 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6475 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006476
6477 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6478 EVT VDataType = VData.getValueType().getScalarType();
6479 if (VDataType == MVT::i8 || VDataType == MVT::i16)
6480 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6481
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006482 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6483 M->getMemoryVT(), M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00006484 }
Tim Renouf4f703f52018-08-21 11:07:10 +00006485
6486 case Intrinsic::amdgcn_raw_buffer_store:
6487 case Intrinsic::amdgcn_raw_buffer_store_format: {
6488 SDValue VData = Op.getOperand(2);
6489 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6490 if (IsD16)
6491 VData = handleD16VData(VData, DAG);
6492 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6493 SDValue Ops[] = {
6494 Chain,
6495 VData,
6496 Op.getOperand(3), // rsrc
6497 DAG.getConstant(0, DL, MVT::i32), // vindex
6498 Offsets.first, // voffset
6499 Op.getOperand(5), // soffset
6500 Offsets.second, // offset
6501 Op.getOperand(6), // cachepolicy
6502 DAG.getConstant(0, DL, MVT::i1), // idxen
6503 };
6504 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ?
6505 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6506 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6507 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006508
6509 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6510 EVT VDataType = VData.getValueType().getScalarType();
6511 if (VDataType == MVT::i8 || VDataType == MVT::i16)
6512 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6513
Tim Renouf4f703f52018-08-21 11:07:10 +00006514 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6515 M->getMemoryVT(), M->getMemOperand());
6516 }
6517
6518 case Intrinsic::amdgcn_struct_buffer_store:
6519 case Intrinsic::amdgcn_struct_buffer_store_format: {
6520 SDValue VData = Op.getOperand(2);
6521 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6522 if (IsD16)
6523 VData = handleD16VData(VData, DAG);
6524 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6525 SDValue Ops[] = {
6526 Chain,
6527 VData,
6528 Op.getOperand(3), // rsrc
6529 Op.getOperand(4), // vindex
6530 Offsets.first, // voffset
6531 Op.getOperand(6), // soffset
6532 Offsets.second, // offset
6533 Op.getOperand(7), // cachepolicy
6534 DAG.getConstant(1, DL, MVT::i1), // idxen
6535 };
6536 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6537 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6538 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6539 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006540
6541 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6542 EVT VDataType = VData.getValueType().getScalarType();
6543 if (VDataType == MVT::i8 || VDataType == MVT::i16)
6544 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6545
Tim Renouf4f703f52018-08-21 11:07:10 +00006546 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6547 M->getMemoryVT(), M->getMemOperand());
6548 }
6549
Stanislav Mekhanoshin68a2fef2019-06-13 23:47:36 +00006550 case Intrinsic::amdgcn_end_cf:
6551 return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
6552 Op->getOperand(2), Chain), 0);
6553
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006554 default: {
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00006555 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6556 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6557 return lowerImage(Op, ImageDimIntr, DAG);
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006558
Matt Arsenault754dd3e2017-04-03 18:08:08 +00006559 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006560 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006561 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006562}
6563
Tim Renouf4f703f52018-08-21 11:07:10 +00006564// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
6565// offset (the offset that is included in bounds checking and swizzling, to be
6566// split between the instruction's voffset and immoffset fields) and soffset
6567// (the offset that is excluded from bounds checking and swizzling, to go in
6568// the instruction's soffset field). This function takes the first kind of
6569// offset and figures out how to split it between voffset and immoffset.
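// For example, a constant offset of 4100 does not fit in the 12-bit immoffset
// field, so it is returned as a voffset constant of 4096 plus an immoffset of 4.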
Tim Renouf35484c92018-08-21 11:06:05 +00006570std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
6571 SDValue Offset, SelectionDAG &DAG) const {
6572 SDLoc DL(Offset);
6573 const unsigned MaxImm = 4095;
6574 SDValue N0 = Offset;
6575 ConstantSDNode *C1 = nullptr;
Piotr Sobczak378131b2019-01-02 09:47:41 +00006576
6577 if ((C1 = dyn_cast<ConstantSDNode>(N0)))
Tim Renouf35484c92018-08-21 11:06:05 +00006578 N0 = SDValue();
Piotr Sobczak378131b2019-01-02 09:47:41 +00006579 else if (DAG.isBaseWithConstantOffset(N0)) {
6580 C1 = cast<ConstantSDNode>(N0.getOperand(1));
6581 N0 = N0.getOperand(0);
6582 }
Tim Renouf35484c92018-08-21 11:06:05 +00006583
6584 if (C1) {
6585 unsigned ImmOffset = C1->getZExtValue();
6586 // If the immediate value is too big for the immoffset field, put only its low
Tim Renoufa37679d2018-10-03 10:29:43 +00006587 // 12 bits (the value & 4095) in the immoffset field, so that the part that is
Tim Renouf35484c92018-08-21 11:06:05 +00006588 // copied/added for the voffset field is a multiple of 4096, and it stands more
6589 // chance of being CSEd with the copy/add for another similar load/store.
Tim Renoufa37679d2018-10-03 10:29:43 +00006590 // However, do not do that rounding down to a multiple of 4096 if that is a
6591 // negative number, as it appears to be illegal to have a negative offset
6592 // in the vgpr, even if adding the immediate offset makes it positive.
Tim Renouf35484c92018-08-21 11:06:05 +00006593 unsigned Overflow = ImmOffset & ~MaxImm;
6594 ImmOffset -= Overflow;
Tim Renoufa37679d2018-10-03 10:29:43 +00006595 if ((int32_t)Overflow < 0) {
6596 Overflow += ImmOffset;
6597 ImmOffset = 0;
6598 }
Tim Renouf35484c92018-08-21 11:06:05 +00006599 C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
6600 if (Overflow) {
6601 auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
6602 if (!N0)
6603 N0 = OverflowVal;
6604 else {
6605 SDValue Ops[] = { N0, OverflowVal };
6606 N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
6607 }
6608 }
6609 }
6610 if (!N0)
6611 N0 = DAG.getConstant(0, DL, MVT::i32);
6612 if (!C1)
6613 C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
6614 return {N0, SDValue(C1, 0)};
6615}
6616
Tim Renouf4f703f52018-08-21 11:07:10 +00006617// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
6618// three offsets (voffset, soffset and instoffset) into the SDValue[3] array
6619// pointed to by Offsets.
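// A constant combined offset is split between soffset and instoffset when
// splitMUBUFOffset allows it (with voffset set to 0); otherwise any non-constant
// part, or the whole expression, is left in voffset.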
6620void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006621 SelectionDAG &DAG, SDValue *Offsets,
6622 unsigned Align) const {
Tim Renouf4f703f52018-08-21 11:07:10 +00006623 SDLoc DL(CombinedOffset);
6624 if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
6625 uint32_t Imm = C->getZExtValue();
6626 uint32_t SOffset, ImmOffset;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006627 if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
Tim Renouf4f703f52018-08-21 11:07:10 +00006628 Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
6629 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6630 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6631 return;
6632 }
6633 }
6634 if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
6635 SDValue N0 = CombinedOffset.getOperand(0);
6636 SDValue N1 = CombinedOffset.getOperand(1);
6637 uint32_t SOffset, ImmOffset;
6638 int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006639 if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
6640 Subtarget, Align)) {
Tim Renouf4f703f52018-08-21 11:07:10 +00006641 Offsets[0] = N0;
6642 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6643 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6644 return;
6645 }
6646 }
6647 Offsets[0] = CombinedOffset;
6648 Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
6649 Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
6650}
6651
Ryan Taylor00e063a2019-03-19 16:07:00 +00006652// Handle 8 bit and 16 bit buffer loads
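// These are lowered to a BUFFER_LOAD_UBYTE/USHORT node producing an i32 result,
// which is then truncated back to the requested i8/i16 scalar type.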
6653SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
6654 EVT LoadVT, SDLoc DL,
6655 ArrayRef<SDValue> Ops,
6656 MemSDNode *M) const {
6657 EVT IntVT = LoadVT.changeTypeToInteger();
6658 unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
6659 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
6660
6661 SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
6662 SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
6663 Ops, IntVT,
6664 M->getMemOperand());
6665 SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL,
6666 LoadVT.getScalarType(), BufferLoad);
6667 return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL);
6668}
6669
6670// Handle 8 bit and 16 bit buffer stores
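// The data is any-extended to i32 and stored with BUFFER_STORE_BYTE/SHORT; only
// the first 9 operands of Ops are forwarded to the new node.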
6671SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
6672 EVT VDataType, SDLoc DL,
6673 SDValue Ops[],
6674 MemSDNode *M) const {
6675 SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
6676 Ops[1] = BufferStoreExt;
6677 unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
6678 AMDGPUISD::BUFFER_STORE_SHORT;
6679 ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
6680 return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
6681 M->getMemOperand());
6682}
6683
Matt Arsenault90083d32018-06-07 09:54:49 +00006684static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
6685 ISD::LoadExtType ExtType, SDValue Op,
6686 const SDLoc &SL, EVT VT) {
6687 if (VT.bitsLT(Op.getValueType()))
6688 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
6689
6690 switch (ExtType) {
6691 case ISD::SEXTLOAD:
6692 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
6693 case ISD::ZEXTLOAD:
6694 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
6695 case ISD::EXTLOAD:
6696 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
6697 case ISD::NON_EXTLOAD:
6698 return Op;
6699 }
6700
6701 llvm_unreachable("invalid ext type");
6702}
6703
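// Widen a uniform, dword-aligned sub-dword load from constant (or invariant
// global) memory into a full 32-bit load, then truncate/extend the result back
// to the original type. Range metadata is dropped in the process.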
6704SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
6705 SelectionDAG &DAG = DCI.DAG;
6706 if (Ld->getAlignment() < 4 || Ld->isDivergent())
6707 return SDValue();
6708
6709 // FIXME: Constant loads should all be marked invariant.
6710 unsigned AS = Ld->getAddressSpace();
Matt Arsenault0da63502018-08-31 05:49:54 +00006711 if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
6712 AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Matt Arsenault90083d32018-06-07 09:54:49 +00006713 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
6714 return SDValue();
6715
6716 // Don't do this early, since it may interfere with adjacent load merging for
6717 // illegal types. We can avoid losing alignment information for exotic types
6718 // pre-legalize.
6719 EVT MemVT = Ld->getMemoryVT();
6720 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
6721 MemVT.getSizeInBits() >= 32)
6722 return SDValue();
6723
6724 SDLoc SL(Ld);
6725
6726 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
6727 "unexpected vector extload");
6728
6729 // TODO: Drop only high part of range.
6730 SDValue Ptr = Ld->getBasePtr();
6731 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
6732 MVT::i32, SL, Ld->getChain(), Ptr,
6733 Ld->getOffset(),
6734 Ld->getPointerInfo(), MVT::i32,
6735 Ld->getAlignment(),
6736 Ld->getMemOperand()->getFlags(),
6737 Ld->getAAInfo(),
6738 nullptr); // Drop ranges
6739
6740 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
6741 if (MemVT.isFloatingPoint()) {
6742 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
6743 "unexpected fp extload");
6744 TruncVT = MemVT.changeTypeToInteger();
6745 }
6746
6747 SDValue Cvt = NewLoad;
6748 if (Ld->getExtensionType() == ISD::SEXTLOAD) {
6749 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
6750 DAG.getValueType(TruncVT));
6751 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
6752 Ld->getExtensionType() == ISD::NON_EXTLOAD) {
6753 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
6754 } else {
6755 assert(Ld->getExtensionType() == ISD::EXTLOAD);
6756 }
6757
6758 EVT VT = Ld->getValueType(0);
6759 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
6760
6761 DCI.AddToWorklist(Cvt.getNode());
6762
6763 // We may need to handle exotic cases, such as i16->i64 extloads, so insert
6764 // the appropriate extension from the 32-bit load.
6765 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
6766 DCI.AddToWorklist(Cvt.getNode());
6767
6768 // Handle conversion back to floating point if necessary.
6769 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
6770
6771 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
6772}
6773
Tom Stellard81d871d2013-11-13 23:36:50 +00006774SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6775 SDLoc DL(Op);
6776 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00006777 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00006778 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00006779
Matt Arsenaulta1436412016-02-10 18:21:45 +00006780 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00006781 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
6782 return SDValue();
6783
Matt Arsenault6dfda962016-02-10 18:21:39 +00006784 // FIXME: Copied from PPC
6785 // First, load into 32 bits, then truncate to 1 bit.
6786
6787 SDValue Chain = Load->getChain();
6788 SDValue BasePtr = Load->getBasePtr();
6789 MachineMemOperand *MMO = Load->getMemOperand();
6790
Tom Stellard115a6152016-11-10 16:02:37 +00006791 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
6792
Matt Arsenault6dfda962016-02-10 18:21:39 +00006793 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00006794 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00006795
Tim Renouf361b5b22019-03-21 12:01:21 +00006796 if (!MemVT.isVector()) {
6797 SDValue Ops[] = {
6798 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
6799 NewLD.getValue(1)
6800 };
6801
6802 return DAG.getMergeValues(Ops, DL);
6803 }
6804
6805 SmallVector<SDValue, 3> Elts;
6806 for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
6807 SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
6808 DAG.getConstant(I, DL, MVT::i32));
6809
6810 Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
6811 }
6812
Matt Arsenault6dfda962016-02-10 18:21:39 +00006813 SDValue Ops[] = {
Tim Renouf361b5b22019-03-21 12:01:21 +00006814 DAG.getBuildVector(MemVT, DL, Elts),
Matt Arsenault6dfda962016-02-10 18:21:39 +00006815 NewLD.getValue(1)
6816 };
6817
6818 return DAG.getMergeValues(Ops, DL);
6819 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006820
Matt Arsenaulta1436412016-02-10 18:21:45 +00006821 if (!MemVT.isVector())
6822 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00006823
Matt Arsenaulta1436412016-02-10 18:21:45 +00006824 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
6825 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00006826
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006827 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
Simon Pilgrim266f4392019-06-11 11:00:23 +00006828 *Load->getMemOperand())) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006829 SDValue Ops[2];
6830 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
6831 return DAG.getMergeValues(Ops, DL);
6832 }
Simon Pilgrim266f4392019-06-11 11:00:23 +00006833
6834 unsigned Alignment = Load->getAlignment();
6835 unsigned AS = Load->getAddressSpace();
Stanislav Mekhanoshina224f682019-05-01 16:11:11 +00006836 if (Subtarget->hasLDSMisalignedBug() &&
6837 AS == AMDGPUAS::FLAT_ADDRESS &&
6838 Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
6839 return SplitVectorLoad(Op, DAG);
6840 }
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006841
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006842 MachineFunction &MF = DAG.getMachineFunction();
6843 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6844 // If there is a possibility that flat instructions access scratch memory,
6845 // then we need to use the same legalization rules we use for private.
Matt Arsenault0da63502018-08-31 05:49:54 +00006846 if (AS == AMDGPUAS::FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006847 AS = MFI->hasFlatScratchInit() ?
Matt Arsenault0da63502018-08-31 05:49:54 +00006848 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006849
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006850 unsigned NumElements = MemVT.getVectorNumElements();
Matt Arsenault6c041a32018-03-29 19:59:28 +00006851
Matt Arsenault0da63502018-08-31 05:49:54 +00006852 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6853 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
Tim Renouf361b5b22019-03-21 12:01:21 +00006854 if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
6855 if (MemVT.isPow2VectorType())
6856 return SDValue();
6857 if (NumElements == 3)
6858 return WidenVectorLoad(Op, DAG);
6859 return SplitVectorLoad(Op, DAG);
6860 }
Matt Arsenaulta1436412016-02-10 18:21:45 +00006861 // Non-uniform loads will be selected to MUBUF instructions, so they
Alexander Timofeev18009562016-12-08 17:28:47 +00006862 // have the same legalization requirements as global and private
Matt Arsenaulta1436412016-02-10 18:21:45 +00006863 // loads.
6864 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006865 }
Matt Arsenault6c041a32018-03-29 19:59:28 +00006866
Matt Arsenault0da63502018-08-31 05:49:54 +00006867 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6868 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6869 AS == AMDGPUAS::GLOBAL_ADDRESS) {
Alexander Timofeev2e5eece2018-03-05 15:12:21 +00006870 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Farhana Aleen89196642018-03-07 17:09:18 +00006871 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
Tim Renouf361b5b22019-03-21 12:01:21 +00006872 Alignment >= 4 && NumElements < 32) {
6873 if (MemVT.isPow2VectorType())
6874 return SDValue();
6875 if (NumElements == 3)
6876 return WidenVectorLoad(Op, DAG);
6877 return SplitVectorLoad(Op, DAG);
6878 }
Alexander Timofeev18009562016-12-08 17:28:47 +00006879 // Non-uniform loads will be selected to MUBUF instructions, so they
6880 // have the same legalization requirements as global and private
6881 // loads.
6882 //
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006883 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006884 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6885 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6886 AS == AMDGPUAS::GLOBAL_ADDRESS ||
6887 AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006888 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00006889 return SplitVectorLoad(Op, DAG);
Tim Renouf361b5b22019-03-21 12:01:21 +00006890 // v3 loads not supported on SI.
6891 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
6892 return WidenVectorLoad(Op, DAG);
6893 // v3 and v4 loads are supported for private and global memory.
Matt Arsenaulta1436412016-02-10 18:21:45 +00006894 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006895 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006896 if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006897 // Depending on the setting of the private_element_size field in the
6898 // resource descriptor, we can only make private accesses up to a certain
6899 // size.
6900 switch (Subtarget->getMaxPrivateElementSize()) {
6901 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006902 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006903 case 8:
6904 if (NumElements > 2)
6905 return SplitVectorLoad(Op, DAG);
6906 return SDValue();
6907 case 16:
6908 // Same as global/flat
6909 if (NumElements > 4)
6910 return SplitVectorLoad(Op, DAG);
Tim Renouf361b5b22019-03-21 12:01:21 +00006911 // v3 loads not supported on SI.
6912 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
6913 return WidenVectorLoad(Op, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006914 return SDValue();
6915 default:
6916 llvm_unreachable("unsupported private_element_size");
6917 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006918 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Farhana Aleena7cb3112018-03-09 17:41:39 +00006919 // Use ds_read_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006920 if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
Farhana Aleena7cb3112018-03-09 17:41:39 +00006921 MemVT.getStoreSize() == 16)
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006922 return SDValue();
6923
Farhana Aleena7cb3112018-03-09 17:41:39 +00006924 if (NumElements > 2)
6925 return SplitVectorLoad(Op, DAG);
Nicolai Haehnle48219372018-10-17 15:37:48 +00006926
6927    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
6928 // address is negative, then the instruction is incorrectly treated as
6929 // out-of-bounds even if base + offsets is in bounds. Split vectorized
6930 // loads here to avoid emitting ds_read2_b32. We may re-combine the
6931 // load later in the SILoadStoreOptimizer.
6932 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
6933 NumElements == 2 && MemVT.getStoreSize() == 8 &&
6934 Load->getAlignment() < 8) {
6935 return SplitVectorLoad(Op, DAG);
6936 }
Tom Stellarde9373602014-01-22 19:24:14 +00006937 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006938 return SDValue();
Tom Stellard81d871d2013-11-13 23:36:50 +00006939}
6940
Tom Stellard0ec134f2014-02-04 17:18:40 +00006941SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006942 EVT VT = Op.getValueType();
6943 assert(VT.getSizeInBits() == 64);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006944
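  // Added note: a 64-bit select is expanded here into two 32-bit selects on
  // the low and high halves (via a bitcast to v2i32), which typically map
  // onto a pair of v_cndmask_b32 instructions; the halves are recombined
  // with a build_vector + bitcast at the end.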
6945 SDLoc DL(Op);
6946 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006947
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006948 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
6949 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006950
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00006951 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
6952 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
6953
6954 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
6955 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006956
6957 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
6958
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00006959 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
6960 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006961
6962 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
6963
Ahmed Bougacha128f8732016-04-26 21:15:30 +00006964 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006965 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006966}
6967
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006968// Catch division cases where we can use shortcuts with rcp and rsq
6969// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00006970SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
6971 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006972 SDLoc SL(Op);
6973 SDValue LHS = Op.getOperand(0);
6974 SDValue RHS = Op.getOperand(1);
6975 EVT VT = Op.getValueType();
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006976 const SDNodeFlags Flags = Op->getFlags();
Michael Berg7acc81b2018-05-04 18:48:20 +00006977 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006978
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00006979 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
6980 return SDValue();
6981
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006982 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00006983 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
Matt Arsenault979902b2016-08-02 22:25:04 +00006984 if (CLHS->isExactlyValue(1.0)) {
6985 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
6986      // the CI documentation have a worst case error of 1 ulp.
6987 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
6988 // use it as long as we aren't trying to use denormals.
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006989 //
6990 // v_rcp_f16 and v_rsq_f16 DO support denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006991
Matt Arsenault979902b2016-08-02 22:25:04 +00006992 // 1.0 / sqrt(x) -> rsq(x)
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006993
Matt Arsenault979902b2016-08-02 22:25:04 +00006994 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
6995 // error seems really high at 2^29 ULP.
6996 if (RHS.getOpcode() == ISD::FSQRT)
6997 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
6998
6999 // 1.0 / x -> rcp(x)
7000 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
7001 }
7002
7003 // Same as for 1.0, but expand the sign out of the constant.
7004 if (CLHS->isExactlyValue(-1.0)) {
7005 // -1.0 / x -> rcp (fneg x)
7006 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
7007 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
7008 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007009 }
7010 }
7011
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00007012 if (Unsafe) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00007013 // Turn into multiply by the reciprocal.
7014 // x / y -> x * (1.0 / y)
7015 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00007016 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00007017 }
7018
7019 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007020}
7021
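// Added note: the two helpers below are used by the f32 division expansion.
// When the denormal mode has to be toggled around the core sequence, the
// FMUL/FMA nodes are emitted in their *_W_CHAIN forms and glued to the
// surrounding setreg nodes so they cannot be reordered across the mode
// switch.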
Tom Stellard8485fa02016-12-07 02:42:15 +00007022static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7023 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
7024 if (GlueChain->getNumValues() <= 1) {
7025 return DAG.getNode(Opcode, SL, VT, A, B);
7026 }
7027
7028 assert(GlueChain->getNumValues() == 3);
7029
7030 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7031 switch (Opcode) {
7032 default: llvm_unreachable("no chain equivalent for opcode");
7033 case ISD::FMUL:
7034 Opcode = AMDGPUISD::FMUL_W_CHAIN;
7035 break;
7036 }
7037
7038 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
7039 GlueChain.getValue(2));
7040}
7041
7042static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
7043 EVT VT, SDValue A, SDValue B, SDValue C,
7044 SDValue GlueChain) {
7045 if (GlueChain->getNumValues() <= 1) {
7046 return DAG.getNode(Opcode, SL, VT, A, B, C);
7047 }
7048
7049 assert(GlueChain->getNumValues() == 3);
7050
7051 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
7052 switch (Opcode) {
7053 default: llvm_unreachable("no chain equivalent for opcode");
7054 case ISD::FMA:
7055 Opcode = AMDGPUISD::FMA_W_CHAIN;
7056 break;
7057 }
7058
7059 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7060 GlueChain.getValue(2));
7061}
7062
Matt Arsenault4052a572016-12-22 03:05:41 +00007063SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00007064 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7065 return FastLowered;
7066
Matt Arsenault4052a572016-12-22 03:05:41 +00007067 SDLoc SL(Op);
7068 SDValue Src0 = Op.getOperand(0);
7069 SDValue Src1 = Op.getOperand(1);
7070
7071 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7072 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7073
7074 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7075 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7076
7077 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7078 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7079
7080 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7081}
7082
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00007083// Faster 2.5 ULP division that does not support denormals.
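// Added sketch of the approach: r = rcp(|y| > 2^96 ? y * 2^-32 : y), and
// x / y is approximated as scale * (x * r), where scale is 2^-32 when the
// pre-scaling was applied and 1.0 otherwise. The constants 0x6f800000 and
// 0x2f800000 below are the f32 bit patterns of 2^96 and 2^-32; the scaling
// keeps the rcp result out of the denormal range for very large |y|.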
7084SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7085 SDLoc SL(Op);
7086 SDValue LHS = Op.getOperand(1);
7087 SDValue RHS = Op.getOperand(2);
7088
7089 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7090
7091 const APFloat K0Val(BitsToFloat(0x6f800000));
7092 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7093
7094 const APFloat K1Val(BitsToFloat(0x2f800000));
7095 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
7096
7097 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7098
7099 EVT SetCCVT =
7100 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7101
7102 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7103
7104 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7105
7106 // TODO: Should this propagate fast-math-flags?
7107 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7108
7109 // rcp does not support denormals.
7110 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7111
7112 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7113
7114 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7115}
7116
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007117SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00007118 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00007119 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00007120
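  // Added overview of the expansion below: the numerator and denominator are
  // scaled with DIV_SCALE, 1/denominator is approximated with RCP and refined
  // together with the quotient through a short chain of FMAs, and
  // DIV_FMAS/DIV_FIXUP apply the final correction and handle the special
  // cases. If f32 denormals are disabled, they are temporarily enabled around
  // the FMA chain via setreg on the MODE register.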
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007121 SDLoc SL(Op);
7122 SDValue LHS = Op.getOperand(0);
7123 SDValue RHS = Op.getOperand(1);
7124
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007125 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007126
Wei Dinged0f97f2016-06-09 19:17:15 +00007127 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007128
Tom Stellard8485fa02016-12-07 02:42:15 +00007129 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7130 RHS, RHS, LHS);
7131 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7132 LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007133
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00007134 // Denominator is scaled to not be denormal, so using rcp is ok.
Tom Stellard8485fa02016-12-07 02:42:15 +00007135 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7136 DenominatorScaled);
7137 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7138 DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007139
Tom Stellard8485fa02016-12-07 02:42:15 +00007140 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7141 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7142 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007143
Tom Stellard8485fa02016-12-07 02:42:15 +00007144 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007145
Tom Stellard8485fa02016-12-07 02:42:15 +00007146 if (!Subtarget->hasFP32Denormals()) {
7147 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7148 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7149 SL, MVT::i32);
7150 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7151 DAG.getEntryNode(),
7152 EnableDenormValue, BitField);
7153 SDValue Ops[3] = {
7154 NegDivScale0,
7155 EnableDenorm.getValue(0),
7156 EnableDenorm.getValue(1)
7157 };
Matt Arsenault37fefd62016-06-10 02:18:02 +00007158
Tom Stellard8485fa02016-12-07 02:42:15 +00007159 NegDivScale0 = DAG.getMergeValues(Ops, SL);
7160 }
7161
7162 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7163 ApproxRcp, One, NegDivScale0);
7164
7165 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7166 ApproxRcp, Fma0);
7167
7168 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7169 Fma1, Fma1);
7170
7171 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7172 NumeratorScaled, Mul);
7173
7174 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
7175
7176 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7177 NumeratorScaled, Fma3);
7178
7179 if (!Subtarget->hasFP32Denormals()) {
7180 const SDValue DisableDenormValue =
7181 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7182 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7183 Fma4.getValue(1),
7184 DisableDenormValue,
7185 BitField,
7186 Fma4.getValue(2));
7187
7188 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7189 DisableDenorm, DAG.getRoot());
7190 DAG.setRoot(OutputChain);
7191 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00007192
Wei Dinged0f97f2016-06-09 19:17:15 +00007193 SDValue Scale = NumeratorScaled.getValue(1);
Tom Stellard8485fa02016-12-07 02:42:15 +00007194 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7195 Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007196
Wei Dinged0f97f2016-06-09 19:17:15 +00007197 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007198}
7199
7200SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007201 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00007202 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007203
7204 SDLoc SL(Op);
7205 SDValue X = Op.getOperand(0);
7206 SDValue Y = Op.getOperand(1);
7207
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007208 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007209
7210 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7211
7212 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7213
7214 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7215
7216 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7217
7218 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7219
7220 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7221
7222 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7223
7224 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7225
7226 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7227 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7228
7229 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7230 NegDivScale0, Mul, DivScale1);
7231
7232 SDValue Scale;
7233
Tom Stellard5bfbae52018-07-11 20:59:01 +00007234 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007235 // Workaround a hardware bug on SI where the condition output from div_scale
7236 // is not usable.
7237
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007238 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007239
7240    // Figure out which scale to use for div_fmas.
7241 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7242 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7243 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7244 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7245
7246 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7247 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7248
7249 SDValue Scale0Hi
7250 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7251 SDValue Scale1Hi
7252 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7253
7254 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7255 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7256 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7257 } else {
7258 Scale = DivScale1.getValue(1);
7259 }
7260
7261 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7262 Fma4, Fma3, Mul, Scale);
7263
7264 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007265}
7266
7267SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7268 EVT VT = Op.getValueType();
7269
7270 if (VT == MVT::f32)
7271 return LowerFDIV32(Op, DAG);
7272
7273 if (VT == MVT::f64)
7274 return LowerFDIV64(Op, DAG);
7275
Matt Arsenault4052a572016-12-22 03:05:41 +00007276 if (VT == MVT::f16)
7277 return LowerFDIV16(Op, DAG);
7278
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007279 llvm_unreachable("Unexpected type for fdiv");
7280}
7281
Tom Stellard81d871d2013-11-13 23:36:50 +00007282SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7283 SDLoc DL(Op);
7284 StoreSDNode *Store = cast<StoreSDNode>(Op);
7285 EVT VT = Store->getMemoryVT();
7286
Matt Arsenault95245662016-02-11 05:32:46 +00007287 if (VT == MVT::i1) {
7288 return DAG.getTruncStore(Store->getChain(), DL,
7289 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7290 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00007291 }
7292
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007293 assert(VT.isVector() &&
7294 Store->getValue().getValueType().getScalarType() == MVT::i32);
7295
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007296 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
Simon Pilgrim266f4392019-06-11 11:00:23 +00007297 *Store->getMemOperand())) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007298 return expandUnalignedStore(Store, DAG);
7299 }
Tom Stellard81d871d2013-11-13 23:36:50 +00007300
Simon Pilgrim266f4392019-06-11 11:00:23 +00007301 unsigned AS = Store->getAddressSpace();
Stanislav Mekhanoshina224f682019-05-01 16:11:11 +00007302 if (Subtarget->hasLDSMisalignedBug() &&
7303 AS == AMDGPUAS::FLAT_ADDRESS &&
7304 Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7305 return SplitVectorStore(Op, DAG);
7306 }
7307
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00007308 MachineFunction &MF = DAG.getMachineFunction();
7309 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7310  // If there is a possibility that flat instructions access scratch memory
7311 // then we need to use the same legalization rules we use for private.
Matt Arsenault0da63502018-08-31 05:49:54 +00007312 if (AS == AMDGPUAS::FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00007313 AS = MFI->hasFlatScratchInit() ?
Matt Arsenault0da63502018-08-31 05:49:54 +00007314 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00007315
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007316 unsigned NumElements = VT.getVectorNumElements();
Matt Arsenault0da63502018-08-31 05:49:54 +00007317 if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7318 AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007319 if (NumElements > 4)
7320 return SplitVectorStore(Op, DAG);
Tim Renouf361b5b22019-03-21 12:01:21 +00007321 // v3 stores not supported on SI.
7322 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7323 return SplitVectorStore(Op, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007324 return SDValue();
Matt Arsenault0da63502018-08-31 05:49:54 +00007325 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007326 switch (Subtarget->getMaxPrivateElementSize()) {
7327 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00007328 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007329 case 8:
7330 if (NumElements > 2)
7331 return SplitVectorStore(Op, DAG);
7332 return SDValue();
7333 case 16:
Tim Renouf361b5b22019-03-21 12:01:21 +00007334 if (NumElements > 4 || NumElements == 3)
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007335 return SplitVectorStore(Op, DAG);
7336 return SDValue();
7337 default:
7338 llvm_unreachable("unsupported private_element_size");
7339 }
Matt Arsenault0da63502018-08-31 05:49:54 +00007340 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00007341 // Use ds_write_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00007342 if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
Tim Renouf361b5b22019-03-21 12:01:21 +00007343 VT.getStoreSize() == 16 && NumElements != 3)
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00007344 return SDValue();
7345
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007346 if (NumElements > 2)
7347 return SplitVectorStore(Op, DAG);
Nicolai Haehnle48219372018-10-17 15:37:48 +00007348
7349    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7350 // address is negative, then the instruction is incorrectly treated as
7351 // out-of-bounds even if base + offsets is in bounds. Split vectorized
7352 // stores here to avoid emitting ds_write2_b32. We may re-combine the
7353 // store later in the SILoadStoreOptimizer.
7354 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7355 NumElements == 2 && VT.getStoreSize() == 8 &&
7356 Store->getAlignment() < 8) {
7357 return SplitVectorStore(Op, DAG);
7358 }
7359
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00007360 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00007361 } else {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007362 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00007363 }
Tom Stellard81d871d2013-11-13 23:36:50 +00007364}
7365
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007366SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007367 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007368 EVT VT = Op.getValueType();
7369 SDValue Arg = Op.getOperand(0);
David Stuttard20de3e92018-09-14 10:27:19 +00007370 SDValue TrigVal;
7371
Sanjay Patela2607012015-09-16 16:31:21 +00007372 // TODO: Should this propagate fast-math-flags?
David Stuttard20de3e92018-09-14 10:27:19 +00007373
7374 SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7375
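  // Added note: the hardware sin/cos instructions take their input in
  // revolutions rather than radians, so the argument is pre-multiplied by
  // 1/(2*pi). On subtargets with a reduced valid input range, the scaled
  // value is additionally wrapped into [0, 1) with FRACT first.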
7376 if (Subtarget->hasTrigReducedRange()) {
7377 SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7378 TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7379 } else {
7380 TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7381 }
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007382
7383 switch (Op.getOpcode()) {
7384 case ISD::FCOS:
David Stuttard20de3e92018-09-14 10:27:19 +00007385 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007386 case ISD::FSIN:
David Stuttard20de3e92018-09-14 10:27:19 +00007387 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007388 default:
7389 llvm_unreachable("Wrong trig opcode");
7390 }
7391}
7392
Tom Stellard354a43c2016-04-01 18:27:37 +00007393SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7394 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7395 assert(AtomicNode->isCompareAndSwap());
7396 unsigned AS = AtomicNode->getAddressSpace();
7397
7398 // No custom lowering required for local address space
Matt Arsenault0da63502018-08-31 05:49:54 +00007399 if (!isFlatGlobalAddrSpace(AS))
Tom Stellard354a43c2016-04-01 18:27:37 +00007400 return Op;
7401
7402 // Non-local address space requires custom lowering for atomic compare
7403 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7404 SDLoc DL(Op);
7405 SDValue ChainIn = Op.getOperand(0);
7406 SDValue Addr = Op.getOperand(1);
7407 SDValue Old = Op.getOperand(2);
7408 SDValue New = Op.getOperand(3);
7409 EVT VT = Op.getValueType();
7410 MVT SimpleVT = VT.getSimpleVT();
7411 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7412
Ahmed Bougacha128f8732016-04-26 21:15:30 +00007413 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00007414 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00007415
7416 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7417 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00007418}
7419
Tom Stellard75aadc22012-12-11 21:25:42 +00007420//===----------------------------------------------------------------------===//
7421// Custom DAG optimizations
7422//===----------------------------------------------------------------------===//
7423
Matt Arsenault364a6742014-06-11 17:50:44 +00007424SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00007425 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00007426 EVT VT = N->getValueType(0);
7427 EVT ScalarVT = VT.getScalarType();
7428 if (ScalarVT != MVT::f32)
7429 return SDValue();
7430
7431 SelectionDAG &DAG = DCI.DAG;
7432 SDLoc DL(N);
7433
7434 SDValue Src = N->getOperand(0);
7435 EVT SrcVT = Src.getValueType();
7436
7437 // TODO: We could try to match extracting the higher bytes, which would be
7438 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7439 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7440 // about in practice.
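  // Added example (illustrative): if the source is known to have its top 24
  // bits clear, e.g. (uint_to_fp (and i32:x, 0xff)), the conversion becomes
  // CVT_F32_UBYTE0 x, which selects to v_cvt_f32_ubyte0; the explicit mask
  // can then often be removed since only the low byte is demanded.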
Craig Topper80d3bb32018-03-06 19:44:52 +00007441 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
Matt Arsenault364a6742014-06-11 17:50:44 +00007442 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7443 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7444 DCI.AddToWorklist(Cvt.getNode());
7445 return Cvt;
7446 }
7447 }
7448
Matt Arsenault364a6742014-06-11 17:50:44 +00007449 return SDValue();
7450}
7451
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007452// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7453
7454// This is a variant of
7455// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7456//
7457// The normal DAG combiner will do this, but only if the add has one use since
7458// that would increase the number of instructions.
7459//
7460// This prevents us from seeing a constant offset that can be folded into a
7461// memory instruction's addressing mode. If we know the resulting add offset of
7462// a pointer can be folded into an addressing offset, we can replace the pointer
7463// operand with the add of new constant offset. This eliminates one of the uses,
7464// and may allow the remaining use to also be simplified.
7465//
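// Added worked example (with made-up values): for an address computed as
// (shl (add %x, 4), 2) this produces (add (shl %x, 2), 16), and the constant
// 16 can then be folded into the memory instruction's immediate offset field
// instead of being materialized into the pointer register.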
7466SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7467 unsigned AddrSpace,
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007468 EVT MemVT,
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007469 DAGCombinerInfo &DCI) const {
7470 SDValue N0 = N->getOperand(0);
7471 SDValue N1 = N->getOperand(1);
7472
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007473  // We only do this for the case where the add has multiple uses, which is
7474  // when it is profitable; otherwise defer to the standard combine.
Matt Arsenaultc8903122017-11-14 23:46:42 +00007475 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7476 N0->hasOneUse())
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007477 return SDValue();
7478
7479 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7480 if (!CN1)
7481 return SDValue();
7482
7483 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7484 if (!CAdd)
7485 return SDValue();
7486
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007487 // If the resulting offset is too large, we can't fold it into the addressing
7488 // mode offset.
7489 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007490 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7491
7492 AddrMode AM;
7493 AM.HasBaseReg = true;
7494 AM.BaseOffs = Offset.getSExtValue();
7495 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007496 return SDValue();
7497
7498 SelectionDAG &DAG = DCI.DAG;
7499 SDLoc SL(N);
7500 EVT VT = N->getValueType(0);
7501
7502 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007503 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007504
Matt Arsenaulte5e0c742017-11-13 05:33:35 +00007505 SDNodeFlags Flags;
7506 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7507 (N0.getOpcode() == ISD::OR ||
7508 N0->getFlags().hasNoUnsignedWrap()));
7509
7510 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007511}
7512
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007513SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7514 DAGCombinerInfo &DCI) const {
7515 SDValue Ptr = N->getBasePtr();
7516 SelectionDAG &DAG = DCI.DAG;
7517 SDLoc SL(N);
7518
7519 // TODO: We could also do this for multiplies.
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007520 if (Ptr.getOpcode() == ISD::SHL) {
7521 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
7522 N->getMemoryVT(), DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007523 if (NewPtr) {
7524 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
7525
7526 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
7527 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
7528 }
7529 }
7530
7531 return SDValue();
7532}
7533
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007534static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
7535 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
7536 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
7537 (Opc == ISD::XOR && Val == 0);
7538}
7539
7540// Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
7541// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
7542// integer combine opportunities since most 64-bit operations are decomposed
7543// this way. TODO: We won't want this for SALU especially if it is an inline
7544// immediate.
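// Added example (illustrative): (and i64:x, 0x00000000ffffffff) splits into
// an AND of the low half with 0xffffffff (a no-op) and an AND of the high
// half with 0 (a constant zero), so no 64-bit immediate has to be
// materialized.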
7545SDValue SITargetLowering::splitBinaryBitConstantOp(
7546 DAGCombinerInfo &DCI,
7547 const SDLoc &SL,
7548 unsigned Opc, SDValue LHS,
7549 const ConstantSDNode *CRHS) const {
7550 uint64_t Val = CRHS->getZExtValue();
7551 uint32_t ValLo = Lo_32(Val);
7552 uint32_t ValHi = Hi_32(Val);
7553 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7554
7555 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
7556 bitOpWithConstantIsReducible(Opc, ValHi)) ||
7557 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7558 // If we need to materialize a 64-bit immediate, it will be split up later
7559 // anyway. Avoid creating the harder to understand 64-bit immediate
7560 // materialization.
7561 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
7562 }
7563
7564 return SDValue();
7565}
7566
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007567// Returns true if the argument is a boolean value which is not serialized into
7568// memory or an argument and does not require v_cndmask_b32 to be deserialized.
7569static bool isBoolSGPR(SDValue V) {
7570 if (V.getValueType() != MVT::i1)
7571 return false;
7572 switch (V.getOpcode()) {
7573 default: break;
7574 case ISD::SETCC:
7575 case ISD::AND:
7576 case ISD::OR:
7577 case ISD::XOR:
7578 case AMDGPUISD::FP_CLASS:
7579 return true;
7580 }
7581 return false;
7582}
7583
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007584// If a constant has all zeroes or all ones within each byte return it.
7585// Otherwise return 0.
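// Added example (illustrative): 0x00ff00ff and 0xff000000 are returned
// unchanged since every byte is all-ones or all-zero, while 0x00f000ff
// returns 0 because one of its bytes (0xf0) is only partially set.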
7586static uint32_t getConstantPermuteMask(uint32_t C) {
7587 // 0xff for any zero byte in the mask
7588 uint32_t ZeroByteMask = 0;
7589 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
7590 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
7591 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
7592 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
7593 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
7594 if ((NonZeroByteMask & C) != NonZeroByteMask)
7595 return 0; // Partial bytes selected.
7596 return C;
7597}
7598
7599// Check if a node selects whole bytes from its operand 0 starting at a byte
7600// boundary while masking the rest. Returns the select mask as used by
7601// v_perm_b32, or -1 if the match fails.
7602// Note byte select encoding:
7603// value 0-3 selects corresponding source byte;
7604// value 0xc selects zero;
7605// value 0xff selects 0xff.
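// Added examples (illustrative): (and x, 0x0000ffff) keeps bytes 0-1 and
// zeroes bytes 2-3, giving the mask 0x0c0c0100; (srl x, 16) moves bytes 3:2
// down into bytes 1:0, giving 0x0c0c0302.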
7606static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
7607 assert(V.getValueSizeInBits() == 32);
7608
7609 if (V.getNumOperands() != 2)
7610 return ~0;
7611
7612 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
7613 if (!N1)
7614 return ~0;
7615
7616 uint32_t C = N1->getZExtValue();
7617
7618 switch (V.getOpcode()) {
7619 default:
7620 break;
7621 case ISD::AND:
7622 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7623 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
7624 }
7625 break;
7626
7627 case ISD::OR:
7628 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7629 return (0x03020100 & ~ConstMask) | ConstMask;
7630 }
7631 break;
7632
7633 case ISD::SHL:
7634 if (C % 8)
7635 return ~0;
7636
7637 return uint32_t((0x030201000c0c0c0cull << C) >> 32);
7638
7639 case ISD::SRL:
7640 if (C % 8)
7641 return ~0;
7642
7643 return uint32_t(0x0c0c0c0c03020100ull >> C);
7644 }
7645
7646 return ~0;
7647}
7648
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007649SDValue SITargetLowering::performAndCombine(SDNode *N,
7650 DAGCombinerInfo &DCI) const {
7651 if (DCI.isBeforeLegalize())
7652 return SDValue();
7653
7654 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007655 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007656 SDValue LHS = N->getOperand(0);
7657 SDValue RHS = N->getOperand(1);
7658
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007659
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00007660 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7661 if (VT == MVT::i64 && CRHS) {
7662 if (SDValue Split
7663 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
7664 return Split;
7665 }
7666
7667 if (CRHS && VT == MVT::i32) {
7668 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
7669 // nb = number of trailing zeroes in mask
7670 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
7671 // given that we are selecting 8 or 16 bit fields starting at byte boundary.
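    // Added worked example (illustrative): for (and (srl x, 8), 0xff00) we get
    // Mask = 0xff00, Bits = 8, Shift = 8 and NB = 8, so the result is
    // (shl (bfe x, 16, 8), 8), which the SDWA peephole pass can later
    // optimize on GFX8+.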
7672 uint64_t Mask = CRHS->getZExtValue();
7673 unsigned Bits = countPopulation(Mask);
7674 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
7675 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
7676 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
7677 unsigned Shift = CShift->getZExtValue();
7678 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
7679 unsigned Offset = NB + Shift;
7680 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
7681 SDLoc SL(N);
7682 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
7683 LHS->getOperand(0),
7684 DAG.getConstant(Offset, SL, MVT::i32),
7685 DAG.getConstant(Bits, SL, MVT::i32));
7686 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7687 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
7688 DAG.getValueType(NarrowVT));
7689 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
7690 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
7691 return Shl;
7692 }
7693 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007694 }
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007695
7696 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7697 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
7698 isa<ConstantSDNode>(LHS.getOperand(2))) {
7699 uint32_t Sel = getConstantPermuteMask(Mask);
7700 if (!Sel)
7701 return SDValue();
7702
7703 // Select 0xc for all zero bytes
7704 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
7705 SDLoc DL(N);
7706 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7707 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7708 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007709 }
7710
7711 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
7712 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
7713 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007714 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7715 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
7716
7717 SDValue X = LHS.getOperand(0);
7718 SDValue Y = RHS.getOperand(0);
7719 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
7720 return SDValue();
7721
7722 if (LCC == ISD::SETO) {
7723 if (X != LHS.getOperand(1))
7724 return SDValue();
7725
7726 if (RCC == ISD::SETUNE) {
7727 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
7728 if (!C1 || !C1->isInfinity() || C1->isNegative())
7729 return SDValue();
7730
7731 const uint32_t Mask = SIInstrFlags::N_NORMAL |
7732 SIInstrFlags::N_SUBNORMAL |
7733 SIInstrFlags::N_ZERO |
7734 SIInstrFlags::P_ZERO |
7735 SIInstrFlags::P_SUBNORMAL |
7736 SIInstrFlags::P_NORMAL;
7737
7738 static_assert(((~(SIInstrFlags::S_NAN |
7739 SIInstrFlags::Q_NAN |
7740 SIInstrFlags::N_INFINITY |
7741 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
7742 "mask not equal");
7743
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007744 SDLoc DL(N);
7745 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7746 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007747 }
7748 }
7749 }
7750
Matt Arsenault3dcf4ce2018-08-10 18:58:56 +00007751 if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
7752 std::swap(LHS, RHS);
7753
7754 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7755 RHS.hasOneUse()) {
7756 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7757 // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
7758 // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
7759 const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7760 if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
7761 (RHS.getOperand(0) == LHS.getOperand(0) &&
7762 LHS.getOperand(0) == LHS.getOperand(1))) {
7763 const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
7764 unsigned NewMask = LCC == ISD::SETO ?
7765 Mask->getZExtValue() & ~OrdMask :
7766 Mask->getZExtValue() & OrdMask;
7767
7768 SDLoc DL(N);
7769 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
7770 DAG.getConstant(NewMask, DL, MVT::i32));
7771 }
7772 }
7773
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007774 if (VT == MVT::i32 &&
7775 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
7776 // and x, (sext cc from i1) => select cc, x, 0
7777 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
7778 std::swap(LHS, RHS);
7779 if (isBoolSGPR(RHS.getOperand(0)))
7780 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
7781 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
7782 }
7783
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007784 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7785 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7786 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7787 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7788 uint32_t LHSMask = getPermuteMask(DAG, LHS);
7789 uint32_t RHSMask = getPermuteMask(DAG, RHS);
7790 if (LHSMask != ~0u && RHSMask != ~0u) {
7791 // Canonicalize the expression in an attempt to have fewer unique masks
7792 // and therefore fewer registers used to hold the masks.
7793 if (LHSMask > RHSMask) {
7794 std::swap(LHSMask, RHSMask);
7795 std::swap(LHS, RHS);
7796 }
7797
7798      // Select 0xc for each lane used from the source operand. Zero bytes get
7799      // 0xc in the mask, 0xff bytes get 0xff, actual lanes are in the 0-3 range.
7800 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7801 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7802
7803      // Check if we need to combine values from two sources within a byte.
7804 if (!(LHSUsedLanes & RHSUsedLanes) &&
7805 // If we select high and lower word keep it for SDWA.
7806 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7807 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7808 // Each byte in each mask is either selector mask 0-3, or has higher
7809 // bits set in either of masks, which can be 0xff for 0xff or 0x0c for
7810 // zero. If 0x0c is in either mask it shall always be 0x0c. Otherwise
7811 // mask which is not 0xff wins. By anding both masks we have a correct
7812 // result except that 0x0c shall be corrected to give 0x0c only.
7813 uint32_t Mask = LHSMask & RHSMask;
7814 for (unsigned I = 0; I < 32; I += 8) {
7815 uint32_t ByteSel = 0xff << I;
7816 if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
7817 Mask &= (0x0c << I) & 0xffffffff;
7818 }
7819
7820 // Add 4 to each active LHS lane. It will not affect any existing 0xff
7821 // or 0x0c.
7822 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
7823 SDLoc DL(N);
7824
7825 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7826 LHS.getOperand(0), RHS.getOperand(0),
7827 DAG.getConstant(Sel, DL, MVT::i32));
7828 }
7829 }
7830 }
7831
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007832 return SDValue();
7833}
7834
Matt Arsenaultf2290332015-01-06 23:00:39 +00007835SDValue SITargetLowering::performOrCombine(SDNode *N,
7836 DAGCombinerInfo &DCI) const {
7837 SelectionDAG &DAG = DCI.DAG;
7838 SDValue LHS = N->getOperand(0);
7839 SDValue RHS = N->getOperand(1);
7840
Matt Arsenault3b082382016-04-12 18:24:38 +00007841 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007842 if (VT == MVT::i1) {
7843 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
7844 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7845 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
7846 SDValue Src = LHS.getOperand(0);
7847 if (Src != RHS.getOperand(0))
7848 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00007849
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007850 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7851 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7852 if (!CLHS || !CRHS)
7853 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00007854
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007855 // Only 10 bits are used.
7856 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00007857
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007858 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
7859 SDLoc DL(N);
7860 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7861 Src, DAG.getConstant(NewMask, DL, MVT::i32));
7862 }
Matt Arsenault3b082382016-04-12 18:24:38 +00007863
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007864 return SDValue();
7865 }
7866
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007867 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7868 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
7869 LHS.getOpcode() == AMDGPUISD::PERM &&
7870 isa<ConstantSDNode>(LHS.getOperand(2))) {
7871 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
7872 if (!Sel)
7873 return SDValue();
7874
7875 Sel |= LHS.getConstantOperandVal(2);
7876 SDLoc DL(N);
7877 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7878 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7879 }
7880
7881 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7882 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7883 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7884 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7885 uint32_t LHSMask = getPermuteMask(DAG, LHS);
7886 uint32_t RHSMask = getPermuteMask(DAG, RHS);
7887 if (LHSMask != ~0u && RHSMask != ~0u) {
7888 // Canonicalize the expression in an attempt to have fewer unique masks
7889 // and therefore fewer registers used to hold the masks.
7890 if (LHSMask > RHSMask) {
7891 std::swap(LHSMask, RHSMask);
7892 std::swap(LHS, RHS);
7893 }
7894
7895      // Select 0xc for each lane used from the source operand. Zero bytes get
7896      // 0xc in the mask, 0xff bytes get 0xff, actual lanes are in the 0-3 range.
7897 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7898 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7899
7900 // Check of we need to combine values from two sources within a byte.
7901 if (!(LHSUsedLanes & RHSUsedLanes) &&
7902 // If we select high and lower word keep it for SDWA.
7903 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7904 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7905 // Kill zero bytes selected by other mask. Zero value is 0xc.
7906 LHSMask &= ~RHSUsedLanes;
7907 RHSMask &= ~LHSUsedLanes;
7908 // Add 4 to each active LHS lane
7909 LHSMask |= LHSUsedLanes & 0x04040404;
7910 // Combine masks
7911 uint32_t Sel = LHSMask | RHSMask;
7912 SDLoc DL(N);
7913
7914 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7915 LHS.getOperand(0), RHS.getOperand(0),
7916 DAG.getConstant(Sel, DL, MVT::i32));
7917 }
7918 }
7919 }
7920
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007921 if (VT != MVT::i64)
7922 return SDValue();
7923
7924 // TODO: This could be a generic combine with a predicate for extracting the
7925 // high half of an integer being free.
7926
7927 // (or i64:x, (zero_extend i32:y)) ->
7928 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
7929 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
7930 RHS.getOpcode() != ISD::ZERO_EXTEND)
7931 std::swap(LHS, RHS);
7932
7933 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
7934 SDValue ExtSrc = RHS.getOperand(0);
7935 EVT SrcVT = ExtSrc.getValueType();
7936 if (SrcVT == MVT::i32) {
7937 SDLoc SL(N);
7938 SDValue LowLHS, HiBits;
7939 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
7940 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
7941
7942 DCI.AddToWorklist(LowOr.getNode());
7943 DCI.AddToWorklist(HiBits.getNode());
7944
7945 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
7946 LowOr, HiBits);
7947 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00007948 }
7949 }
7950
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007951 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
7952 if (CRHS) {
7953 if (SDValue Split
7954 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
7955 return Split;
7956 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00007957
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007958 return SDValue();
7959}
Matt Arsenaultf2290332015-01-06 23:00:39 +00007960
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007961SDValue SITargetLowering::performXorCombine(SDNode *N,
7962 DAGCombinerInfo &DCI) const {
7963 EVT VT = N->getValueType(0);
7964 if (VT != MVT::i64)
7965 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00007966
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007967 SDValue LHS = N->getOperand(0);
7968 SDValue RHS = N->getOperand(1);
7969
7970 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7971 if (CRHS) {
7972 if (SDValue Split
7973 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
7974 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00007975 }
7976
7977 return SDValue();
7978}
7979
Matt Arsenault5cf42712017-04-06 20:58:30 +00007980// Instructions that will be lowered with a final instruction that zeros the
7981// high result bits.
7982// XXX - probably only need to list legal operations.
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007983static bool fp16SrcZerosHighBits(unsigned Opc) {
7984 switch (Opc) {
Matt Arsenault5cf42712017-04-06 20:58:30 +00007985 case ISD::FADD:
7986 case ISD::FSUB:
7987 case ISD::FMUL:
7988 case ISD::FDIV:
7989 case ISD::FREM:
7990 case ISD::FMA:
7991 case ISD::FMAD:
7992 case ISD::FCANONICALIZE:
7993 case ISD::FP_ROUND:
7994 case ISD::UINT_TO_FP:
7995 case ISD::SINT_TO_FP:
7996 case ISD::FABS:
7997 // Fabs is lowered to a bit operation, but it's an and which will clear the
7998 // high bits anyway.
7999 case ISD::FSQRT:
8000 case ISD::FSIN:
8001 case ISD::FCOS:
8002 case ISD::FPOWI:
8003 case ISD::FPOW:
8004 case ISD::FLOG:
8005 case ISD::FLOG2:
8006 case ISD::FLOG10:
8007 case ISD::FEXP:
8008 case ISD::FEXP2:
8009 case ISD::FCEIL:
8010 case ISD::FTRUNC:
8011 case ISD::FRINT:
8012 case ISD::FNEARBYINT:
8013 case ISD::FROUND:
8014 case ISD::FFLOOR:
8015 case ISD::FMINNUM:
8016 case ISD::FMAXNUM:
8017 case AMDGPUISD::FRACT:
8018 case AMDGPUISD::CLAMP:
8019 case AMDGPUISD::COS_HW:
8020 case AMDGPUISD::SIN_HW:
8021 case AMDGPUISD::FMIN3:
8022 case AMDGPUISD::FMAX3:
8023 case AMDGPUISD::FMED3:
8024 case AMDGPUISD::FMAD_FTZ:
8025 case AMDGPUISD::RCP:
8026 case AMDGPUISD::RSQ:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00008027 case AMDGPUISD::RCP_IFLAG:
Matt Arsenault5cf42712017-04-06 20:58:30 +00008028 case AMDGPUISD::LDEXP:
Matt Arsenault8edfaee2017-03-31 19:53:03 +00008029 return true;
Matt Arsenault5cf42712017-04-06 20:58:30 +00008030 default:
8031 // fcopysign, select and others may be lowered to 32-bit bit operations
8032 // which don't zero the high bits.
8033 return false;
Matt Arsenault8edfaee2017-03-31 19:53:03 +00008034 }
8035}
8036
8037SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
8038 DAGCombinerInfo &DCI) const {
8039 if (!Subtarget->has16BitInsts() ||
8040 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8041 return SDValue();
8042
8043 EVT VT = N->getValueType(0);
8044 if (VT != MVT::i32)
8045 return SDValue();
8046
8047 SDValue Src = N->getOperand(0);
8048 if (Src.getValueType() != MVT::i16)
8049 return SDValue();
8050
8051 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
8052 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
8053 if (Src.getOpcode() == ISD::BITCAST) {
8054 SDValue BCSrc = Src.getOperand(0);
8055 if (BCSrc.getValueType() == MVT::f16 &&
8056 fp16SrcZerosHighBits(BCSrc.getOpcode()))
8057 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8058 }
8059
8060 return SDValue();
8061}
8062
Ryan Taylor00e063a2019-03-19 16:07:00 +00008063SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8064 DAGCombinerInfo &DCI)
8065 const {
8066 SDValue Src = N->getOperand(0);
8067 auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8068
8069 if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8070 VTSign->getVT() == MVT::i8) ||
8071 (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8072 VTSign->getVT() == MVT::i16)) &&
8073 Src.hasOneUse()) {
8074 auto *M = cast<MemSDNode>(Src);
8075 SDValue Ops[] = {
8076 Src.getOperand(0), // Chain
8077 Src.getOperand(1), // rsrc
8078 Src.getOperand(2), // vindex
8079 Src.getOperand(3), // voffset
8080 Src.getOperand(4), // soffset
8081 Src.getOperand(5), // offset
8082 Src.getOperand(6),
8083 Src.getOperand(7)
8084 };
8085 // replace with BUFFER_LOAD_BYTE/SHORT
8086 SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8087 Src.getOperand(0).getValueType());
8088 unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8089 AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8090 SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8091 ResList,
8092 Ops, M->getMemoryVT(),
8093 M->getMemOperand());
8094 return DCI.DAG.getMergeValues({BufferLoadSignExt,
8095 BufferLoadSignExt.getValue(1)}, SDLoc(N));
8096 }
8097 return SDValue();
8098}
8099
Matt Arsenaultf2290332015-01-06 23:00:39 +00008100SDValue SITargetLowering::performClassCombine(SDNode *N,
8101 DAGCombinerInfo &DCI) const {
8102 SelectionDAG &DAG = DCI.DAG;
8103 SDValue Mask = N->getOperand(1);
8104
8105 // fp_class x, 0 -> false
8106 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8107 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008108 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00008109 }
8110
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00008111 if (N->getOperand(0).isUndef())
8112 return DAG.getUNDEF(MVT::i1);
8113
Matt Arsenaultf2290332015-01-06 23:00:39 +00008114 return SDValue();
8115}
8116
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00008117SDValue SITargetLowering::performRcpCombine(SDNode *N,
8118 DAGCombinerInfo &DCI) const {
8119 EVT VT = N->getValueType(0);
8120 SDValue N0 = N->getOperand(0);
8121
8122 if (N0.isUndef())
8123 return N0;
8124
8125 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8126 N0.getOpcode() == ISD::SINT_TO_FP)) {
8127 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8128 N->getFlags());
8129 }
8130
8131 return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8132}
8133
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008134bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8135 unsigned MaxDepth) const {
8136 unsigned Opcode = Op.getOpcode();
8137 if (Opcode == ISD::FCANONICALIZE)
8138 return true;
8139
8140 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8141 auto F = CFP->getValueAPF();
8142 if (F.isNaN() && F.isSignaling())
8143 return false;
8144 return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
8145 }
8146
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008147 // If source is a result of another standard FP operation it is already in
8148 // canonical form.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008149 if (MaxDepth == 0)
8150 return false;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008151
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008152 switch (Opcode) {
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008153 // These will flush denorms if required.
8154 case ISD::FADD:
8155 case ISD::FSUB:
8156 case ISD::FMUL:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008157 case ISD::FCEIL:
8158 case ISD::FFLOOR:
8159 case ISD::FMA:
8160 case ISD::FMAD:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008161 case ISD::FSQRT:
8162 case ISD::FDIV:
8163 case ISD::FREM:
Matt Arsenaultce6d61f2018-08-06 21:51:52 +00008164 case ISD::FP_ROUND:
8165 case ISD::FP_EXTEND:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008166 case AMDGPUISD::FMUL_LEGACY:
8167 case AMDGPUISD::FMAD_FTZ:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00008168 case AMDGPUISD::RCP:
8169 case AMDGPUISD::RSQ:
8170 case AMDGPUISD::RSQ_CLAMP:
8171 case AMDGPUISD::RCP_LEGACY:
8172 case AMDGPUISD::RSQ_LEGACY:
8173 case AMDGPUISD::RCP_IFLAG:
8174 case AMDGPUISD::TRIG_PREOP:
8175 case AMDGPUISD::DIV_SCALE:
8176 case AMDGPUISD::DIV_FMAS:
8177 case AMDGPUISD::DIV_FIXUP:
8178 case AMDGPUISD::FRACT:
8179 case AMDGPUISD::LDEXP:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008180 case AMDGPUISD::CVT_PKRTZ_F16_F32:
Matt Arsenault940e6072018-08-10 19:20:17 +00008181 case AMDGPUISD::CVT_F32_UBYTE0:
8182 case AMDGPUISD::CVT_F32_UBYTE1:
8183 case AMDGPUISD::CVT_F32_UBYTE2:
8184 case AMDGPUISD::CVT_F32_UBYTE3:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008185 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008186
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008187 // These can/will be lowered or combined as a bit operation, so we need to
8188 // check their inputs recursively to handle them.
8189 case ISD::FNEG:
8190 case ISD::FABS:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008191 case ISD::FCOPYSIGN:
8192 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008193
8194 case ISD::FSIN:
8195 case ISD::FCOS:
8196 case ISD::FSINCOS:
8197 return Op.getValueType().getScalarType() != MVT::f16;
8198
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008199 case ISD::FMINNUM:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00008200 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008201 case ISD::FMINNUM_IEEE:
8202 case ISD::FMAXNUM_IEEE:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00008203 case AMDGPUISD::CLAMP:
8204 case AMDGPUISD::FMED3:
8205 case AMDGPUISD::FMAX3:
8206 case AMDGPUISD::FMIN3: {
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008207 // FIXME: Shouldn't treat the generic operations differently based on these.
Matt Arsenault687ec752018-10-22 16:27:27 +00008208 // However, we aren't really required to flush the result from
8209 // minnum/maxnum.
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008210
Matt Arsenault687ec752018-10-22 16:27:27 +00008211 // snans will be quieted, so we only need to worry about denormals.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008212 if (Subtarget->supportsMinMaxDenormModes() ||
Matt Arsenault687ec752018-10-22 16:27:27 +00008213 denormalsEnabledForType(Op.getValueType()))
8214 return true;
8215
8216 // Flushing may be required.
8217 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
8218 // targets we need to check their inputs recursively.
8219
8220 // FIXME: Does this apply with clamp? It's implemented with max.
8221 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8222 if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8223 return false;
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008224 }
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008225
Matt Arsenault687ec752018-10-22 16:27:27 +00008226 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008227 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008228 case ISD::SELECT: {
8229 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8230 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008231 }
Matt Arsenaulte94ee832018-08-06 22:45:51 +00008232 case ISD::BUILD_VECTOR: {
8233 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8234 SDValue SrcOp = Op.getOperand(i);
8235 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8236 return false;
8237 }
8238
8239 return true;
8240 }
8241 case ISD::EXTRACT_VECTOR_ELT:
8242 case ISD::EXTRACT_SUBVECTOR: {
8243 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8244 }
8245 case ISD::INSERT_VECTOR_ELT: {
8246 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8247 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8248 }
8249 case ISD::UNDEF:
8250 // Could be anything.
8251 return false;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008252
Matt Arsenault687ec752018-10-22 16:27:27 +00008253 case ISD::BITCAST: {
8254 // Hack around the mess we make when legalizing extract_vector_elt
8255 SDValue Src = Op.getOperand(0);
8256 if (Src.getValueType() == MVT::i16 &&
8257 Src.getOpcode() == ISD::TRUNCATE) {
8258 SDValue TruncSrc = Src.getOperand(0);
8259 if (TruncSrc.getValueType() == MVT::i32 &&
8260 TruncSrc.getOpcode() == ISD::BITCAST &&
8261 TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8262 return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8263 }
8264 }
8265
8266 return false;
8267 }
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008268 case ISD::INTRINSIC_WO_CHAIN: {
8269 unsigned IntrinsicID
8270 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8271 // TODO: Handle more intrinsics
8272 switch (IntrinsicID) {
8273 case Intrinsic::amdgcn_cvt_pkrtz:
Matt Arsenault940e6072018-08-10 19:20:17 +00008274 case Intrinsic::amdgcn_cubeid:
8275 case Intrinsic::amdgcn_frexp_mant:
8276 case Intrinsic::amdgcn_fdot2:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008277 return true;
8278 default:
8279 break;
8280 }
Matt Arsenault5bb9d792018-08-10 17:57:12 +00008281
8282 LLVM_FALLTHROUGH;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008283 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008284 default:
8285 return denormalsEnabledForType(Op.getValueType()) &&
8286 DAG.isKnownNeverSNaN(Op);
8287 }
8288
8289 llvm_unreachable("invalid operation");
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008290}
8291
Matt Arsenault9cd90712016-04-14 01:42:16 +00008292// Constant fold canonicalize.
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008293SDValue SITargetLowering::getCanonicalConstantFP(
8294 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8295 // Flush denormals to 0 if denormals are not enabled for this type.
8296 if (C.isDenormal() && !denormalsEnabledForType(VT))
8297 return DAG.getConstantFP(0.0, SL, VT);
8298
8299 if (C.isNaN()) {
8300 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8301 if (C.isSignaling()) {
8302 // Quiet a signaling NaN.
8303 // FIXME: Is this supposed to preserve payload bits?
8304 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8305 }
8306
8307 // Make sure it is the canonical NaN bitpattern.
8308 //
8309 // TODO: Can we use -1 as the canonical NaN value since it's an inline
8310 // immediate?
8311 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8312 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8313 }
8314
8315 // Already canonical.
8316 return DAG.getConstantFP(C, SL, VT);
8317}
8318
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008319static bool vectorEltWillFoldAway(SDValue Op) {
8320 return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8321}
8322
Matt Arsenault9cd90712016-04-14 01:42:16 +00008323SDValue SITargetLowering::performFCanonicalizeCombine(
8324 SDNode *N,
8325 DAGCombinerInfo &DCI) const {
Matt Arsenault9cd90712016-04-14 01:42:16 +00008326 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault4aec86d2018-07-31 13:34:31 +00008327 SDValue N0 = N->getOperand(0);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008328 EVT VT = N->getValueType(0);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008329
Matt Arsenault4aec86d2018-07-31 13:34:31 +00008330 // fcanonicalize undef -> qnan
8331 if (N0.isUndef()) {
Matt Arsenault4aec86d2018-07-31 13:34:31 +00008332 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8333 return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8334 }
8335
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008336 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
Matt Arsenault9cd90712016-04-14 01:42:16 +00008337 EVT VT = N->getValueType(0);
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008338 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
Matt Arsenault9cd90712016-04-14 01:42:16 +00008339 }
8340
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008341 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8342 // (fcanonicalize k)
8343 //
8344 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8345
8346 // TODO: This could be better with wider vectors that will be split to v2f16,
8347 // and to consider uses since there aren't that many packed operations.
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008348 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8349 isTypeLegal(MVT::v2f16)) {
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008350 SDLoc SL(N);
8351 SDValue NewElts[2];
8352 SDValue Lo = N0.getOperand(0);
8353 SDValue Hi = N0.getOperand(1);
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008354 EVT EltVT = Lo.getValueType();
8355
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008356 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8357 for (unsigned I = 0; I != 2; ++I) {
8358 SDValue Op = N0.getOperand(I);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008359 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8360 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8361 CFP->getValueAPF());
8362 } else if (Op.isUndef()) {
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008363 // Handled below based on what the other operand is.
8364 NewElts[I] = Op;
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008365 } else {
8366 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8367 }
8368 }
8369
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008370 // If one half is undef, and one is constant, prefer a splat vector rather
8371 // than the normal qNaN. If it's a register, prefer 0.0 since that's
8372 // cheaper to use and may be free with a packed operation.
8373 if (NewElts[0].isUndef()) {
8374 NewElts[0] = isa<ConstantFPSDNode>(NewElts[1])
8375 ? NewElts[1]
8376 : DAG.getConstantFP(0.0f, SL, EltVT);
8377 }
8378
8379 if (NewElts[1].isUndef()) {
8380 NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8381 NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8382 }
8383
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008384 return DAG.getBuildVector(VT, SL, NewElts);
8385 }
8386 }
8387
Matt Arsenault687ec752018-10-22 16:27:27 +00008388 unsigned SrcOpc = N0.getOpcode();
8389
8390 // If it's free to do so, push canonicalizes further up the source, which may
8391 // find a canonical source.
8392 //
8393 // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
8394 // sNaNs.
8395 if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8396 auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8397 if (CRHS && N0.hasOneUse()) {
8398 SDLoc SL(N);
8399 SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8400 N0.getOperand(0));
8401 SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8402 DCI.AddToWorklist(Canon0.getNode());
8403
8404 return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8405 }
8406 }
8407
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008408 return isCanonicalized(DAG, N0) ? N0 : SDValue();
Matt Arsenault9cd90712016-04-14 01:42:16 +00008409}
8410
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008411static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8412 switch (Opc) {
8413 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008414 case ISD::FMAXNUM_IEEE:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008415 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008416 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008417 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008418 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008419 return AMDGPUISD::UMAX3;
8420 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008421 case ISD::FMINNUM_IEEE:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008422 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008423 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008424 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008425 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008426 return AMDGPUISD::UMIN3;
8427 default:
8428 llvm_unreachable("Not a min/max opcode");
8429 }
8430}
8431
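// Fold min(max(x, K0), K1) with constants K0 < K1 into an integer med3, e.g.
//   (smin (smax x, C0), C1), C0 < C1  -->  (smed3 x, C0, C1)
// If there is no 16-bit med3, the i16 form is widened to i32 and truncated.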
Matt Arsenault10268f92017-02-27 22:40:39 +00008432SDValue SITargetLowering::performIntMed3ImmCombine(
8433 SelectionDAG &DAG, const SDLoc &SL,
8434 SDValue Op0, SDValue Op1, bool Signed) const {
Matt Arsenaultf639c322016-01-28 20:53:42 +00008435 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8436 if (!K1)
8437 return SDValue();
8438
8439 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8440 if (!K0)
8441 return SDValue();
8442
Matt Arsenaultf639c322016-01-28 20:53:42 +00008443 if (Signed) {
8444 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8445 return SDValue();
8446 } else {
8447 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8448 return SDValue();
8449 }
8450
8451 EVT VT = K0->getValueType(0);
Matt Arsenault10268f92017-02-27 22:40:39 +00008452 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8453 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8454 return DAG.getNode(Med3Opc, SL, VT,
8455 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8456 }
Tom Stellard115a6152016-11-10 16:02:37 +00008457
Matt Arsenault10268f92017-02-27 22:40:39 +00008458 // If there isn't a 16-bit med3 operation, convert to 32-bit.
Tom Stellard115a6152016-11-10 16:02:37 +00008459 MVT NVT = MVT::i32;
8460 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8461
Matt Arsenault10268f92017-02-27 22:40:39 +00008462 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8463 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8464 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
Tom Stellard115a6152016-11-10 16:02:37 +00008465
Matt Arsenault10268f92017-02-27 22:40:39 +00008466 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8467 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
Matt Arsenaultf639c322016-01-28 20:53:42 +00008468}
8469
Matt Arsenault6b114d22017-08-30 01:20:17 +00008470static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8471 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8472 return C;
8473
8474 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8475 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8476 return C;
8477 }
8478
8479 return nullptr;
8480}
8481
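// FP counterpart: fold minnum(maxnum(x, K0), K1) with splat constants K0 <= K1
// into fmed3, or into clamp for the K0 == 0.0 / K1 == 1.0 case when DX10
// clamping is enabled, provided x is known not to be a signaling NaN.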
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008482SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8483 const SDLoc &SL,
8484 SDValue Op0,
8485 SDValue Op1) const {
Matt Arsenault6b114d22017-08-30 01:20:17 +00008486 ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
Matt Arsenaultf639c322016-01-28 20:53:42 +00008487 if (!K1)
8488 return SDValue();
8489
Matt Arsenault6b114d22017-08-30 01:20:17 +00008490 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
Matt Arsenaultf639c322016-01-28 20:53:42 +00008491 if (!K0)
8492 return SDValue();
8493
8494 // Ordered >= (although NaN inputs should have folded away by now).
8495 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8496 if (Cmp == APFloat::cmpGreaterThan)
8497 return SDValue();
8498
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008499 const MachineFunction &MF = DAG.getMachineFunction();
8500 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8501
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008502 // TODO: Check IEEE bit enabled?
Matt Arsenault6b114d22017-08-30 01:20:17 +00008503 EVT VT = Op0.getValueType();
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008504 if (Info->getMode().DX10Clamp) {
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008505 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8506 // hardware fmed3 behavior converting to a min.
8507 // FIXME: Should this be allowing -0.0?
8508 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8509 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8510 }
8511
Matt Arsenault6b114d22017-08-30 01:20:17 +00008512 // med3 for f16 is only available on gfx9+, and not available for v2f16.
8513 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8514 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8515 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8516 // then give the other result, which is different from med3 with a NaN
8517 // input.
8518 SDValue Var = Op0.getOperand(0);
Matt Arsenaultc3dc8e62018-08-03 18:27:52 +00008519 if (!DAG.isKnownNeverSNaN(Var))
Matt Arsenault6b114d22017-08-30 01:20:17 +00008520 return SDValue();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008521
Matt Arsenaultebf46142018-09-18 02:34:54 +00008522 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8523
8524 if ((!K0->hasOneUse() ||
8525 TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
8526 (!K1->hasOneUse() ||
8527 TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
8528 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
8529 Var, SDValue(K0, 0), SDValue(K1, 0));
8530 }
Matt Arsenault6b114d22017-08-30 01:20:17 +00008531 }
Matt Arsenaultf639c322016-01-28 20:53:42 +00008532
Matt Arsenault6b114d22017-08-30 01:20:17 +00008533 return SDValue();
Matt Arsenaultf639c322016-01-28 20:53:42 +00008534}
8535
8536SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
8537 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008538 SelectionDAG &DAG = DCI.DAG;
8539
Matt Arsenault79a45db2017-02-22 23:53:37 +00008540 EVT VT = N->getValueType(0);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008541 unsigned Opc = N->getOpcode();
8542 SDValue Op0 = N->getOperand(0);
8543 SDValue Op1 = N->getOperand(1);
8544
8545 // Only do this if the inner op has one use since this will just increase
8546 // register pressure for no benefit.
8547
Matt Arsenault79a45db2017-02-22 23:53:37 +00008548 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
Neil Henninge85f6bd2019-03-19 15:50:24 +00008549 !VT.isVector() &&
8550 (VT == MVT::i32 || VT == MVT::f32 ||
8551 ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
Matt Arsenault5b39b342016-01-28 20:53:48 +00008552 // max(max(a, b), c) -> max3(a, b, c)
8553 // min(min(a, b), c) -> min3(a, b, c)
8554 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
8555 SDLoc DL(N);
8556 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8557 DL,
8558 N->getValueType(0),
8559 Op0.getOperand(0),
8560 Op0.getOperand(1),
8561 Op1);
8562 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008563
Matt Arsenault5b39b342016-01-28 20:53:48 +00008564 // Try commuted.
8565 // max(a, max(b, c)) -> max3(a, b, c)
8566 // min(a, min(b, c)) -> min3(a, b, c)
8567 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
8568 SDLoc DL(N);
8569 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8570 DL,
8571 N->getValueType(0),
8572 Op0,
8573 Op1.getOperand(0),
8574 Op1.getOperand(1));
8575 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008576 }
8577
Matt Arsenaultf639c322016-01-28 20:53:42 +00008578 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
8579 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
8580 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
8581 return Med3;
8582 }
8583
8584 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
8585 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
8586 return Med3;
8587 }
8588
8589 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00008590 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
Matt Arsenault687ec752018-10-22 16:27:27 +00008591 (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
Matt Arsenault5b39b342016-01-28 20:53:48 +00008592 (Opc == AMDGPUISD::FMIN_LEGACY &&
8593 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenault79a45db2017-02-22 23:53:37 +00008594 (VT == MVT::f32 || VT == MVT::f64 ||
Matt Arsenault6b114d22017-08-30 01:20:17 +00008595 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
8596 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008597 Op0.hasOneUse()) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00008598 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
8599 return Res;
8600 }
8601
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008602 return SDValue();
8603}
8604
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008605static bool isClampZeroToOne(SDValue A, SDValue B) {
8606 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
8607 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
8608 // FIXME: Should this be allowing -0.0?
8609 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
8610 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
8611 }
8612 }
8613
8614 return false;
8615}
8616
8617// FIXME: Should only worry about snans for version with chain.
8618SDValue SITargetLowering::performFMed3Combine(SDNode *N,
8619 DAGCombinerInfo &DCI) const {
8620 EVT VT = N->getValueType(0);
8621 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
8622 // NaNs. With a NaN input, the order of the operands may change the result.
8623
8624 SelectionDAG &DAG = DCI.DAG;
8625 SDLoc SL(N);
8626
8627 SDValue Src0 = N->getOperand(0);
8628 SDValue Src1 = N->getOperand(1);
8629 SDValue Src2 = N->getOperand(2);
8630
8631 if (isClampZeroToOne(Src0, Src1)) {
8632 // const_a, const_b, x -> clamp is safe in all cases including signaling
8633 // nans.
8634 // FIXME: Should this be allowing -0.0?
8635 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
8636 }
8637
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008638 const MachineFunction &MF = DAG.getMachineFunction();
8639 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8640
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008641 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
8642 // handling no dx10-clamp?
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008643 if (Info->getMode().DX10Clamp) {
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008644 // If NaNs are clamped to 0, we are free to reorder the inputs.
8645
8646 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8647 std::swap(Src0, Src1);
8648
8649 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
8650 std::swap(Src1, Src2);
8651
8652 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8653 std::swap(Src0, Src1);
8654
8655 if (isClampZeroToOne(Src1, Src2))
8656 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
8657 }
8658
8659 return SDValue();
8660}
8661
Matt Arsenault1f17c662017-02-22 00:27:34 +00008662SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
8663 DAGCombinerInfo &DCI) const {
8664 SDValue Src0 = N->getOperand(0);
8665 SDValue Src1 = N->getOperand(1);
8666 if (Src0.isUndef() && Src1.isUndef())
8667 return DCI.DAG.getUNDEF(N->getValueType(0));
8668 return SDValue();
8669}
8670
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008671SDValue SITargetLowering::performExtractVectorEltCombine(
8672 SDNode *N, DAGCombinerInfo &DCI) const {
8673 SDValue Vec = N->getOperand(0);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00008674 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008675
8676 EVT VecVT = Vec.getValueType();
8677 EVT EltVT = VecVT.getVectorElementType();
8678
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00008679 if ((Vec.getOpcode() == ISD::FNEG ||
8680 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008681 SDLoc SL(N);
8682 EVT EltVT = N->getValueType(0);
8683 SDValue Idx = N->getOperand(1);
8684 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8685 Vec.getOperand(0), Idx);
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00008686 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008687 }
8688
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008689 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
8690 // =>
8691 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
8692 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
8693 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
Farhana Aleene24f3ff2018-05-09 21:18:34 +00008694 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008695 SDLoc SL(N);
8696 EVT EltVT = N->getValueType(0);
8697 SDValue Idx = N->getOperand(1);
8698 unsigned Opc = Vec.getOpcode();
8699
8700 switch(Opc) {
8701 default:
Stanislav Mekhanoshinbcb34ac2018-11-13 21:18:21 +00008702 break;
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008703 // TODO: Support other binary operations.
8704 case ISD::FADD:
Matt Arsenaulta8160732018-08-15 21:34:06 +00008705 case ISD::FSUB:
8706 case ISD::FMUL:
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008707 case ISD::ADD:
Farhana Aleene24f3ff2018-05-09 21:18:34 +00008708 case ISD::UMIN:
8709 case ISD::UMAX:
8710 case ISD::SMIN:
8711 case ISD::SMAX:
8712 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008713 case ISD::FMINNUM:
8714 case ISD::FMAXNUM_IEEE:
8715 case ISD::FMINNUM_IEEE: {
Matt Arsenaulta8160732018-08-15 21:34:06 +00008716 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8717 Vec.getOperand(0), Idx);
8718 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8719 Vec.getOperand(1), Idx);
8720
8721 DCI.AddToWorklist(Elt0.getNode());
8722 DCI.AddToWorklist(Elt1.getNode());
8723 return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
8724 }
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008725 }
8726 }
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008727
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008728 unsigned VecSize = VecVT.getSizeInBits();
8729 unsigned EltSize = EltVT.getSizeInBits();
8730
Stanislav Mekhanoshinbcb34ac2018-11-13 21:18:21 +00008731 // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
8732 // This eliminates a non-constant index and subsequent movrel or scratch access.
8733 // Sub-dword vectors of size 2 dwords or less have a better implementation.
8734 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
8735 // instructions.
8736 if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
8737 !isa<ConstantSDNode>(N->getOperand(1))) {
8738 SDLoc SL(N);
8739 SDValue Idx = N->getOperand(1);
8740 EVT IdxVT = Idx.getValueType();
8741 SDValue V;
8742 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8743 SDValue IC = DAG.getConstant(I, SL, IdxVT);
8744 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8745 if (I == 0)
8746 V = Elt;
8747 else
8748 V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
8749 }
8750 return V;
8751 }
8752
8753 if (!DCI.isBeforeLegalize())
8754 return SDValue();
8755
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008756 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
8757 // elements. This exposes more load reduction opportunities by replacing
8758 // multiple small extract_vector_elements with a single 32-bit extract.
8759 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
Matt Arsenaultbf07a502018-08-31 15:39:52 +00008760 if (isa<MemSDNode>(Vec) &&
8761 EltSize <= 16 &&
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008762 EltVT.isByteSized() &&
8763 VecSize > 32 &&
8764 VecSize % 32 == 0 &&
8765 Idx) {
8766 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
8767
8768 unsigned BitIndex = Idx->getZExtValue() * EltSize;
8769 unsigned EltIdx = BitIndex / 32;
8770 unsigned LeftoverBitIdx = BitIndex % 32;
8771 SDLoc SL(N);
8772
8773 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
8774 DCI.AddToWorklist(Cast.getNode());
8775
8776 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
8777 DAG.getConstant(EltIdx, SL, MVT::i32));
8778 DCI.AddToWorklist(Elt.getNode());
8779 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
8780 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
8781 DCI.AddToWorklist(Srl.getNode());
8782
8783 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
8784 DCI.AddToWorklist(Trunc.getNode());
8785 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
8786 }
8787
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008788 return SDValue();
8789}
8790
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00008791SDValue
8792SITargetLowering::performInsertVectorEltCombine(SDNode *N,
8793 DAGCombinerInfo &DCI) const {
8794 SDValue Vec = N->getOperand(0);
8795 SDValue Idx = N->getOperand(2);
8796 EVT VecVT = Vec.getValueType();
8797 EVT EltVT = VecVT.getVectorElementType();
8798 unsigned VecSize = VecVT.getSizeInBits();
8799 unsigned EltSize = EltVT.getSizeInBits();
8800
8801 // INSERT_VECTOR_ELT (<n x e>, var-idx)
8802 // => BUILD_VECTOR n x select (e, const-idx)
8803 // This eliminates a non-constant index and subsequent movrel or scratch access.
8804 // Sub-dword vectors of size 2 dwords or less have a better implementation.
8805 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
8806 // instructions.
8807 if (isa<ConstantSDNode>(Idx) ||
8808 VecSize > 256 || (VecSize <= 64 && EltSize < 32))
8809 return SDValue();
8810
8811 SelectionDAG &DAG = DCI.DAG;
8812 SDLoc SL(N);
8813 SDValue Ins = N->getOperand(1);
8814 EVT IdxVT = Idx.getValueType();
8815
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00008816 SmallVector<SDValue, 16> Ops;
8817 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8818 SDValue IC = DAG.getConstant(I, SL, IdxVT);
8819 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8820 SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
8821 Ops.push_back(V);
8822 }
8823
8824 return DAG.getBuildVector(VecVT, SL, Ops);
8825}
8826
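// Pick the fused opcode for combining an fadd/fsub with a multiply: FMAD when
// denormals do not need to be honored for VT and FMAD is legal, FMA when
// contraction is allowed and FMA is fast, or 0 if no fused form should be used.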
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008827unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
8828 const SDNode *N0,
8829 const SDNode *N1) const {
8830 EVT VT = N0->getValueType(0);
8831
Matt Arsenault770ec862016-12-22 03:55:35 +00008832 // Only do this if we are not trying to support denormals. v_mad_f32 does not
8833 // support denormals ever.
Stanislav Mekhanoshin28a19362019-05-04 04:20:37 +00008834 if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
8835 (VT == MVT::f16 && !Subtarget->hasFP16Denormals() &&
8836 getSubtarget()->hasMadF16())) &&
8837 isOperationLegal(ISD::FMAD, VT))
Matt Arsenault770ec862016-12-22 03:55:35 +00008838 return ISD::FMAD;
8839
8840 const TargetOptions &Options = DAG.getTarget().Options;
Amara Emersond28f0cd42017-05-01 15:17:51 +00008841 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
Michael Berg7acc81b2018-05-04 18:48:20 +00008842 (N0->getFlags().hasAllowContract() &&
8843 N1->getFlags().hasAllowContract())) &&
Matt Arsenault770ec862016-12-22 03:55:35 +00008844 isFMAFasterThanFMulAndFAdd(VT)) {
8845 return ISD::FMA;
8846 }
8847
8848 return 0;
8849}
8850
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00008851// For a reassociatable opcode perform:
8852// op x, (op y, z) -> op (op x, z), y, if x and z are uniform
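// Grouping the two uniform operands together should allow the inner operation
// to be selected to a scalar (SALU) instruction.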
8853SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
8854 SelectionDAG &DAG) const {
8855 EVT VT = N->getValueType(0);
8856 if (VT != MVT::i32 && VT != MVT::i64)
8857 return SDValue();
8858
8859 unsigned Opc = N->getOpcode();
8860 SDValue Op0 = N->getOperand(0);
8861 SDValue Op1 = N->getOperand(1);
8862
8863 if (!(Op0->isDivergent() ^ Op1->isDivergent()))
8864 return SDValue();
8865
8866 if (Op0->isDivergent())
8867 std::swap(Op0, Op1);
8868
8869 if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
8870 return SDValue();
8871
8872 SDValue Op2 = Op1.getOperand(1);
8873 Op1 = Op1.getOperand(0);
8874 if (!(Op1->isDivergent() ^ Op2->isDivergent()))
8875 return SDValue();
8876
8877 if (Op1->isDivergent())
8878 std::swap(Op1, Op2);
8879
8880 // If either operand is constant this will conflict with
8881 // DAGCombiner::ReassociateOps().
Stanislav Mekhanoshinda1628e2019-02-26 20:56:25 +00008882 if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
8883 DAG.isConstantIntBuildVectorOrConstantInt(Op1))
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00008884 return SDValue();
8885
8886 SDLoc SL(N);
8887 SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
8888 return DAG.getNode(Opc, SL, VT, Add1, Op2);
8889}
8890
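// Emit (trunc (mad_i64_i32/mad_u64_u32 N0, N1, N2)): compute the full 64-bit
// product plus addend, then truncate back to VT.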
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008891static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
8892 EVT VT,
8893 SDValue N0, SDValue N1, SDValue N2,
8894 bool Signed) {
8895 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
8896 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
8897 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
8898 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
8899}
8900
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008901SDValue SITargetLowering::performAddCombine(SDNode *N,
8902 DAGCombinerInfo &DCI) const {
8903 SelectionDAG &DAG = DCI.DAG;
8904 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008905 SDLoc SL(N);
8906 SDValue LHS = N->getOperand(0);
8907 SDValue RHS = N->getOperand(1);
8908
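  // Fold add (mul x, y), z into mad_u64_u32/mad_i64_i32 when both factors are
  // known to fit in 32 bits and the target supports mad64_32.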
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008909 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
8910 && Subtarget->hasMad64_32() &&
8911 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
8912 VT.getScalarSizeInBits() <= 64) {
8913 if (LHS.getOpcode() != ISD::MUL)
8914 std::swap(LHS, RHS);
8915
8916 SDValue MulLHS = LHS.getOperand(0);
8917 SDValue MulRHS = LHS.getOperand(1);
8918 SDValue AddRHS = RHS;
8919
8920 // TODO: Maybe restrict if SGPR inputs.
8921 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
8922 numBitsUnsigned(MulRHS, DAG) <= 32) {
8923 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
8924 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
8925 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
8926 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
8927 }
8928
8929 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
8930 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
8931 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
8932 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
8933 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
8934 }
8935
8936 return SDValue();
8937 }
8938
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00008939 if (SDValue V = reassociateScalarOps(N, DAG)) {
8940 return V;
8941 }
8942
Farhana Aleen07e61232018-05-02 18:16:39 +00008943 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008944 return SDValue();
8945
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008946 // add x, zext (setcc) => addcarry x, 0, setcc
8947 // add x, sext (setcc) => subcarry x, 0, setcc
8948 unsigned Opc = LHS.getOpcode();
8949 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008950 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008951 std::swap(RHS, LHS);
8952
8953 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008954 switch (Opc) {
8955 default: break;
8956 case ISD::ZERO_EXTEND:
8957 case ISD::SIGN_EXTEND:
8958 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008959 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00008960 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00008961 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008962 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
8963 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
8964 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
8965 return DAG.getNode(Opc, SL, VTList, Args);
8966 }
8967 case ISD::ADDCARRY: {
8968 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
8969 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8970 if (!C || C->getZExtValue() != 0) break;
8971 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
8972 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
8973 }
8974 }
8975 return SDValue();
8976}
8977
8978SDValue SITargetLowering::performSubCombine(SDNode *N,
8979 DAGCombinerInfo &DCI) const {
8980 SelectionDAG &DAG = DCI.DAG;
8981 EVT VT = N->getValueType(0);
8982
8983 if (VT != MVT::i32)
8984 return SDValue();
8985
8986 SDLoc SL(N);
8987 SDValue LHS = N->getOperand(0);
8988 SDValue RHS = N->getOperand(1);
8989
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008990 if (LHS.getOpcode() == ISD::SUBCARRY) {
8991 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
8992 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
Stanislav Mekhanoshin42e229e2019-02-21 02:58:00 +00008993 if (!C || !C->isNullValue())
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008994 return SDValue();
8995 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
8996 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
8997 }
8998 return SDValue();
8999}
9000
9001SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
9002 DAGCombinerInfo &DCI) const {
9003
9004 if (N->getValueType(0) != MVT::i32)
9005 return SDValue();
9006
9007 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9008 if (!C || C->getZExtValue() != 0)
9009 return SDValue();
9010
9011 SelectionDAG &DAG = DCI.DAG;
9012 SDValue LHS = N->getOperand(0);
9013
9014 // addcarry (add x, y), 0, cc => addcarry x, y, cc
9015 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
9016 unsigned LHSOpc = LHS.getOpcode();
9017 unsigned Opc = N->getOpcode();
9018 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
9019 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
9020 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
9021 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009022 }
9023 return SDValue();
9024}
9025
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009026SDValue SITargetLowering::performFAddCombine(SDNode *N,
9027 DAGCombinerInfo &DCI) const {
9028 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9029 return SDValue();
9030
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009031 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00009032 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00009033
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009034 SDLoc SL(N);
9035 SDValue LHS = N->getOperand(0);
9036 SDValue RHS = N->getOperand(1);
9037
9038 // These should really be instruction patterns, but writing patterns with
9039 // source modifiers is a pain.
9040
9041 // fadd (fadd (a, a), b) -> mad 2.0, a, b
9042 if (LHS.getOpcode() == ISD::FADD) {
9043 SDValue A = LHS.getOperand(0);
9044 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009045 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009046 if (FusedOp != 0) {
9047 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009048 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00009049 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009050 }
9051 }
9052
9053 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
9054 if (RHS.getOpcode() == ISD::FADD) {
9055 SDValue A = RHS.getOperand(0);
9056 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009057 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009058 if (FusedOp != 0) {
9059 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009060 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00009061 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009062 }
9063 }
9064
9065 return SDValue();
9066}
9067
9068SDValue SITargetLowering::performFSubCombine(SDNode *N,
9069 DAGCombinerInfo &DCI) const {
9070 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9071 return SDValue();
9072
9073 SelectionDAG &DAG = DCI.DAG;
9074 SDLoc SL(N);
9075 EVT VT = N->getValueType(0);
9076 assert(!VT.isVector());
9077
9078 // Try to get the fneg to fold into the source modifier. This undoes generic
9079 // DAG combines and folds them into the mad.
9080 //
9081 // Only do this if we are not trying to support denormals. v_mad_f32 does
9082 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00009083 SDValue LHS = N->getOperand(0);
9084 SDValue RHS = N->getOperand(1);
9085 if (LHS.getOpcode() == ISD::FADD) {
9086 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9087 SDValue A = LHS.getOperand(0);
9088 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009089 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009090 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009091 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9092 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9093
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009094 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009095 }
9096 }
Matt Arsenault770ec862016-12-22 03:55:35 +00009097 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009098
Matt Arsenault770ec862016-12-22 03:55:35 +00009099 if (RHS.getOpcode() == ISD::FADD) {
9100 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009101
Matt Arsenault770ec862016-12-22 03:55:35 +00009102 SDValue A = RHS.getOperand(0);
9103 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009104 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009105 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009106 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009107 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009108 }
9109 }
9110 }
9111
9112 return SDValue();
9113}
9114
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009115SDValue SITargetLowering::performFMACombine(SDNode *N,
9116 DAGCombinerInfo &DCI) const {
9117 SelectionDAG &DAG = DCI.DAG;
9118 EVT VT = N->getValueType(0);
9119 SDLoc SL(N);
9120
Stanislav Mekhanoshin0e858b02019-02-09 00:34:21 +00009121 if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009122 return SDValue();
9123
9124 // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9125 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
9126 SDValue Op1 = N->getOperand(0);
9127 SDValue Op2 = N->getOperand(1);
9128 SDValue FMA = N->getOperand(2);
9129
9130 if (FMA.getOpcode() != ISD::FMA ||
9131 Op1.getOpcode() != ISD::FP_EXTEND ||
9132 Op2.getOpcode() != ISD::FP_EXTEND)
9133 return SDValue();
9134
9135 // fdot2_f32_f16 always flushes fp32 denormal operands and the output to zero,
9136 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
9137 // is sufficient to allow generating fdot2.
9138 const TargetOptions &Options = DAG.getTarget().Options;
9139 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9140 (N->getFlags().hasAllowContract() &&
9141 FMA->getFlags().hasAllowContract())) {
9142 Op1 = Op1.getOperand(0);
9143 Op2 = Op2.getOperand(0);
9144 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9145 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9146 return SDValue();
9147
9148 SDValue Vec1 = Op1.getOperand(0);
9149 SDValue Idx1 = Op1.getOperand(1);
9150 SDValue Vec2 = Op2.getOperand(0);
9151
9152 SDValue FMAOp1 = FMA.getOperand(0);
9153 SDValue FMAOp2 = FMA.getOperand(1);
9154 SDValue FMAAcc = FMA.getOperand(2);
9155
9156 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9157 FMAOp2.getOpcode() != ISD::FP_EXTEND)
9158 return SDValue();
9159
9160 FMAOp1 = FMAOp1.getOperand(0);
9161 FMAOp2 = FMAOp2.getOperand(0);
9162 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9163 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9164 return SDValue();
9165
9166 SDValue Vec3 = FMAOp1.getOperand(0);
9167 SDValue Vec4 = FMAOp2.getOperand(0);
9168 SDValue Idx2 = FMAOp1.getOperand(1);
9169
9170 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9171 // Idx1 and Idx2 cannot be the same.
9172 Idx1 == Idx2)
9173 return SDValue();
9174
9175 if (Vec1 == Vec2 || Vec3 == Vec4)
9176 return SDValue();
9177
9178 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9179 return SDValue();
9180
9181 if ((Vec1 == Vec3 && Vec2 == Vec4) ||
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00009182 (Vec1 == Vec4 && Vec2 == Vec3)) {
9183 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9184 DAG.getTargetConstant(0, SL, MVT::i1));
9185 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009186 }
9187 return SDValue();
9188}
9189
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009190SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9191 DAGCombinerInfo &DCI) const {
9192 SelectionDAG &DAG = DCI.DAG;
9193 SDLoc SL(N);
9194
9195 SDValue LHS = N->getOperand(0);
9196 SDValue RHS = N->getOperand(1);
9197 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00009198 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9199
9200 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9201 if (!CRHS) {
9202 CRHS = dyn_cast<ConstantSDNode>(LHS);
9203 if (CRHS) {
9204 std::swap(LHS, RHS);
9205 CC = getSetCCSwappedOperands(CC);
9206 }
9207 }
9208
Stanislav Mekhanoshin3b117942018-06-16 03:46:59 +00009209 if (CRHS) {
9210 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9211 isBoolSGPR(LHS.getOperand(0))) {
9212 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9213 // setcc (sext from i1 cc), -1, ne|sgt|ult => not cc => xor cc, -1
9214 // setcc (sext from i1 cc), -1, eq|sle|uge => cc
9215 // setcc (sext from i1 cc), 0, eq|sge|ule => not cc => xor cc, -1
9216 // setcc (sext from i1 cc), 0, ne|ugt|slt => cc
9217 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9218 (CRHS->isNullValue() &&
9219 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9220 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9221 DAG.getConstant(-1, SL, MVT::i1));
9222 if ((CRHS->isAllOnesValue() &&
9223 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9224 (CRHS->isNullValue() &&
9225 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9226 return LHS.getOperand(0);
9227 }
9228
9229 uint64_t CRHSVal = CRHS->getZExtValue();
9230 if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9231 LHS.getOpcode() == ISD::SELECT &&
9232 isa<ConstantSDNode>(LHS.getOperand(1)) &&
9233 isa<ConstantSDNode>(LHS.getOperand(2)) &&
9234 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9235 isBoolSGPR(LHS.getOperand(0))) {
9236 // Given CT != FT:
9237 // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9238 // setcc (select cc, CT, CF), CF, ne => cc
9239 // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9240 // setcc (select cc, CT, CF), CT, eq => cc
9241 uint64_t CT = LHS.getConstantOperandVal(1);
9242 uint64_t CF = LHS.getConstantOperandVal(2);
9243
9244 if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9245 (CT == CRHSVal && CC == ISD::SETNE))
9246 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9247 DAG.getConstant(-1, SL, MVT::i1));
9248 if ((CF == CRHSVal && CC == ISD::SETNE) ||
9249 (CT == CRHSVal && CC == ISD::SETEQ))
9250 return LHS.getOperand(0);
9251 }
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00009252 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009253
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00009254 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9255 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009256 return SDValue();
9257
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009258 // Match isinf/isfinite pattern
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009259 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009260 // (fcmp one (fabs x), inf) -> (fp_class x,
9261 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9262 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009263 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9264 if (!CRHS)
9265 return SDValue();
9266
9267 const APFloat &APF = CRHS->getValueAPF();
9268 if (APF.isInfinity() && !APF.isNegative()) {
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009269 const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9270 SIInstrFlags::N_INFINITY;
9271 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9272 SIInstrFlags::P_ZERO |
9273 SIInstrFlags::N_NORMAL |
9274 SIInstrFlags::P_NORMAL |
9275 SIInstrFlags::N_SUBNORMAL |
9276 SIInstrFlags::P_SUBNORMAL;
9277 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009278 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9279 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009280 }
9281 }
9282
9283 return SDValue();
9284}
9285
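// CVT_F32_UBYTEn converts byte n of its 32-bit source to a float. Fold a
// constant shift of the source into the byte index, and let demanded-bits
// simplification drop source bits the conversion cannot observe.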
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009286SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9287 DAGCombinerInfo &DCI) const {
9288 SelectionDAG &DAG = DCI.DAG;
9289 SDLoc SL(N);
9290 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9291
9292 SDValue Src = N->getOperand(0);
9293 SDValue Srl = N->getOperand(0);
9294 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9295 Srl = Srl.getOperand(0);
9296
9297 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9298 if (Srl.getOpcode() == ISD::SRL) {
9299 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9300 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9301 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
9302
9303 if (const ConstantSDNode *C =
9304 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9305 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9306 EVT(MVT::i32));
9307
9308 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9309 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9310 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9311 MVT::f32, Srl);
9312 }
9313 }
9314 }
9315
9316 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9317
Craig Topperd0af7e82017-04-28 05:31:46 +00009318 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009319 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9320 !DCI.isBeforeLegalizeOps());
9321 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Stanislav Mekhanoshined0d6c62019-01-09 02:24:22 +00009322 if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009323 DCI.CommitTargetLoweringOpt(TLO);
9324 }
9325
9326 return SDValue();
9327}
9328
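// Constant fold AMDGPUISD::CLAMP of a constant: values below 0.0 (and NaN when
// DX10 clamp is enabled) fold to 0.0, values above 1.0 fold to 1.0.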
Tom Stellard1b95fed2018-05-24 05:28:34 +00009329SDValue SITargetLowering::performClampCombine(SDNode *N,
9330 DAGCombinerInfo &DCI) const {
9331 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9332 if (!CSrc)
9333 return SDValue();
9334
Matt Arsenault055e4dc2019-03-29 19:14:54 +00009335 const MachineFunction &MF = DCI.DAG.getMachineFunction();
Tom Stellard1b95fed2018-05-24 05:28:34 +00009336 const APFloat &F = CSrc->getValueAPF();
9337 APFloat Zero = APFloat::getZero(F.getSemantics());
9338 APFloat::cmpResult Cmp0 = F.compare(Zero);
9339 if (Cmp0 == APFloat::cmpLessThan ||
Matt Arsenault055e4dc2019-03-29 19:14:54 +00009340 (Cmp0 == APFloat::cmpUnordered &&
9341 MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
Tom Stellard1b95fed2018-05-24 05:28:34 +00009342 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9343 }
9344
9345 APFloat One(F.getSemantics(), "1.0");
9346 APFloat::cmpResult Cmp1 = F.compare(One);
9347 if (Cmp1 == APFloat::cmpGreaterThan)
9348 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9349
9350 return SDValue(CSrc, 0);
9351}
9352
9353
Tom Stellard75aadc22012-12-11 21:25:42 +00009354SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9355 DAGCombinerInfo &DCI) const {
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00009356 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9357 return SDValue();
Tom Stellard75aadc22012-12-11 21:25:42 +00009358 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00009359 default:
9360 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009361 case ISD::ADD:
9362 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009363 case ISD::SUB:
9364 return performSubCombine(N, DCI);
9365 case ISD::ADDCARRY:
9366 case ISD::SUBCARRY:
9367 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009368 case ISD::FADD:
9369 return performFAddCombine(N, DCI);
9370 case ISD::FSUB:
9371 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009372 case ISD::SETCC:
9373 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00009374 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00009375 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00009376 case ISD::FMAXNUM_IEEE:
9377 case ISD::FMINNUM_IEEE:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00009378 case ISD::SMAX:
9379 case ISD::SMIN:
9380 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00009381 case ISD::UMIN:
9382 case AMDGPUISD::FMIN_LEGACY:
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00009383 case AMDGPUISD::FMAX_LEGACY:
9384 return performMinMaxCombine(N, DCI);
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009385 case ISD::FMA:
9386 return performFMACombine(N, DCI);
Matt Arsenault90083d32018-06-07 09:54:49 +00009387 case ISD::LOAD: {
9388 if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
9389 return Widended;
9390 LLVM_FALLTHROUGH;
9391 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009392 case ISD::STORE:
9393 case ISD::ATOMIC_LOAD:
9394 case ISD::ATOMIC_STORE:
9395 case ISD::ATOMIC_CMP_SWAP:
9396 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9397 case ISD::ATOMIC_SWAP:
9398 case ISD::ATOMIC_LOAD_ADD:
9399 case ISD::ATOMIC_LOAD_SUB:
9400 case ISD::ATOMIC_LOAD_AND:
9401 case ISD::ATOMIC_LOAD_OR:
9402 case ISD::ATOMIC_LOAD_XOR:
9403 case ISD::ATOMIC_LOAD_NAND:
9404 case ISD::ATOMIC_LOAD_MIN:
9405 case ISD::ATOMIC_LOAD_MAX:
9406 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00009407 case ISD::ATOMIC_LOAD_UMAX:
Matt Arsenaulta5840c32019-01-22 18:36:06 +00009408 case ISD::ATOMIC_LOAD_FADD:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00009409 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00009410 case AMDGPUISD::ATOMIC_DEC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00009411 case AMDGPUISD::ATOMIC_LOAD_FMIN:
Matt Arsenaulta5840c32019-01-22 18:36:06 +00009412 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009413 if (DCI.isBeforeLegalize())
9414 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009415 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00009416 case ISD::AND:
9417 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00009418 case ISD::OR:
9419 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00009420 case ISD::XOR:
9421 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00009422 case ISD::ZERO_EXTEND:
9423 return performZeroExtendCombine(N, DCI);
Ryan Taylor00e063a2019-03-19 16:07:00 +00009424 case ISD::SIGN_EXTEND_INREG:
9425 return performSignExtendInRegCombine(N , DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00009426 case AMDGPUISD::FP_CLASS:
9427 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00009428 case ISD::FCANONICALIZE:
9429 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009430 case AMDGPUISD::RCP:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00009431 return performRcpCombine(N, DCI);
9432 case AMDGPUISD::FRACT:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009433 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00009434 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009435 case AMDGPUISD::RSQ_LEGACY:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00009436 case AMDGPUISD::RCP_IFLAG:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009437 case AMDGPUISD::RSQ_CLAMP:
9438 case AMDGPUISD::LDEXP: {
9439 SDValue Src = N->getOperand(0);
9440 if (Src.isUndef())
9441 return Src;
9442 break;
9443 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009444 case ISD::SINT_TO_FP:
9445 case ISD::UINT_TO_FP:
9446 return performUCharToFloatCombine(N, DCI);
9447 case AMDGPUISD::CVT_F32_UBYTE0:
9448 case AMDGPUISD::CVT_F32_UBYTE1:
9449 case AMDGPUISD::CVT_F32_UBYTE2:
9450 case AMDGPUISD::CVT_F32_UBYTE3:
9451 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00009452 case AMDGPUISD::FMED3:
9453 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00009454 case AMDGPUISD::CVT_PKRTZ_F16_F32:
9455 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00009456 case AMDGPUISD::CLAMP:
9457 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00009458 case ISD::SCALAR_TO_VECTOR: {
9459 SelectionDAG &DAG = DCI.DAG;
9460 EVT VT = N->getValueType(0);
9461
9462 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9463 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9464 SDLoc SL(N);
9465 SDValue Src = N->getOperand(0);
9466 EVT EltVT = Src.getValueType();
9467 if (EltVT == MVT::f16)
9468 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9469
9470 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9471 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9472 }
9473
9474 break;
9475 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00009476 case ISD::EXTRACT_VECTOR_ELT:
9477 return performExtractVectorEltCombine(N, DCI);
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00009478 case ISD::INSERT_VECTOR_ELT:
9479 return performInsertVectorEltCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009480 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00009481 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00009482}
Christian Konigd910b7d2013-02-26 17:52:16 +00009483
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009484/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00009485static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009486 switch (Idx) {
9487 default: return 0;
9488 case AMDGPU::sub0: return 0;
9489 case AMDGPU::sub1: return 1;
9490 case AMDGPU::sub2: return 2;
9491 case AMDGPU::sub3: return 3;
David Stuttardf77079f2019-01-14 11:55:24 +00009492 case AMDGPU::sub4: return 4; // Possible with TFE/LWE
Christian Konig8e06e2a2013-04-10 08:39:08 +00009493 }
9494}
9495
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009496/// Adjust the writemask of MIMG instructions
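/// For example, if an image_sample with dmask 0xf has users that only extract
/// the first and third components of the result, the dmask can be shrunk to
/// 0x5 and an equivalent MIMG opcode with fewer return channels selected.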
Matt Arsenault68f05052017-12-04 22:18:27 +00009497SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9498 SelectionDAG &DAG) const {
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009499 unsigned Opcode = Node->getMachineOpcode();
9500
9501 // Subtract 1 because the vdata output is not a MachineSDNode operand.
9502 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9503 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9504 return Node; // not implemented for D16
9505
David Stuttardf77079f2019-01-14 11:55:24 +00009506 SDNode *Users[5] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00009507 unsigned Lane = 0;
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009508 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009509 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00009510 unsigned NewDmask = 0;
David Stuttardf77079f2019-01-14 11:55:24 +00009511 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9512 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
9513 bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
9514 Node->getConstantOperandVal(LWEIdx);
9515 unsigned TFCLane = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00009516 bool HasChain = Node->getNumValues() > 1;
9517
9518 if (OldDmask == 0) {
9519 // These are folded out, but on the chance it happens don't assert.
9520 return Node;
9521 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009522
David Stuttardf77079f2019-01-14 11:55:24 +00009523 unsigned OldBitsSet = countPopulation(OldDmask);
9524 // Work out which is the TFE/LWE lane if that is enabled.
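  // (With TFE/LWE enabled, the status result is returned in the register just
  // past the data channels, so its lane index equals the number of dmask bits
  // set.)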
9525 if (UsesTFC) {
9526 TFCLane = OldBitsSet;
9527 }
9528
Christian Konig8e06e2a2013-04-10 08:39:08 +00009529 // Try to figure out the used register components
9530 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9531 I != E; ++I) {
9532
Matt Arsenault93e65ea2017-02-22 21:16:41 +00009533 // Don't look at users of the chain.
9534 if (I.getUse().getResNo() != 0)
9535 continue;
9536
Christian Konig8e06e2a2013-04-10 08:39:08 +00009537 // Abort if we can't understand the usage
9538 if (!I->isMachineOpcode() ||
9539 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +00009540 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009541
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +00009542 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +00009543 // Note that subregs are packed, i.e. Lane==0 is the first bit set
9544 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
9545 // set, etc.
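    // For example, with OldDmask == 0b1010 a use of sub0 maps to component 1
    // (Y) and a use of sub1 maps to component 3 (W).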
Christian Konig8b1ed282013-04-10 08:39:16 +00009546 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00009547
David Stuttardf77079f2019-01-14 11:55:24 +00009548 // Check if the use is for the TFE/LWE generated result at VGPRn+1.
9549 if (UsesTFC && Lane == TFCLane) {
9550 Users[Lane] = *I;
9551 } else {
9552 // Set which texture component corresponds to the lane.
9553 unsigned Comp;
9554 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
9555 Comp = countTrailingZeros(Dmask);
9556 Dmask &= ~(1 << Comp);
9557 }
9558
9559 // Abort if we have more than one user per component.
9560 if (Users[Lane])
9561 return Node;
9562
9563 Users[Lane] = *I;
9564 NewDmask |= 1 << Comp;
Tom Stellard54774e52013-10-23 02:53:47 +00009565 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009566 }
9567
David Stuttardf77079f2019-01-14 11:55:24 +00009568 // Don't allow 0 dmask, as hardware assumes one channel enabled.
9569 bool NoChannels = !NewDmask;
9570 if (NoChannels) {
David Stuttardfc2a7472019-03-20 09:29:55 +00009571 if (!UsesTFC) {
9572 // No uses of the result and not using TFC. Then do nothing.
9573 return Node;
9574 }
David Stuttardf77079f2019-01-14 11:55:24 +00009575 // If the original dmask has only one channel set, there is nothing to do.
9576 if (OldBitsSet == 1)
9577 return Node;
9578 // Use an arbitrary dmask - required for the instruction to work
9579 NewDmask = 1;
9580 }
Tom Stellard54774e52013-10-23 02:53:47 +00009581 // Abort if there's no change
9582 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +00009583 return Node;
9584
9585 unsigned BitsSet = countPopulation(NewDmask);
9586
David Stuttardf77079f2019-01-14 11:55:24 +00009587 // Check for TFE or LWE - these increase the number of channels by one to
9588 // account for the extra return value.
9589 // This will need adjustment for D16 if it is ever handled by adjustWritemask
9590 // (this function), but at present D16 is excluded.
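  // For example, a new dmask with two bits set plus TFE enabled needs three
  // result channels.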
9591 unsigned NewChannels = BitsSet + UsesTFC;
9592
9593 int NewOpcode =
9594 AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
Matt Arsenault68f05052017-12-04 22:18:27 +00009595 assert(NewOpcode != -1 &&
9596 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
9597 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +00009598
9599 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +00009600 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009601 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009602 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009603 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +00009604
Matt Arsenault68f05052017-12-04 22:18:27 +00009605 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
9606
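  // Round a 3-channel result up to a 4-element vector, and a 5-channel result
  // (4 data channels plus the TFE/LWE status) up to an 8-element vector.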
David Stuttardf77079f2019-01-14 11:55:24 +00009607 MVT ResultVT = NewChannels == 1 ?
9608 SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
9609 NewChannels == 5 ? 8 : NewChannels);
Matt Arsenault856777d2017-12-08 20:00:57 +00009610 SDVTList NewVTList = HasChain ?
9611 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
9612
Matt Arsenault68f05052017-12-04 22:18:27 +00009613
9614 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
9615 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +00009616
Matt Arsenault856777d2017-12-08 20:00:57 +00009617 if (HasChain) {
9618 // Update chain.
Chandler Carruth66654b72018-08-14 23:30:32 +00009619 DAG.setNodeMemRefs(NewNode, Node->memoperands());
Matt Arsenault856777d2017-12-08 20:00:57 +00009620 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
9621 }
Matt Arsenault68f05052017-12-04 22:18:27 +00009622
David Stuttardf77079f2019-01-14 11:55:24 +00009623 if (NewChannels == 1) {
Matt Arsenault68f05052017-12-04 22:18:27 +00009624 assert(Node->hasNUsesOfValue(1, 0));
9625 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
9626 SDLoc(Node), Users[Lane]->getValueType(0),
9627 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +00009628 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +00009629 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +00009630 }
9631
Christian Konig8e06e2a2013-04-10 08:39:08 +00009632 // Update the users of the node with the new indices
David Stuttardf77079f2019-01-14 11:55:24 +00009633 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009634 SDNode *User = Users[i];
David Stuttardf77079f2019-01-14 11:55:24 +00009635 if (!User) {
9636 // Handle the special case of NoChannels. We set NewDmask to 1 above, but
9637 // Users[0] is still nullptr because channel 0 doesn't really have a use.
9638 if (i || !NoChannels)
9639 continue;
9640 } else {
9641 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
9642 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
9643 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009644
9645 switch (Idx) {
9646 default: break;
9647 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
9648 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
9649 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
David Stuttardf77079f2019-01-14 11:55:24 +00009650 case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009651 }
9652 }
Matt Arsenault68f05052017-12-04 22:18:27 +00009653
9654 DAG.RemoveDeadNode(Node);
9655 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009656}
9657
Tom Stellardc98ee202015-07-16 19:40:07 +00009658static bool isFrameIndexOp(SDValue Op) {
9659 if (Op.getOpcode() == ISD::AssertZext)
9660 Op = Op.getOperand(0);
9661
9662 return isa<FrameIndexSDNode>(Op);
9663}
9664
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009665/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +00009666/// with frame index operands.
9667/// LLVM assumes that inputs to these instructions are registers.
Matt Arsenault0d0d6c22017-04-12 21:58:23 +00009668SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
9669 SelectionDAG &DAG) const {
9670 if (Node->getOpcode() == ISD::CopyToReg) {
9671 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
9672 SDValue SrcVal = Node->getOperand(2);
9673
9674 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
9675 // to try understanding copies to physical registers.
9676 if (SrcVal.getValueType() == MVT::i1 &&
9677 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
9678 SDLoc SL(Node);
9679 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9680 SDValue VReg = DAG.getRegister(
9681 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
9682
9683 SDNode *Glued = Node->getGluedNode();
9684 SDValue ToVReg
9685 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
9686 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
9687 SDValue ToResultReg
9688 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
9689 VReg, ToVReg.getValue(1));
9690 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
9691 DAG.RemoveDeadNode(Node);
9692 return ToResultReg.getNode();
9693 }
9694 }
Tom Stellard8dd392e2014-10-09 18:09:15 +00009695
9696 SmallVector<SDValue, 8> Ops;
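  // Materialize each frame index operand with an S_MOV_B32 so the selected
  // instruction only sees register operands.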
Tom Stellard3457a842014-10-09 19:06:00 +00009697 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00009698 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00009699 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00009700 continue;
9701 }
9702
Tom Stellard3457a842014-10-09 19:06:00 +00009703 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00009704 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00009705 Node->getOperand(i).getValueType(),
9706 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00009707 }
9708
Mark Searles4e3d6162017-10-16 23:38:53 +00009709 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00009710}
9711
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009712/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +00009713/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +00009714SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
9715 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009716 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00009717 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00009718
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00009719 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009720 !TII->isGather4(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +00009721 return adjustWritemask(Node, DAG);
9722 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009723
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00009724 if (Opcode == AMDGPU::INSERT_SUBREG ||
9725 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00009726 legalizeTargetIndependentNode(Node, DAG);
9727 return Node;
9728 }
Matt Arsenault206f8262017-08-01 20:49:41 +00009729
9730 switch (Opcode) {
9731 case AMDGPU::V_DIV_SCALE_F32:
9732 case AMDGPU::V_DIV_SCALE_F64: {
9733 // Satisfy the operand register constraint when one of the inputs is
9734 // undefined. Ordinarily each undef value will have its own implicit_def of
9735 // a vreg, so force these to use a single register.
9736 SDValue Src0 = Node->getOperand(0);
9737 SDValue Src1 = Node->getOperand(1);
9738 SDValue Src2 = Node->getOperand(2);
9739
9740 if ((Src0.isMachineOpcode() &&
9741 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
9742 (Src0 == Src1 || Src0 == Src2))
9743 break;
9744
9745 MVT VT = Src0.getValueType().getSimpleVT();
Alexander Timofeevba447ba2019-05-26 20:33:26 +00009746 const TargetRegisterClass *RC =
9747 getRegClassFor(VT, Src0.getNode()->isDivergent());
Matt Arsenault206f8262017-08-01 20:49:41 +00009748
9749 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9750 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
9751
9752 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
9753 UndefReg, Src0, SDValue());
9754
9755 // src0 must be the same register as src1 or src2, even if the value is
9756 // undefined, so make sure we don't violate this constraint.
9757 if (Src0.isMachineOpcode() &&
9758 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
9759 if (Src1.isMachineOpcode() &&
9760 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9761 Src0 = Src1;
9762 else if (Src2.isMachineOpcode() &&
9763 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9764 Src0 = Src2;
9765 else {
9766 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
9767 Src0 = UndefReg;
9768 Src1 = UndefReg;
9769 }
9770 } else
9771 break;
9772
9773 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
9774 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
9775 Ops.push_back(Node->getOperand(I));
9776
9777 Ops.push_back(ImpDef.getValue(1));
9778 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
9779 }
Stanislav Mekhanoshin5f581c92019-06-12 17:52:51 +00009780 case AMDGPU::V_PERMLANE16_B32:
9781 case AMDGPU::V_PERMLANEX16_B32: {
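    // When fi or bound_ctrl is set the old value of $vdst is not used as the
    // permute fallback, so the tied $vdst_in operand can be an IMPLICIT_DEF;
    // this drops the dependency on the previous destination value.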
9782 ConstantSDNode *FI = cast<ConstantSDNode>(Node->getOperand(0));
9783 ConstantSDNode *BC = cast<ConstantSDNode>(Node->getOperand(2));
9784 if (!FI->getZExtValue() && !BC->getZExtValue())
9785 break;
9786 SDValue VDstIn = Node->getOperand(6);
9787 if (VDstIn.isMachineOpcode()
9788 && VDstIn.getMachineOpcode() == AMDGPU::IMPLICIT_DEF)
9789 break;
9790 MachineSDNode *ImpDef = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
9791 SDLoc(Node), MVT::i32);
9792 SmallVector<SDValue, 8> Ops = { SDValue(FI, 0), Node->getOperand(1),
9793 SDValue(BC, 0), Node->getOperand(3),
9794 Node->getOperand(4), Node->getOperand(5),
9795 SDValue(ImpDef, 0), Node->getOperand(7) };
9796 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
9797 }
Matt Arsenault206f8262017-08-01 20:49:41 +00009798 default:
9799 break;
9800 }
9801
Tom Stellard654d6692015-01-08 15:08:17 +00009802 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009803}
Christian Konig8b1ed282013-04-10 08:39:16 +00009804
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009805/// Assign the register class depending on the number of
Christian Konig8b1ed282013-04-10 08:39:16 +00009806/// bits set in the writemask
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009807void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00009808 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009809 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009810
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009811 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009812
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009813 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009814 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009815 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009816 return;
9817 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00009818
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009819 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009820 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009821 if (NoRetAtomicOp != -1) {
9822 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009823 MI.setDesc(TII->get(NoRetAtomicOp));
9824 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00009825 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009826 }
9827
Tom Stellard354a43c2016-04-01 18:27:37 +00009828 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
9829 // instruction, because the return type of these instructions is a vec2 of
9830 // the memory type, so it can be tied to the input operand.
9831 // This means these instructions always have a use, so we need to add a
9832 // special case to check if the atomic has only one extract_subreg use,
9833 // which itself has no uses.
9834 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00009835 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00009836 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
9837 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009838 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00009839
9840 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009841 MI.setDesc(TII->get(NoRetAtomicOp));
9842 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00009843
9844 // If we only remove the def operand from the atomic instruction, the
9845 // extract_subreg will be left with a use of a vreg without a def.
9846 // So we need to insert an implicit_def to avoid machine verifier
9847 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009848 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00009849 TII->get(AMDGPU::IMPLICIT_DEF), Def);
9850 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009851 return;
9852 }
Christian Konig8b1ed282013-04-10 08:39:16 +00009853}
Tom Stellard0518ff82013-06-03 17:39:58 +00009854
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009855static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
9856 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009857 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00009858 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
9859}
9860
9861MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009862 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00009863 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009864 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00009865
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009866 // Build the half of the subregister with the constants before building the
9867 // full 128-bit register. If we are building multiple resource descriptors,
9868 // this will allow CSEing of the 2-component register.
9869 const SDValue Ops0[] = {
9870 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
9871 buildSMovImm32(DAG, DL, 0),
9872 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
9873 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
9874 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
9875 };
Matt Arsenault485defe2014-11-05 19:01:17 +00009876
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009877 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
9878 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00009879
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009880 // Combine the constants and the pointer.
9881 const SDValue Ops1[] = {
9882 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
9883 Ptr,
9884 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
9885 SubRegHi,
9886 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
9887 };
Matt Arsenault485defe2014-11-05 19:01:17 +00009888
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009889 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00009890}
9891
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009892/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00009893/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
9894/// of the resource descriptor) to create an offset, which is added to
9895/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009896MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
9897 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009898 uint64_t RsrcDword2And3) const {
9899 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
9900 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
9901 if (RsrcDword1) {
9902 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009903 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
9904 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009905 }
9906
9907 SDValue DataLo = buildSMovImm32(DAG, DL,
9908 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
9909 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
9910
9911 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009912 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009913 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009914 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009915 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009916 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009917 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009918 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009919 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009920 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009921 };
9922
9923 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
9924}
9925
Tom Stellardd7e6f132015-04-08 01:09:26 +00009926//===----------------------------------------------------------------------===//
9927// SI Inline Assembly Support
9928//===----------------------------------------------------------------------===//
9929
9930std::pair<unsigned, const TargetRegisterClass *>
9931SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00009932 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00009933 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009934 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009935 if (Constraint.size() == 1) {
9936 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009937 default:
9938 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009939 case 's':
9940 case 'r':
9941 switch (VT.getSizeInBits()) {
9942 default:
9943 return std::make_pair(0U, nullptr);
9944 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00009945 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009946 RC = &AMDGPU::SReg_32_XM0RegClass;
9947 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009948 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009949 RC = &AMDGPU::SGPR_64RegClass;
9950 break;
Tim Renouf361b5b22019-03-21 12:01:21 +00009951 case 96:
9952 RC = &AMDGPU::SReg_96RegClass;
9953 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009954 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009955 RC = &AMDGPU::SReg_128RegClass;
9956 break;
Tim Renouf033f99a2019-03-22 10:11:21 +00009957 case 160:
9958 RC = &AMDGPU::SReg_160RegClass;
9959 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009960 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009961 RC = &AMDGPU::SReg_256RegClass;
9962 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +00009963 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009964 RC = &AMDGPU::SReg_512RegClass;
9965 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009966 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009967 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009968 case 'v':
9969 switch (VT.getSizeInBits()) {
9970 default:
9971 return std::make_pair(0U, nullptr);
9972 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00009973 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009974 RC = &AMDGPU::VGPR_32RegClass;
9975 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009976 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009977 RC = &AMDGPU::VReg_64RegClass;
9978 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009979 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009980 RC = &AMDGPU::VReg_96RegClass;
9981 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009982 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009983 RC = &AMDGPU::VReg_128RegClass;
9984 break;
Tim Renouf033f99a2019-03-22 10:11:21 +00009985 case 160:
9986 RC = &AMDGPU::VReg_160RegClass;
9987 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009988 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009989 RC = &AMDGPU::VReg_256RegClass;
9990 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009991 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009992 RC = &AMDGPU::VReg_512RegClass;
9993 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009994 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009995 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +00009996 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009997 // We actually support i128, i16 and f16 as inline parameters
9998 // even if they are not reported as legal
9999 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
10000 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
10001 return std::make_pair(0U, RC);
Tom Stellardd7e6f132015-04-08 01:09:26 +000010002 }
10003
10004 if (Constraint.size() > 1) {
Tom Stellardd7e6f132015-04-08 01:09:26 +000010005 if (Constraint[1] == 'v') {
10006 RC = &AMDGPU::VGPR_32RegClass;
10007 } else if (Constraint[1] == 's') {
10008 RC = &AMDGPU::SGPR_32RegClass;
10009 }
10010
10011 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +000010012 uint32_t Idx;
10013 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
10014 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +000010015 return std::make_pair(RC->getRegister(Idx), RC);
10016 }
10017 }
10018 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10019}
Tom Stellardb3c3bda2015-12-10 02:12:53 +000010020
10021SITargetLowering::ConstraintType
10022SITargetLowering::getConstraintType(StringRef Constraint) const {
10023 if (Constraint.size() == 1) {
10024 switch (Constraint[0]) {
10025 default: break;
10026 case 's':
10027 case 'v':
10028 return C_RegisterClass;
10029 }
10030 }
10031 return TargetLowering::getConstraintType(Constraint);
10032}
Matt Arsenault1cc47f82017-07-18 16:44:56 +000010033
10034// Figure out which registers should be reserved for stack access. Only after
10035// the function is legalized do we know all of the non-spill stack objects or if
10036// calls are present.
10037void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
10038 MachineRegisterInfo &MRI = MF.getRegInfo();
10039 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +000010040 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Tom Stellardc5a154d2018-06-28 23:47:12 +000010041 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault1cc47f82017-07-18 16:44:56 +000010042
10043 if (Info->isEntryFunction()) {
10044 // Callable functions have fixed registers used for stack access.
10045 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
10046 }
10047
Matt Arsenaultb812b7a2019-06-05 22:20:47 +000010048 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
10049 Info->getStackPtrOffsetReg()));
10050 if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
10051 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
Matt Arsenault1cc47f82017-07-18 16:44:56 +000010052
Matt Arsenaultbc6d07c2019-03-14 22:54:43 +000010053 // We need to worry about replacing the default register with itself in case
10054 // of MIR testcases missing the MFI.
10055 if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
10056 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
10057
10058 if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
10059 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
10060
10061 if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
10062 MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
10063 Info->getScratchWaveOffsetReg());
10064 }
Matt Arsenault1cc47f82017-07-18 16:44:56 +000010065
Stanislav Mekhanoshind4b500c2018-05-31 05:36:04 +000010066 Info->limitOccupancy(MF);
10067
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +000010068 if (ST.isWave32() && !MF.empty()) {
10069 // Add a VCC_HI def because many instructions are marked as implicitly using
10070 // VCC while we may only define VCC_LO. If nothing defines VCC_HI we may end
10071 // up with a use of undef.
10072
10073 const SIInstrInfo *TII = ST.getInstrInfo();
10074 DebugLoc DL;
10075
10076 MachineBasicBlock &MBB = MF.front();
10077 MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr();
10078 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI);
10079
10080 for (auto &MBB : MF) {
10081 for (auto &MI : MBB) {
10082 TII->fixImplicitOperands(MI);
10083 }
10084 }
10085 }
10086
Matt Arsenault1cc47f82017-07-18 16:44:56 +000010087 TargetLoweringBase::finalizeLowering(MF);
10088}
Matt Arsenault45b98182017-11-15 00:45:43 +000010089
10090void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
10091 KnownBits &Known,
10092 const APInt &DemandedElts,
10093 const SelectionDAG &DAG,
10094 unsigned Depth) const {
10095 TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10096 DAG, Depth);
10097
Matt Arsenault5c714cb2019-05-23 19:38:14 +000010098 // Set the high bits to zero based on the maximum allowed scratch size per
10099 // wave. We can't use vaddr in MUBUF instructions if we don't know the address
Matt Arsenault45b98182017-11-15 00:45:43 +000010100 // calculation won't overflow, so assume the sign bit is never set.
Matt Arsenault5c714cb2019-05-23 19:38:14 +000010101 Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
Matt Arsenault45b98182017-11-15 00:45:43 +000010102}
Tom Stellard264c1712018-06-13 15:06:37 +000010103
Stanislav Mekhanoshin93f15c92019-05-03 21:17:29 +000010104unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10105 const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10106 const unsigned CacheLineAlign = 6; // log2(64)
10107
10108 // Pre-GFX10 targets did not benefit from loop alignment.
10109 if (!ML || DisableLoopAlignment ||
10110 (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10111 getSubtarget()->hasInstFwdPrefetchBug())
10112 return PrefAlign;
10113
10114 // On GFX10 the I$ consists of 4 x 64-byte cache lines.
10115 // By default the prefetcher keeps one cache line behind and reads two ahead.
10116 // We can modify it with S_INST_PREFETCH so that larger loops keep two lines
10117 // behind and one ahead.
10118 // Therefore we can benefit from aligning loop headers if the loop fits in
10119 // 192 bytes. If the loop fits in 64 bytes it always spans no more than two
10120 // cache lines and does not need alignment.
10121 // Else, if the loop is at most 128 bytes, we do not need to modify the prefetch.
10122 // Else, if the loop is at most 192 bytes, we need two lines behind.
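  // For example, a 150-byte loop gets a 64-byte aligned header and
  // S_INST_PREFETCH instructions inserted in its preheader and exit block
  // (assuming it has a preheader and a unique exit block).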
10123
10124 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10125 const MachineBasicBlock *Header = ML->getHeader();
10126 if (Header->getAlignment() != PrefAlign)
10127 return Header->getAlignment(); // Already processed.
10128
10129 unsigned LoopSize = 0;
10130 for (const MachineBasicBlock *MBB : ML->blocks()) {
10131 // If an inner loop block is aligned, assume on average half of the alignment
10132 // size is added as nops.
10133 if (MBB != Header)
10134 LoopSize += (1 << MBB->getAlignment()) / 2;
10135
10136 for (const MachineInstr &MI : *MBB) {
10137 LoopSize += TII->getInstSizeInBytes(MI);
10138 if (LoopSize > 192)
10139 return PrefAlign;
10140 }
10141 }
10142
10143 if (LoopSize <= 64)
10144 return PrefAlign;
10145
10146 if (LoopSize <= 128)
10147 return CacheLineAlign;
10148
10149 // If any of the parent loops is surrounded by prefetch instructions, do not
10150 // insert new ones for the inner loop, as that would reset the parent's settings.
10151 for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10152 if (MachineBasicBlock *Exit = P->getExitBlock()) {
10153 auto I = Exit->getFirstNonDebugInstr();
10154 if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10155 return CacheLineAlign;
10156 }
10157 }
10158
10159 MachineBasicBlock *Pre = ML->getLoopPreheader();
10160 MachineBasicBlock *Exit = ML->getExitBlock();
10161
10162 if (Pre && Exit) {
10163 BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10164 TII->get(AMDGPU::S_INST_PREFETCH))
10165 .addImm(1); // prefetch 2 lines behind PC
10166
10167 BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10168 TII->get(AMDGPU::S_INST_PREFETCH))
10169 .addImm(2); // prefetch 1 line behind PC
10170 }
10171
10172 return CacheLineAlign;
10173}
10174
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010175LLVM_ATTRIBUTE_UNUSED
10176static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10177 assert(N->getOpcode() == ISD::CopyFromReg);
10178 do {
10179 // Follow the chain until we find an INLINEASM node.
10180 N = N->getOperand(0).getNode();
Craig Topper784929d2019-02-08 20:48:56 +000010181 if (N->getOpcode() == ISD::INLINEASM ||
10182 N->getOpcode() == ISD::INLINEASM_BR)
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010183 return true;
10184 } while (N->getOpcode() == ISD::CopyFromReg);
10185 return false;
10186}
10187
Tom Stellard264c1712018-06-13 15:06:37 +000010188bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
Nicolai Haehnle35617ed2018-08-30 14:21:36 +000010189 FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const
Tom Stellard264c1712018-06-13 15:06:37 +000010190{
10191 switch (N->getOpcode()) {
Tom Stellard264c1712018-06-13 15:06:37 +000010192 case ISD::CopyFromReg:
10193 {
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010194 const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
10195 const MachineFunction * MF = FLI->MF;
10196 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
10197 const MachineRegisterInfo &MRI = MF->getRegInfo();
10198 const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
10199 unsigned Reg = R->getReg();
10200 if (TRI.isPhysicalRegister(Reg))
10201 return !TRI.isSGPRReg(MRI, Reg);
Tom Stellard264c1712018-06-13 15:06:37 +000010202
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010203 if (MRI.isLiveIn(Reg)) {
10204 // Live-in VGPRs (e.g. workitem.id.x/y/z) and any other VGPR formal argument
10205 // are considered divergent.
10206 if (!TRI.isSGPRReg(MRI, Reg))
10207 return true;
10208 // Formal arguments of non-entry functions
10209 // are conservatively considered divergent
10210 else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
10211 return true;
10212 return false;
Tom Stellard264c1712018-06-13 15:06:37 +000010213 }
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010214 const Value *V = FLI->getValueFromVirtualReg(Reg);
10215 if (V)
10216 return KDA->isDivergent(V);
10217 assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10218 return !TRI.isSGPRReg(MRI, Reg);
Tom Stellard264c1712018-06-13 15:06:37 +000010219 }
10220 break;
10221 case ISD::LOAD: {
Matt Arsenault813613c2018-09-04 18:58:19 +000010222 const LoadSDNode *L = cast<LoadSDNode>(N);
10223 unsigned AS = L->getAddressSpace();
10224 // A flat load may access private memory.
10225 return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
Tom Stellard264c1712018-06-13 15:06:37 +000010226 } break;
10227 case ISD::CALLSEQ_END:
10228 return true;
10229 break;
10230 case ISD::INTRINSIC_WO_CHAIN:
10234 return AMDGPU::isIntrinsicSourceOfDivergence(
10235 cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
10236 case ISD::INTRINSIC_W_CHAIN:
10237 return AMDGPU::isIntrinsicSourceOfDivergence(
10238 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
10239 // In some cases intrinsics that are a source of divergence have been
10240 // lowered to AMDGPUISD so we also need to check those too.
10241 case AMDGPUISD::INTERP_MOV:
10242 case AMDGPUISD::INTERP_P1:
10243 case AMDGPUISD::INTERP_P2:
10244 return true;
10245 }
10246 return false;
10247}
Matt Arsenaultf8768bf2018-08-06 21:38:27 +000010248
10249bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
10250 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10251 case MVT::f32:
10252 return Subtarget->hasFP32Denormals();
10253 case MVT::f64:
10254 return Subtarget->hasFP64Denormals();
10255 case MVT::f16:
10256 return Subtarget->hasFP16Denormals();
10257 default:
10258 return false;
10259 }
10260}
Matt Arsenault687ec752018-10-22 16:27:27 +000010261
10262bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10263 const SelectionDAG &DAG,
10264 bool SNaN,
10265 unsigned Depth) const {
10266 if (Op.getOpcode() == AMDGPUISD::CLAMP) {
Matt Arsenault055e4dc2019-03-29 19:14:54 +000010267 const MachineFunction &MF = DAG.getMachineFunction();
10268 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10269
10270 if (Info->getMode().DX10Clamp)
Matt Arsenault687ec752018-10-22 16:27:27 +000010271 return true; // Clamped to 0.
10272 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10273 }
10274
10275 return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10276 SNaN, Depth);
10277}
Matt Arsenaulta5840c32019-01-22 18:36:06 +000010278
10279TargetLowering::AtomicExpansionKind
10280SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
10281 switch (RMW->getOperation()) {
10282 case AtomicRMWInst::FAdd: {
10283 Type *Ty = RMW->getType();
10284
10285 // We don't have a way to support 16-bit atomics now, so just leave them
10286 // as-is.
10287 if (Ty->isHalfTy())
10288 return AtomicExpansionKind::None;
10289
10290 if (!Ty->isFloatTy())
10291 return AtomicExpansionKind::CmpXChg;
10292
10293 // TODO: Do have these for flat. Older targets also had them for buffers.
10294 unsigned AS = RMW->getPointerAddressSpace();
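    // A float add to LDS can be handled without expansion when the subtarget
    // has LDS FP atomics (e.g. it can select ds_add_f32); everything else is
    // expanded to a cmpxchg loop.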
10295 return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
10296 AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
10297 }
10298 default:
10299 break;
10300 }
10301
10302 return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
10303}