//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));

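// Return the first SGPR_32 register that has not already been allocated by
// CCInfo.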
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations on these types are
    // really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector stores from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, MVT::v32i32 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // Deal with vec3 vector operations when widened to vec4.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Expand);

  // Deal with vec5 vector operations when widened to vec8.
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Expand);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Expand);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // let LLVM add the comparison
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // On SI this is s_memtime and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

  // These are really only legal for ieee_mode functions. We should be avoiding
  // them for functions that don't have ieee_mode enabled, so just say they are
  // legal.
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals() && STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns)
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    // Legalization hack.
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case when denormals are enabled where this is
// OK to use, and we don't currently handle it.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 && Subtarget->has16BitInsts())
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    if (Size == 16 && Subtarget->has16BitInsts())
      return (VT.getVectorNumElements() + 1) / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

static MVT memVTFromAggregate(Type *Ty) {
  // Only limited forms of aggregate type currently expected.
  assert(Ty->isStructTy() && "Expected struct type");

  Type *ElementType = nullptr;
  unsigned NumElts;
  if (Ty->getContainedType(0)->isVectorTy()) {
    VectorType *VecComponent = cast<VectorType>(Ty->getContainedType(0));
    ElementType = VecComponent->getElementType();
    NumElts = VecComponent->getNumElements();
  } else {
    ElementType = Ty->getContainedType(0);
    NumElts = 1;
  }

  assert((Ty->getContainedType(1) && Ty->getContainedType(1)->isIntegerTy(32)) &&
         "Expected int32 type");

  // Calculate the size of the memVT type from the aggregate
  unsigned Pow2Elts = 0;
  unsigned ElementSize;
  switch (ElementType->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    ElementSize = cast<IntegerType>(ElementType)->getBitWidth();
    break;
  case Type::HalfTyID:
    ElementSize = 16;
    break;
  case Type::FloatTyID:
    ElementSize = 32;
    break;
  }
  unsigned AdditionalElts = ElementSize == 16 ? 2 : 1;
  Pow2Elts = 1 << Log2_32_Ceil(NumElts + AdditionalElts);

  return MVT::getVectorVT(MVT::getVT(ElementType, false),
                          Pow2Elts);
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType(), true);
      if (Info.memVT == MVT::Other) {
        // Some intrinsics return an aggregate type - special case to work out
        // the correct memVT
        Info.memVT = memVTFromAggregate(CI.getType());
      }
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
      Info.flags |= MachineMemOperand::MOVolatile;
    }
    return true;
  }

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

    const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1));
    if (!Vol->isZero())
      Info.flags |= MachineMemOperand::MOVolatile;

    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // GFX10 shrank the signed offset to 12 bits. When using regular flat
  // instructions, the sign bit is also ignored and the offset is treated as an
  // 11-bit unsigned offset.

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10)
    return isUInt<11>(AM.BaseOffs) && AM.Scale == 0;

  // Just r + i
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const {
  if (Subtarget->hasFlatGlobalInsts())
    return isInt<13>(AM.BaseOffs) && AM.Scale == 0;

  if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) {
    // Assume that we will use FLAT for all global memory accesses
1017 // on VI.
1018 // FIXME: This assumption is currently wrong. On VI we still use
1019 // MUBUF instructions for the r + i addressing mode. As currently
1020 // implemented, the MUBUF instructions only work on buffer < 4GB.
1021 // It may be possible to support > 4GB buffers with MUBUF instructions,
1022 // by setting the stride value in the resource descriptor which would
1023 // increase the size limit to (stride * 4GB). However, this is risky,
1024 // because it has never been validated.
1025 return isLegalFlatAddressingMode(AM);
1026 }
1027
1028 return isLegalMUBUFAddressingMode(AM);
1029}
1030
Matt Arsenault711b3902015-08-07 20:18:34 +00001031bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
1032 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
1033 // additionally can do r + r + i with addr64. 32-bit has more addressing
1034 // mode options. Depending on the resource constant, it can also do
1035 // (i64 r0) + (i32 r1) * (i14 i).
1036 //
1037 // Private arrays end up using a scratch buffer most of the time, so also
1038 // assume those use MUBUF instructions. Scratch loads / stores are currently
1039 // implemented as MUBUF instructions with the offen bit set, so they are
1040 // slightly different from the normal addr64 mode.
1041 if (!isUInt<12>(AM.BaseOffs))
1042 return false;
1043
1044 // FIXME: Since we can split the immediate into an soffset and an immediate
1045 // offset, would it make sense to allow any immediate?
1046
1047 switch (AM.Scale) {
1048 case 0: // r + i or just i, depending on HasBaseReg.
1049 return true;
1050 case 1:
1051 return true; // We have r + r or r + i.
1052 case 2:
1053 if (AM.HasBaseReg) {
1054 // Reject 2 * r + r.
1055 return false;
1056 }
1057
1058 // Allow 2 * r as r + r,
1059 // and 2 * r + i as r + r + i.
1060 return true;
1061 default: // Don't allow n * r
1062 return false;
1063 }
1064}
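
// For illustration only (hypothetical values, not from this function's
// callers): under the rules above,
//
//   { BaseOffs = 4092, Scale = 1, HasBaseReg = true }  // r + r + i -> legal
//   { BaseOffs = 4096, Scale = 0 }                     // exceeds 12-bit offset -> rejected
//   { BaseOffs = 0,    Scale = 2, HasBaseReg = true }  // 2 * r + r -> rejected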
1065
Mehdi Amini0cdec1e2015-07-09 02:09:40 +00001066bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
1067 const AddrMode &AM, Type *Ty,
Jonas Paulsson024e3192017-07-21 11:59:37 +00001068 unsigned AS, Instruction *I) const {
Matt Arsenault5015a892014-08-15 17:17:07 +00001069 // No global is ever allowed as a base.
1070 if (AM.BaseGV)
1071 return false;
1072
Matt Arsenault0da63502018-08-31 05:49:54 +00001073 if (AS == AMDGPUAS::GLOBAL_ADDRESS)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001074 return isLegalGlobalAddressingMode(AM);
Matt Arsenault5015a892014-08-15 17:17:07 +00001075
Matt Arsenault0da63502018-08-31 05:49:54 +00001076 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
Neil Henning523dab02019-03-18 14:44:28 +00001077 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
1078 AS == AMDGPUAS::BUFFER_FAT_POINTER) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001079 // If the offset isn't a multiple of 4, it probably isn't going to be
1080 // correctly aligned.
Matt Arsenault3cc1e002016-08-13 01:43:51 +00001081 // FIXME: Can we get the real alignment here?
Matt Arsenault711b3902015-08-07 20:18:34 +00001082 if (AM.BaseOffs % 4 != 0)
1083 return isLegalMUBUFAddressingMode(AM);
1084
1085 // There are no SMRD extloads, so if we have to do a small type access we
1086 // will use a MUBUF load.
1087 // FIXME?: We also need to do this if unaligned, but we don't know the
1088 // alignment here.
Stanislav Mekhanoshin57d341c2018-05-15 22:07:51 +00001089 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4)
Matt Arsenaultdc8f5cc2017-07-29 01:12:31 +00001090 return isLegalGlobalAddressingMode(AM);
Matt Arsenault711b3902015-08-07 20:18:34 +00001091
Tom Stellard5bfbae52018-07-11 20:59:01 +00001092 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001093 // SMRD instructions have an 8-bit, dword offset on SI.
1094 if (!isUInt<8>(AM.BaseOffs / 4))
1095 return false;
Tom Stellard5bfbae52018-07-11 20:59:01 +00001096 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001097 // On CI+, this can also be a 32-bit literal constant offset. If it fits
1098 // in 8 bits, it can use a smaller encoding.
1099 if (!isUInt<32>(AM.BaseOffs / 4))
1100 return false;
Tom Stellard5bfbae52018-07-11 20:59:01 +00001101 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001102 // On VI, these use the SMEM format and the offset is 20-bit in bytes.
1103 if (!isUInt<20>(AM.BaseOffs))
1104 return false;
1105 } else
1106 llvm_unreachable("unhandled generation");
1107
1108 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1109 return true;
1110
1111 if (AM.Scale == 1 && AM.HasBaseReg)
1112 return true;
1113
1114 return false;
Matt Arsenault711b3902015-08-07 20:18:34 +00001115
Matt Arsenault0da63502018-08-31 05:49:54 +00001116 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault711b3902015-08-07 20:18:34 +00001117 return isLegalMUBUFAddressingMode(AM);
Matt Arsenault0da63502018-08-31 05:49:54 +00001118 } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
1119 AS == AMDGPUAS::REGION_ADDRESS) {
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001120 // Basic, single offset DS instructions allow a 16-bit unsigned immediate
1121 // field.
1122 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
1123 // an 8-bit dword offset but we don't know the alignment here.
1124 if (!isUInt<16>(AM.BaseOffs))
Matt Arsenault5015a892014-08-15 17:17:07 +00001125 return false;
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001126
1127 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
1128 return true;
1129
1130 if (AM.Scale == 1 && AM.HasBaseReg)
1131 return true;
1132
Matt Arsenault5015a892014-08-15 17:17:07 +00001133 return false;
Matt Arsenault0da63502018-08-31 05:49:54 +00001134 } else if (AS == AMDGPUAS::FLAT_ADDRESS ||
1135 AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) {
Matt Arsenault7d1b6c82016-04-29 06:25:10 +00001136 // For an unknown address space, this usually means that this is for some
1137 // reason being used for pure arithmetic, and not based on some addressing
1138 // computation. We don't have instructions that compute pointers with any
1139 // addressing modes, so treat them as having no offset like flat
1140 // instructions.
Tom Stellard70580f82015-07-20 14:28:41 +00001141 return isLegalFlatAddressingMode(AM);
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00001142 } else {
Matt Arsenault73e06fa2015-06-04 16:17:42 +00001143 llvm_unreachable("unhandled address space");
1144 }
Matt Arsenault5015a892014-08-15 17:17:07 +00001145}
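
// Worked example of the SMRD offset limits above, assuming a dword-aligned
// BaseOffs (values are illustrative): BaseOffs = 1020 is 255 dwords and fits
// the 8-bit dword offset on SI; BaseOffs = 8192 is rejected on SI but is
// accepted on CI (32-bit literal dword offset) and on VI+ (20-bit byte
// offset, since isUInt<20>(8192) holds).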
1146
Nirav Dave4dcad5d2017-07-10 20:25:54 +00001147bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1148 const SelectionDAG &DAG) const {
Matt Arsenault0da63502018-08-31 05:49:54 +00001149 if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001150 return (MemVT.getSizeInBits() <= 4 * 32);
Matt Arsenault0da63502018-08-31 05:49:54 +00001151 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001152 unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
1153 return (MemVT.getSizeInBits() <= MaxPrivateBits);
Matt Arsenault0da63502018-08-31 05:49:54 +00001154 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Nirav Daved20066c2017-05-24 15:59:09 +00001155 return (MemVT.getSizeInBits() <= 2 * 32);
1156 }
1157 return true;
1158}
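
// Worked example (hedged; the private limit depends on the subtarget's
// MaxPrivateElementSize, assumed to be 4 bytes here):
//
//   canMergeStoresTo(GLOBAL_ADDRESS,  v4i32) -> true  (128 <= 4 * 32)
//   canMergeStoresTo(PRIVATE_ADDRESS, v2i32) -> false (64  >  32)
//   canMergeStoresTo(LOCAL_ADDRESS,   v2i32) -> true  (64 <= 2 * 32)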
1159
Simon Pilgrim4e0648a2019-06-12 17:14:03 +00001160bool SITargetLowering::allowsMisalignedMemoryAccesses(
1161 EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
1162 bool *IsFast) const {
Matt Arsenault1018c892014-04-24 17:08:26 +00001163 if (IsFast)
1164 *IsFast = false;
1165
Matt Arsenault1018c892014-04-24 17:08:26 +00001166 // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
1167 // which isn't a simple VT.
Alina Sbirlea6f937b12016-08-04 16:38:44 +00001168 // Until MVT is extended to handle this, simply check for the size and
1169 // rely on the condition below: allow accesses if the size is a multiple of 4.
1170 if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
1171 VT.getStoreSize() > 16)) {
Tom Stellard81d871d2013-11-13 23:36:50 +00001172 return false;
Alina Sbirlea6f937b12016-08-04 16:38:44 +00001173 }
Matt Arsenault1018c892014-04-24 17:08:26 +00001174
Matt Arsenault0da63502018-08-31 05:49:54 +00001175 if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1176 AddrSpace == AMDGPUAS::REGION_ADDRESS) {
Matt Arsenault6f2a5262014-07-27 17:46:40 +00001177 // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
1178 // aligned, 8 byte access in a single operation using ds_read2/write2_b32
1179 // with adjacent offsets.
Sanjay Patelce74db92015-09-03 15:03:19 +00001180 bool AlignedBy4 = (Align % 4 == 0);
1181 if (IsFast)
1182 *IsFast = AlignedBy4;
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001183
Sanjay Patelce74db92015-09-03 15:03:19 +00001184 return AlignedBy4;
Matt Arsenault6f2a5262014-07-27 17:46:40 +00001185 }
Matt Arsenault1018c892014-04-24 17:08:26 +00001186
Tom Stellard64a9d082016-10-14 18:10:39 +00001187 // FIXME: We have to be conservative here and assume that flat operations
1188 // will access scratch. If we had access to the IR function, then we
1189 // could determine if any private memory was used in the function.
1190 if (!Subtarget->hasUnalignedScratchAccess() &&
Matt Arsenault0da63502018-08-31 05:49:54 +00001191 (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
1192 AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
Matt Arsenaultf4320112018-09-24 13:18:15 +00001193 bool AlignedBy4 = Align >= 4;
1194 if (IsFast)
1195 *IsFast = AlignedBy4;
1196
1197 return AlignedBy4;
Tom Stellard64a9d082016-10-14 18:10:39 +00001198 }
1199
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001200 if (Subtarget->hasUnalignedBufferAccess()) {
1201 // If we have a uniform constant load, it still requires using a slow
1202 // buffer instruction if unaligned.
1203 if (IsFast) {
Matt Arsenault0da63502018-08-31 05:49:54 +00001204 *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
1205 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
Matt Arsenault7f681ac2016-07-01 23:03:44 +00001206 (Align % 4 == 0) : true;
1207 }
1208
1209 return true;
1210 }
1211
Tom Stellard33e64c62015-02-04 20:49:52 +00001212 // Values smaller than a dword must be aligned.
Tom Stellard33e64c62015-02-04 20:49:52 +00001213 if (VT.bitsLT(MVT::i32))
1214 return false;
1215
Matt Arsenault1018c892014-04-24 17:08:26 +00001216 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
1217 // byte-address are ignored, thus forcing Dword alignment.
Tom Stellarde812f2f2014-07-21 15:45:06 +00001218 // This applies to private, global, and constant memory.
Matt Arsenault1018c892014-04-24 17:08:26 +00001219 if (IsFast)
1220 *IsFast = true;
Tom Stellardc6b299c2015-02-02 18:02:28 +00001221
1222 return VT.bitsGT(MVT::i32) && Align % 4 == 0;
Tom Stellard0125f2a2013-06-25 02:39:35 +00001223}
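
// Rough summary of the rules above with concrete, illustrative cases: an
// 8-byte LDS access with 4-byte alignment is allowed (and reported fast)
// because it can be split into ds_read2/write2_b32, while on subtargets
// without unaligned buffer access a misaligned sub-dword global access
// (e.g. i16 with alignment 1) is rejected outright.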
1224
Sjoerd Meijer180f1ae2019-04-30 08:38:12 +00001225EVT SITargetLowering::getOptimalMemOpType(
1226 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
1227 bool ZeroMemset, bool MemcpyStrSrc,
1228 const AttributeList &FuncAttributes) const {
Matt Arsenault46645fa2014-07-28 17:49:26 +00001229 // FIXME: Should account for address space here.
1230
1231 // The default fallback uses the private pointer size as a guess for a type to
1232 // use. Make sure we switch these to 64-bit accesses.
1233
1234 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
1235 return MVT::v4i32;
1236
1237 if (Size >= 8 && DstAlign >= 4)
1238 return MVT::v2i32;
1239
1240 // Use the default.
1241 return MVT::Other;
1242}
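
// For illustration (hypothetical memcpy parameters): Size = 32 with
// DstAlign = 4 selects MVT::v4i32 (16-byte accesses), Size = 8 with
// DstAlign = 4 selects MVT::v2i32, and anything smaller or less aligned
// falls back to MVT::Other, i.e. the target-independent default.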
1243
Matt Arsenault0da63502018-08-31 05:49:54 +00001244static bool isFlatGlobalAddrSpace(unsigned AS) {
1245 return AS == AMDGPUAS::GLOBAL_ADDRESS ||
1246 AS == AMDGPUAS::FLAT_ADDRESS ||
Matt Arsenaulta8b43392019-02-08 02:40:47 +00001247 AS == AMDGPUAS::CONSTANT_ADDRESS ||
1248 AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001249}
1250
1251bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1252 unsigned DestAS) const {
Matt Arsenault0da63502018-08-31 05:49:54 +00001253 return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
Matt Arsenaultf9bfeaf2015-12-01 23:04:00 +00001254}
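
// For illustration: with isFlatGlobalAddrSpace above, a cast between
// FLAT_ADDRESS, GLOBAL_ADDRESS and CONSTANT_ADDRESS is treated as a no-op,
// since those pointers share one 64-bit representation, while a cast
// involving LOCAL_ADDRESS or PRIVATE_ADDRESS is not (see isFreeAddrSpaceCast
// below for the flat -> private/local truncation case).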
1255
Alexander Timofeev18009562016-12-08 17:28:47 +00001256bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
1257 const MemSDNode *MemNode = cast<MemSDNode>(N);
1258 const Value *Ptr = MemNode->getMemOperand()->getValue();
Matt Arsenault0a0c8712018-03-27 18:39:45 +00001259 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
Alexander Timofeev18009562016-12-08 17:28:47 +00001260 return I && I->getMetadata("amdgpu.noclobber");
1261}
1262
Matt Arsenault8dbeb922019-06-03 18:41:34 +00001263bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1264 unsigned DestAS) const {
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001265 // Flat -> private/local is a simple truncate.
1266 // Flat -> global is a no-op.
Matt Arsenault0da63502018-08-31 05:49:54 +00001267 if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
Matt Arsenaultd4da0ed2016-12-02 18:12:53 +00001268 return true;
1269
1270 return isNoopAddrSpaceCast(SrcAS, DestAS);
1271}
1272
Tom Stellarda6f24c62015-12-15 20:55:55 +00001273bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
1274 const MemSDNode *MemNode = cast<MemSDNode>(N);
Tom Stellarda6f24c62015-12-15 20:55:55 +00001275
Matt Arsenaultbcf7bec2018-02-09 16:57:48 +00001276 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand());
Tom Stellarda6f24c62015-12-15 20:55:55 +00001277}
1278
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001279TargetLoweringBase::LegalizeTypeAction
Craig Topper0b5f8162018-11-05 23:26:13 +00001280SITargetLowering::getPreferredVectorAction(MVT VT) const {
Chandler Carruth9d010ff2014-07-03 00:23:43 +00001281 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
1282 return TypeSplitVector;
1283
1284 return TargetLoweringBase::getPreferredVectorAction(VT);
Tom Stellardd86003e2013-08-14 23:25:00 +00001285}
Tom Stellard0125f2a2013-06-25 02:39:35 +00001286
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001287bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1288 Type *Ty) const {
Matt Arsenault749035b2016-07-30 01:40:36 +00001289 // FIXME: Could be smarter if called for vector constants.
1290 return true;
Matt Arsenaultd7bdcc42014-03-31 19:54:27 +00001291}
1292
Tom Stellard2e045bb2016-01-20 00:13:22 +00001293bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001294 if (Subtarget->has16BitInsts() && VT == MVT::i16) {
1295 switch (Op) {
1296 case ISD::LOAD:
1297 case ISD::STORE:
Tom Stellard2e045bb2016-01-20 00:13:22 +00001298
Matt Arsenault7b00cf42016-12-09 17:57:43 +00001299 // These operations are done with 32-bit instructions anyway.
1300 case ISD::AND:
1301 case ISD::OR:
1302 case ISD::XOR:
1303 case ISD::SELECT:
1304 // TODO: Extensions?
1305 return true;
1306 default:
1307 return false;
1308 }
1309 }
Konstantin Zhuravlyove14df4b2016-09-28 20:05:39 +00001310
Tom Stellard2e045bb2016-01-20 00:13:22 +00001311 // SimplifySetCC uses this function to determine whether or not it should
1312 // create setcc with i1 operands. We don't have instructions for i1 setcc.
1313 if (VT == MVT::i1 && Op == ISD::SETCC)
1314 return false;
1315
1316 return TargetLowering::isTypeDesirableForOp(Op, VT);
1317}
1318
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001319SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
1320 const SDLoc &SL,
1321 SDValue Chain,
1322 uint64_t Offset) const {
Mehdi Aminia749f2a2015-07-09 02:09:52 +00001323 const DataLayout &DL = DAG.getDataLayout();
Tom Stellardec2e43c2014-09-22 15:35:29 +00001324 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001325 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1326
1327 const ArgDescriptor *InputPtrReg;
1328 const TargetRegisterClass *RC;
1329
1330 std::tie(InputPtrReg, RC)
1331 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Tom Stellard94593ee2013-06-03 17:40:18 +00001332
Matt Arsenault86033ca2014-07-28 17:31:39 +00001333 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
Matt Arsenault0da63502018-08-31 05:49:54 +00001334 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
Matt Arsenaulta0269b62015-06-01 21:58:24 +00001335 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001336 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
1337
Matt Arsenault2fb9ccf2018-05-29 17:42:38 +00001338 return DAG.getObjectPtrOffset(SL, BasePtr, Offset);
Jan Veselyfea814d2016-06-21 20:46:20 +00001339}
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00001340
Matt Arsenault9166ce82017-07-28 15:52:08 +00001341SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
1342 const SDLoc &SL) const {
Matt Arsenault75e71922018-06-28 10:18:55 +00001343 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(),
1344 FIRST_IMPLICIT);
Matt Arsenault9166ce82017-07-28 15:52:08 +00001345 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset);
1346}
1347
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001348SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
1349 const SDLoc &SL, SDValue Val,
1350 bool Signed,
Matt Arsenault6dca5422017-01-09 18:52:39 +00001351 const ISD::InputArg *Arg) const {
Tim Renouf361b5b22019-03-21 12:01:21 +00001352 // First, if it is a widened vector, narrow it.
1353 if (VT.isVector() &&
1354 VT.getVectorNumElements() != MemVT.getVectorNumElements()) {
1355 EVT NarrowedVT =
1356 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(),
1357 VT.getVectorNumElements());
1358 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val,
1359 DAG.getConstant(0, SL, MVT::i32));
1360 }
1361
1362 // Then convert the vector elements or scalar value.
Matt Arsenault6dca5422017-01-09 18:52:39 +00001363 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
1364 VT.bitsLT(MemVT)) {
1365 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
1366 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
1367 }
1368
Tom Stellardbc6c5232016-10-17 16:21:45 +00001369 if (MemVT.isFloatingPoint())
Matt Arsenault6dca5422017-01-09 18:52:39 +00001370 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001371 else if (Signed)
Matt Arsenault6dca5422017-01-09 18:52:39 +00001372 Val = DAG.getSExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001373 else
Matt Arsenault6dca5422017-01-09 18:52:39 +00001374 Val = DAG.getZExtOrTrunc(Val, SL, VT);
Tom Stellardbc6c5232016-10-17 16:21:45 +00001375
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001376 return Val;
1377}
1378
1379SDValue SITargetLowering::lowerKernargMemParameter(
1380 SelectionDAG &DAG, EVT VT, EVT MemVT,
1381 const SDLoc &SL, SDValue Chain,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00001382 uint64_t Offset, unsigned Align, bool Signed,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001383 const ISD::InputArg *Arg) const {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001384 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00001385 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001386 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
1387
Matt Arsenault90083d32018-06-07 09:54:49 +00001388 // Try to avoid using an extload by loading earlier than the argument address,
1389 // and extracting the relevant bits. The load should hopefully be merged with
1390 // the load for the previous argument.
Matt Arsenault4bec7d42018-07-20 09:05:08 +00001391 if (MemVT.getStoreSize() < 4 && Align < 4) {
1392 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
Matt Arsenault90083d32018-06-07 09:54:49 +00001393 int64_t AlignDownOffset = alignDown(Offset, 4);
1394 int64_t OffsetDiff = Offset - AlignDownOffset;
1395
1396 EVT IntVT = MemVT.changeTypeToInteger();
1397
1398 // TODO: If we passed in the base kernel offset we could have a better
1399 // alignment than 4, but we don't really need it.
1400 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset);
1401 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4,
1402 MachineMemOperand::MODereferenceable |
1403 MachineMemOperand::MOInvariant);
1404
1405 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32);
1406 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt);
1407
1408 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract);
1409 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal);
1410 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg);
1411
1412
1413 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL);
1414 }
1415
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001416 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
1417 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001418 MachineMemOperand::MODereferenceable |
1419 MachineMemOperand::MOInvariant);
1420
1421 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
Matt Arsenault6dca5422017-01-09 18:52:39 +00001422 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
Tom Stellard94593ee2013-06-03 17:40:18 +00001423}
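
// Sketch of the align-down trick above for small, under-aligned kernarg
// loads (hypothetical values): for a 2-byte argument at Offset = 6,
// AlignDownOffset = alignDown(6, 4) = 4 and OffsetDiff = 2, so one dword is
// loaded from offset 4, shifted right by OffsetDiff * 8 = 16 bits, and then
// truncated to the 16-bit argument type.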
1424
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001425SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
1426 const SDLoc &SL, SDValue Chain,
1427 const ISD::InputArg &Arg) const {
1428 MachineFunction &MF = DAG.getMachineFunction();
1429 MachineFrameInfo &MFI = MF.getFrameInfo();
1430
1431 if (Arg.Flags.isByVal()) {
1432 unsigned Size = Arg.Flags.getByValSize();
1433 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
1434 return DAG.getFrameIndex(FrameIdx, MVT::i32);
1435 }
1436
1437 unsigned ArgOffset = VA.getLocMemOffset();
1438 unsigned ArgSize = VA.getValVT().getStoreSize();
1439
1440 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);
1441
1442 // Create load nodes to retrieve arguments from the stack.
1443 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1444 SDValue ArgValue;
1445
1446 // For NON_EXTLOAD, the generic code in getLoad asserts that ValVT == MemVT.
1447 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
1448 MVT MemVT = VA.getValVT();
1449
1450 switch (VA.getLocInfo()) {
1451 default:
1452 break;
1453 case CCValAssign::BCvt:
1454 MemVT = VA.getLocVT();
1455 break;
1456 case CCValAssign::SExt:
1457 ExtType = ISD::SEXTLOAD;
1458 break;
1459 case CCValAssign::ZExt:
1460 ExtType = ISD::ZEXTLOAD;
1461 break;
1462 case CCValAssign::AExt:
1463 ExtType = ISD::EXTLOAD;
1464 break;
1465 }
1466
1467 ArgValue = DAG.getExtLoad(
1468 ExtType, SL, VA.getLocVT(), Chain, FIN,
1469 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
1470 MemVT);
1471 return ArgValue;
1472}
1473
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001474SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
1475 const SIMachineFunctionInfo &MFI,
1476 EVT VT,
1477 AMDGPUFunctionArgInfo::PreloadedValue PVID) const {
1478 const ArgDescriptor *Reg;
1479 const TargetRegisterClass *RC;
1480
1481 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID);
1482 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT);
1483}
1484
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001485static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
1486 CallingConv::ID CallConv,
1487 ArrayRef<ISD::InputArg> Ins,
1488 BitVector &Skipped,
1489 FunctionType *FType,
1490 SIMachineFunctionInfo *Info) {
1491 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001492 const ISD::InputArg *Arg = &Ins[I];
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001493
Matt Arsenault55ab9212018-08-01 19:57:34 +00001494 assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) &&
1495 "vector type argument should have been split");
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001496
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001497 // First check if it's a PS input addr.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001498 if (CallConv == CallingConv::AMDGPU_PS &&
1499 !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001500
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001501 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum);
1502
1503 // Inconveniently only the first part of the split is marked as isSplit,
1504 // so skip to the end. We only want to increment PSInputNum once for the
1505 // entire split argument.
1506 if (Arg->Flags.isSplit()) {
1507 while (!Arg->Flags.isSplitEnd()) {
1508 assert(!Arg->VT.isVector() &&
1509 "unexpected vector split in ps argument type");
1510 if (!SkipArg)
1511 Splits.push_back(*Arg);
1512 Arg = &Ins[++I];
1513 }
1514 }
1515
1516 if (SkipArg) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001517 // We can safely skip PS inputs.
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001518 Skipped.set(Arg->getOrigArgIndex());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001519 ++PSInputNum;
1520 continue;
1521 }
1522
1523 Info->markPSInputAllocated(PSInputNum);
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00001524 if (Arg->Used)
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001525 Info->markPSInputEnabled(PSInputNum);
1526
1527 ++PSInputNum;
1528 }
1529
Matt Arsenault9ced1e02018-07-31 19:05:14 +00001530 Splits.push_back(*Arg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001531 }
1532}
1533
1534// Allocate special inputs passed in VGPRs.
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001535static void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
1536 MachineFunction &MF,
1537 const SIRegisterInfo &TRI,
1538 SIMachineFunctionInfo &Info) {
1539 if (Info.hasWorkItemIDX()) {
1540 unsigned Reg = AMDGPU::VGPR0;
1541 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001542
1543 CCInfo.AllocateReg(Reg);
1544 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg));
1545 }
1546
1547 if (Info.hasWorkItemIDY()) {
1548 unsigned Reg = AMDGPU::VGPR1;
1549 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1550
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001551 CCInfo.AllocateReg(Reg);
1552 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg));
1553 }
1554
1555 if (Info.hasWorkItemIDZ()) {
1556 unsigned Reg = AMDGPU::VGPR2;
1557 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1558
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001559 CCInfo.AllocateReg(Reg);
1560 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg));
1561 }
1562}
1563
1564// Try to allocate a VGPR at the end of the argument list, or if no argument
1565 // VGPRs are left, allocate a stack slot instead.
1566static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) {
1567 ArrayRef<MCPhysReg> ArgVGPRs
1568 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32);
1569 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
1570 if (RegIdx == ArgVGPRs.size()) {
1571 // Spill to stack required.
1572 int64_t Offset = CCInfo.AllocateStack(4, 4);
1573
1574 return ArgDescriptor::createStack(Offset);
1575 }
1576
1577 unsigned Reg = ArgVGPRs[RegIdx];
1578 Reg = CCInfo.AllocateReg(Reg);
1579 assert(Reg != AMDGPU::NoRegister);
1580
1581 MachineFunction &MF = CCInfo.getMachineFunction();
1582 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
1583 return ArgDescriptor::createRegister(Reg);
1584}
1585
1586static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
1587 const TargetRegisterClass *RC,
1588 unsigned NumArgRegs) {
1589 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32);
1590 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs);
1591 if (RegIdx == ArgSGPRs.size())
1592 report_fatal_error("ran out of SGPRs for arguments");
1593
1594 unsigned Reg = ArgSGPRs[RegIdx];
1595 Reg = CCInfo.AllocateReg(Reg);
1596 assert(Reg != AMDGPU::NoRegister);
1597
1598 MachineFunction &MF = CCInfo.getMachineFunction();
1599 MF.addLiveIn(Reg, RC);
1600 return ArgDescriptor::createRegister(Reg);
1601}
1602
1603static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) {
1604 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32);
1605}
1606
1607static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) {
1608 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16);
1609}
1610
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001611static void allocateSpecialInputVGPRs(CCState &CCInfo,
1612 MachineFunction &MF,
1613 const SIRegisterInfo &TRI,
1614 SIMachineFunctionInfo &Info) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001615 if (Info.hasWorkItemIDX())
1616 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001617
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001618 if (Info.hasWorkItemIDY())
1619 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo));
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001620
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001621 if (Info.hasWorkItemIDZ())
1622 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo));
1623}
1624
1625static void allocateSpecialInputSGPRs(CCState &CCInfo,
1626 MachineFunction &MF,
1627 const SIRegisterInfo &TRI,
1628 SIMachineFunctionInfo &Info) {
1629 auto &ArgInfo = Info.getArgInfo();
1630
1631 // TODO: Unify handling with private memory pointers.
1632
1633 if (Info.hasDispatchPtr())
1634 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo);
1635
1636 if (Info.hasQueuePtr())
1637 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo);
1638
1639 if (Info.hasKernargSegmentPtr())
1640 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo);
1641
1642 if (Info.hasDispatchID())
1643 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo);
1644
1645 // flat_scratch_init is not applicable for non-kernel functions.
1646
1647 if (Info.hasWorkGroupIDX())
1648 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo);
1649
1650 if (Info.hasWorkGroupIDY())
1651 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo);
1652
1653 if (Info.hasWorkGroupIDZ())
1654 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo);
Matt Arsenault817c2532017-08-03 23:12:44 +00001655
1656 if (Info.hasImplicitArgPtr())
1657 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001658}
1659
1660// Allocate special inputs passed in user SGPRs.
1661static void allocateHSAUserSGPRs(CCState &CCInfo,
1662 MachineFunction &MF,
1663 const SIRegisterInfo &TRI,
1664 SIMachineFunctionInfo &Info) {
Matt Arsenault10fc0622017-06-26 03:01:31 +00001665 if (Info.hasImplicitBufferPtr()) {
1666 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
1667 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
1668 CCInfo.AllocateReg(ImplicitBufferPtrReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001669 }
1670
1671 // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
1672 if (Info.hasPrivateSegmentBuffer()) {
1673 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
1674 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
1675 CCInfo.AllocateReg(PrivateSegmentBufferReg);
1676 }
1677
1678 if (Info.hasDispatchPtr()) {
1679 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
1680 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
1681 CCInfo.AllocateReg(DispatchPtrReg);
1682 }
1683
1684 if (Info.hasQueuePtr()) {
1685 unsigned QueuePtrReg = Info.addQueuePtr(TRI);
1686 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
1687 CCInfo.AllocateReg(QueuePtrReg);
1688 }
1689
1690 if (Info.hasKernargSegmentPtr()) {
1691 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
1692 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
1693 CCInfo.AllocateReg(InputPtrReg);
1694 }
1695
1696 if (Info.hasDispatchID()) {
1697 unsigned DispatchIDReg = Info.addDispatchID(TRI);
1698 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
1699 CCInfo.AllocateReg(DispatchIDReg);
1700 }
1701
1702 if (Info.hasFlatScratchInit()) {
1703 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
1704 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
1705 CCInfo.AllocateReg(FlatScratchInitReg);
1706 }
1707
1708 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
1709 // these from the dispatch pointer.
1710}
1711
1712// Allocate special input registers that are initialized per-wave.
1713static void allocateSystemSGPRs(CCState &CCInfo,
1714 MachineFunction &MF,
1715 SIMachineFunctionInfo &Info,
Marek Olsak584d2c02017-05-04 22:25:20 +00001716 CallingConv::ID CallConv,
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001717 bool IsShader) {
1718 if (Info.hasWorkGroupIDX()) {
1719 unsigned Reg = Info.addWorkGroupIDX();
1720 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1721 CCInfo.AllocateReg(Reg);
1722 }
1723
1724 if (Info.hasWorkGroupIDY()) {
1725 unsigned Reg = Info.addWorkGroupIDY();
1726 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1727 CCInfo.AllocateReg(Reg);
1728 }
1729
1730 if (Info.hasWorkGroupIDZ()) {
1731 unsigned Reg = Info.addWorkGroupIDZ();
1732 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1733 CCInfo.AllocateReg(Reg);
1734 }
1735
1736 if (Info.hasWorkGroupInfo()) {
1737 unsigned Reg = Info.addWorkGroupInfo();
1738 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
1739 CCInfo.AllocateReg(Reg);
1740 }
1741
1742 if (Info.hasPrivateSegmentWaveByteOffset()) {
1743 // Scratch wave offset passed in system SGPR.
1744 unsigned PrivateSegmentWaveByteOffsetReg;
1745
1746 if (IsShader) {
Marek Olsak584d2c02017-05-04 22:25:20 +00001747 PrivateSegmentWaveByteOffsetReg =
1748 Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
1749
1750 // This is true if the scratch wave byte offset doesn't have a fixed
1751 // location.
1752 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
1753 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
1754 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
1755 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001756 } else
1757 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
1758
1759 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
1760 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
1761 }
1762}
1763
1764static void reservePrivateMemoryRegs(const TargetMachine &TM,
1765 MachineFunction &MF,
1766 const SIRegisterInfo &TRI,
Matt Arsenault1cc47f82017-07-18 16:44:56 +00001767 SIMachineFunctionInfo &Info) {
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001768 // Now that we've figured out where the scratch register inputs are, see if
1769 // we should reserve the arguments and use them directly.
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001770 MachineFrameInfo &MFI = MF.getFrameInfo();
1771 bool HasStackObjects = MFI.hasStackObjects();
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001772 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001773
1774 // Record that we know we have non-spill stack objects so we don't need to
1775 // check all stack objects later.
1776 if (HasStackObjects)
1777 Info.setHasNonSpillStackObjects(true);
1778
1779 // Everything live out of a block is spilled with fast regalloc, so it's
1780 // almost certain that spilling will be required.
1781 if (TM.getOptLevel() == CodeGenOpt::None)
1782 HasStackObjects = true;
1783
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001784 // For now assume stack access is needed in any callee functions, so we need
1785 // to pass in the scratch registers.
1786 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls();
1787
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001788 if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) {
1789 // If we have stack objects, we unquestionably need the private buffer
1790 // resource. For the Code Object V2 ABI, this will be the first 4 user
1791 // SGPR inputs. We can reserve those and use them directly.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001792
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001793 unsigned PrivateSegmentBufferReg =
1794 Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
1795 Info.setScratchRSrcReg(PrivateSegmentBufferReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001796 } else {
1797 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001798 // We tentatively reserve the last registers (skipping those which may
1799 // contain VCC, FLAT_SCR, and XNACK). After register allocation, we'll
1800 // replace these with the registers immediately after the ones which were
1801 // really allocated. In the prologue, copies will be inserted from the
1802 // argument to these reserved registers.
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001803
1804 // Without HSA, relocations are used for the scratch pointer and the
1805 // buffer resource setup is always inserted in the prologue. Scratch wave
1806 // offset is still in an input SGPR.
1807 Info.setScratchRSrcReg(ReservedBufferReg);
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001808 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001809
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001810 // This should be accurate for kernels even before the frame is finalized.
1811 const bool HasFP = ST.getFrameLowering()->hasFP(MF);
1812 if (HasFP) {
1813 unsigned ReservedOffsetReg =
1814 TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1815 MachineRegisterInfo &MRI = MF.getRegInfo();
1816
1817 // Try to use s32 as the SP, but move it if it would interfere with input
1818 // arguments. This won't work with calls though.
1819 //
1820 // FIXME: Move SP to avoid any possible inputs, or find a way to spill input
1821 // registers.
1822 if (!MRI.isLiveIn(AMDGPU::SGPR32)) {
1823 Info.setStackPtrOffsetReg(AMDGPU::SGPR32);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001824 } else {
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001825 assert(AMDGPU::isShader(MF.getFunction().getCallingConv()));
1826
1827 if (MFI.hasCalls())
1828 report_fatal_error("call in graphics shader with too many input SGPRs");
1829
1830 for (unsigned Reg : AMDGPU::SGPR_32RegClass) {
1831 if (!MRI.isLiveIn(Reg)) {
1832 Info.setStackPtrOffsetReg(Reg);
1833 break;
1834 }
1835 }
1836
1837 if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG)
1838 report_fatal_error("failed to find register for SP");
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001839 }
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00001840
1841 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1842 Info.setFrameOffsetReg(ReservedOffsetReg);
1843 } else if (RequiresStackAccess) {
1844 assert(!MFI.hasCalls());
1845 // We know there are accesses and they will be done relative to SP, so just
1846 // pin it to the input.
1847 //
1848 // FIXME: Should not do this if inline asm is reading/writing these
1849 // registers.
1850 unsigned PreloadedSP = Info.getPreloadedReg(
1851 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
1852
1853 Info.setStackPtrOffsetReg(PreloadedSP);
1854 Info.setScratchWaveOffsetReg(PreloadedSP);
1855 Info.setFrameOffsetReg(PreloadedSP);
1856 } else {
1857 assert(!MFI.hasCalls());
1858
1859 // There may not be stack access at all. There may still be spills, or
1860 // access of a constant pointer (in which case an extra copy will be
1861 // emitted in the prolog).
1862 unsigned ReservedOffsetReg
1863 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
1864 Info.setStackPtrOffsetReg(ReservedOffsetReg);
1865 Info.setScratchWaveOffsetReg(ReservedOffsetReg);
1866 Info.setFrameOffsetReg(ReservedOffsetReg);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001867 }
1868}
1869
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00001870bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const {
1871 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1872 return !Info->isEntryFunction();
1873}
1874
1875void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
1876
1877}
1878
1879void SITargetLowering::insertCopiesSplitCSR(
1880 MachineBasicBlock *Entry,
1881 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
1882 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
1883
1884 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
1885 if (!IStart)
1886 return;
1887
1888 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1889 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
1890 MachineBasicBlock::iterator MBBI = Entry->begin();
1891 for (const MCPhysReg *I = IStart; *I; ++I) {
1892 const TargetRegisterClass *RC = nullptr;
1893 if (AMDGPU::SReg_64RegClass.contains(*I))
1894 RC = &AMDGPU::SGPR_64RegClass;
1895 else if (AMDGPU::SReg_32RegClass.contains(*I))
1896 RC = &AMDGPU::SGPR_32RegClass;
1897 else
1898 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
1899
1900 unsigned NewVR = MRI->createVirtualRegister(RC);
1901 // Create copy from CSR to a virtual register.
1902 Entry->addLiveIn(*I);
1903 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
1904 .addReg(*I);
1905
1906 // Insert the copy-back instructions right before the terminator.
1907 for (auto *Exit : Exits)
1908 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
1909 TII->get(TargetOpcode::COPY), *I)
1910 .addReg(NewVR);
1911 }
1912}
1913
Christian Konig2c8f6d52013-03-07 09:03:52 +00001914SDValue SITargetLowering::LowerFormalArguments(
Eric Christopher7792e322015-01-30 23:24:40 +00001915 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001916 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1917 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001918 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001919
1920 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaultceafc552018-05-29 17:42:50 +00001921 const Function &Fn = MF.getFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00001922 FunctionType *FType = MF.getFunction().getFunctionType();
Christian Konig99ee0f42013-03-07 09:04:14 +00001923 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Christian Konig2c8f6d52013-03-07 09:03:52 +00001924
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +00001925 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00001926 DiagnosticInfoUnsupported NoGraphicsHSA(
Matthias Braunf1caa282017-12-15 22:22:58 +00001927 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
Matt Arsenaultd48da142015-11-02 23:23:02 +00001928 DAG.getContext()->diagnose(NoGraphicsHSA);
Diana Picus81bc3172016-05-26 15:24:55 +00001929 return DAG.getEntryNode();
Matt Arsenaultd48da142015-11-02 23:23:02 +00001930 }
1931
Christian Konig2c8f6d52013-03-07 09:03:52 +00001932 SmallVector<ISD::InputArg, 16> Splits;
Christian Konig2c8f6d52013-03-07 09:03:52 +00001933 SmallVector<CCValAssign, 16> ArgLocs;
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001934 BitVector Skipped(Ins.size());
Eric Christopherb5217502014-08-06 18:45:26 +00001935 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1936 *DAG.getContext());
Christian Konig2c8f6d52013-03-07 09:03:52 +00001937
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001938 bool IsShader = AMDGPU::isShader(CallConv);
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +00001939 bool IsKernel = AMDGPU::isKernel(CallConv);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001940 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
Christian Konig99ee0f42013-03-07 09:04:14 +00001941
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001942 if (IsShader) {
1943 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1944
1945 // At least one interpolation mode must be enabled or else the GPU will
1946 // hang.
1947 //
1948 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1949 // set PSInputAddr, the user wants to enable some bits after the compilation
1950 // based on run-time states. Since we can't know what the final PSInputEna
1951 // will look like, so we shouldn't do anything here and the user should take
1952 // responsibility for the correct programming.
1953 //
1954 // Otherwise, the following restrictions apply:
1955 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1956 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1957 // enabled too.
Tim Renoufc8ffffe2017-10-12 16:16:41 +00001958 if (CallConv == CallingConv::AMDGPU_PS) {
1959 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1960 ((Info->getPSInputAddr() & 0xF) == 0 &&
1961 Info->isPSInputAllocated(11))) {
1962 CCInfo.AllocateReg(AMDGPU::VGPR0);
1963 CCInfo.AllocateReg(AMDGPU::VGPR1);
1964 Info->markPSInputAllocated(0);
1965 Info->markPSInputEnabled(0);
1966 }
1967 if (Subtarget->isAmdPalOS()) {
1968 // For isAmdPalOS, the user does not enable some bits after compilation
1969 // based on run-time states; the register values being generated here are
1970 // the final ones set in hardware. Therefore we need to apply the
1971 // workaround to PSInputAddr and PSInputEnable together. (The case where
1972 // a bit is set in PSInputAddr but not PSInputEnable is where the
1973 // frontend set up an input arg for a particular interpolation mode, but
1974 // nothing uses that input arg. Really we should have an earlier pass
1975 // that removes such an arg.)
1976 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
1977 if ((PsInputBits & 0x7F) == 0 ||
1978 ((PsInputBits & 0xF) == 0 &&
1979 (PsInputBits >> 11 & 1)))
1980 Info->markPSInputEnabled(
1981 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
1982 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001983 }
1984
Tom Stellard2f3f9852017-01-25 01:25:13 +00001985 assert(!Info->hasDispatchPtr() &&
Tom Stellardf110f8f2016-04-14 16:27:03 +00001986 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
1987 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
1988 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
1989 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
1990 !Info->hasWorkItemIDZ());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001991 } else if (IsKernel) {
1992 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001993 } else {
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00001994 Splits.append(Ins.begin(), Ins.end());
Tom Stellardaf775432013-10-23 00:44:32 +00001995 }
1996
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001997 if (IsEntryFunc) {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00001998 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00001999 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
Tom Stellard2f3f9852017-01-25 01:25:13 +00002000 }
2001
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002002 if (IsKernel) {
Tom Stellardbbeb45a2016-09-16 21:53:00 +00002003 analyzeFormalArgumentsCompute(CCInfo, Ins);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002004 } else {
2005 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
2006 CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
2007 }
Christian Konig2c8f6d52013-03-07 09:03:52 +00002008
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002009 SmallVector<SDValue, 16> Chains;
2010
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002011 // FIXME: This is the minimum kernel argument alignment. We should improve
2012 // this to the maximum alignment of the arguments.
2013 //
2014 // FIXME: Alignment of explicit arguments is totally broken with a non-zero
2015 // explicit kernarg offset.
2016 const unsigned KernelArgBaseAlign = 16;
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002017
2018 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
Christian Konigb7be72d2013-05-17 09:46:48 +00002019 const ISD::InputArg &Arg = Ins[i];
Matt Arsenaultd362b6a2018-07-13 16:40:37 +00002020 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) {
Christian Konigb7be72d2013-05-17 09:46:48 +00002021 InVals.push_back(DAG.getUNDEF(Arg.VT));
Christian Konig99ee0f42013-03-07 09:04:14 +00002022 continue;
2023 }
2024
Christian Konig2c8f6d52013-03-07 09:03:52 +00002025 CCValAssign &VA = ArgLocs[ArgIdx++];
Craig Topper7f416c82014-11-16 21:17:18 +00002026 MVT VT = VA.getLocVT();
Tom Stellarded882c22013-06-03 17:40:11 +00002027
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002028 if (IsEntryFunc && VA.isMemLoc()) {
Tom Stellardaf775432013-10-23 00:44:32 +00002029 VT = Ins[i].VT;
Tom Stellardbbeb45a2016-09-16 21:53:00 +00002030 EVT MemVT = VA.getLocVT();
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002031
Matt Arsenault4bec7d42018-07-20 09:05:08 +00002032 const uint64_t Offset = VA.getLocMemOffset();
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002033 unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002034
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002035 SDValue Arg = lowerKernargMemParameter(
Matt Arsenault7b4826e2018-05-30 16:17:51 +00002036 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]);
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002037 Chains.push_back(Arg.getValue(1));
Tom Stellardca7ecf32014-08-22 18:49:31 +00002038
Craig Toppere3dcce92015-08-01 22:20:21 +00002039 auto *ParamTy =
Andrew Trick05938a52015-02-16 18:10:47 +00002040 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
Tom Stellard5bfbae52018-07-11 20:59:01 +00002041 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
Matt Arsenaultcdd191d2019-01-28 20:14:49 +00002042 ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
2043 ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) {
Tom Stellardca7ecf32014-08-22 18:49:31 +00002044 // On SI, local pointers are just offsets into LDS, so they always fit in
2045 // 16 bits. On CI and newer they could potentially be
2046 // real pointers, so we can't guarantee their size.
2047 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
2048 DAG.getValueType(MVT::i16));
2049 }
2050
Tom Stellarded882c22013-06-03 17:40:11 +00002051 InVals.push_back(Arg);
2052 continue;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002053 } else if (!IsEntryFunc && VA.isMemLoc()) {
2054 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
2055 InVals.push_back(Val);
2056 if (!Arg.Flags.isByVal())
2057 Chains.push_back(Val.getValue(1));
2058 continue;
Tom Stellarded882c22013-06-03 17:40:11 +00002059 }
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002060
Christian Konig2c8f6d52013-03-07 09:03:52 +00002061 assert(VA.isRegLoc() && "Parameter must be in a register!");
2062
2063 unsigned Reg = VA.getLocReg();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002064 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
Matt Arsenaultb3463552017-07-15 05:52:59 +00002065 EVT ValVT = VA.getValVT();
Christian Konig2c8f6d52013-03-07 09:03:52 +00002066
2067 Reg = MF.addLiveIn(Reg, RC);
2068 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
2069
Matt Arsenault5c714cb2019-05-23 19:38:14 +00002070 if (Arg.Flags.isSRet()) {
Matt Arsenault45b98182017-11-15 00:45:43 +00002071 // The return object should be reasonably addressable.
2072
2073 // FIXME: This helps when the return is a real sret. If it is an
2074 // automatically inserted sret (i.e. CanLowerReturn returns false), an
2075 // extra copy is inserted in SelectionDAGBuilder which obscures this.
Matt Arsenault5c714cb2019-05-23 19:38:14 +00002076 unsigned NumBits
2077 = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex();
Matt Arsenault45b98182017-11-15 00:45:43 +00002078 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2079 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits)));
2080 }
2081
Matt Arsenaultb3463552017-07-15 05:52:59 +00002082 // If this is an 8 or 16-bit value, it is really passed promoted
2083 // to 32 bits. Insert an assert[sz]ext to capture this, then
2084 // truncate to the right size.
2085 switch (VA.getLocInfo()) {
2086 case CCValAssign::Full:
2087 break;
2088 case CCValAssign::BCvt:
2089 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
2090 break;
2091 case CCValAssign::SExt:
2092 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
2093 DAG.getValueType(ValVT));
2094 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2095 break;
2096 case CCValAssign::ZExt:
2097 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
2098 DAG.getValueType(ValVT));
2099 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2100 break;
2101 case CCValAssign::AExt:
2102 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
2103 break;
2104 default:
2105 llvm_unreachable("Unknown loc info!");
2106 }
2107
Christian Konig2c8f6d52013-03-07 09:03:52 +00002108 InVals.push_back(Val);
2109 }
Tom Stellarde99fb652015-01-20 19:33:04 +00002110
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002111 if (!IsEntryFunc) {
2112 // Special inputs come after user arguments.
2113 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
2114 }
2115
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002116 // Start adding system SGPRs.
2117 if (IsEntryFunc) {
2118 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002119 } else {
2120 CCInfo.AllocateReg(Info->getScratchRSrcReg());
2121 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
2122 CCInfo.AllocateReg(Info->getFrameOffsetReg());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002123 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002124 }
Matt Arsenaultcf13d182015-07-10 22:51:36 +00002125
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002126 auto &ArgUsageInfo =
2127 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
Matt Arsenaultceafc552018-05-29 17:42:50 +00002128 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002129
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002130 unsigned StackArgSize = CCInfo.getNextStackOffset();
2131 Info->setBytesInStackArgArea(StackArgSize);
2132
Matt Arsenaulte622dc32017-04-11 22:29:24 +00002133 return Chains.empty() ? Chain :
2134 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
Christian Konig2c8f6d52013-03-07 09:03:52 +00002135}
2136
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002137// TODO: If return values can't fit in registers, we should return as many as
2138// possible in registers before passing on stack.
2139bool SITargetLowering::CanLowerReturn(
2140 CallingConv::ID CallConv,
2141 MachineFunction &MF, bool IsVarArg,
2142 const SmallVectorImpl<ISD::OutputArg> &Outs,
2143 LLVMContext &Context) const {
2144 // Replacing returns with sret/stack usage doesn't make sense for shaders.
2145 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
2146 // for shaders. Vector types should be explicitly handled by CC.
2147 if (AMDGPU::isEntryFunctionCC(CallConv))
2148 return true;
2149
2150 SmallVector<CCValAssign, 16> RVLocs;
2151 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2152 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
2153}
2154
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002155SDValue
2156SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2157 bool isVarArg,
2158 const SmallVectorImpl<ISD::OutputArg> &Outs,
2159 const SmallVectorImpl<SDValue> &OutVals,
2160 const SDLoc &DL, SelectionDAG &DAG) const {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002161 MachineFunction &MF = DAG.getMachineFunction();
2162 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2163
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002164 if (AMDGPU::isKernel(CallConv)) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002165 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
2166 OutVals, DL, DAG);
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002167 }
2168
2169 bool IsShader = AMDGPU::isShader(CallConv);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002170
Matt Arsenault55ab9212018-08-01 19:57:34 +00002171 Info->setIfReturnsVoid(Outs.empty());
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002172 bool IsWaveEnd = Info->returnsVoid() && IsShader;
Marek Olsak8e9cc632016-01-13 17:23:09 +00002173
Marek Olsak8a0f3352016-01-13 17:23:04 +00002174 // CCValAssign - represent the assignment of the return value to a location.
2175 SmallVector<CCValAssign, 48> RVLocs;
Matt Arsenault55ab9212018-08-01 19:57:34 +00002176 SmallVector<ISD::OutputArg, 48> Splits;
Marek Olsak8a0f3352016-01-13 17:23:04 +00002177
2178 // CCState - Info about the registers and stack slots.
2179 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2180 *DAG.getContext());
2181
2182 // Analyze outgoing return values.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002183 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
Marek Olsak8a0f3352016-01-13 17:23:04 +00002184
2185 SDValue Flag;
2186 SmallVector<SDValue, 48> RetOps;
2187 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2188
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002189 // Add return address for callable functions.
2190 if (!Info->isEntryFunction()) {
2191 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2192 SDValue ReturnAddrReg = CreateLiveInRegister(
2193 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2194
2195 // FIXME: Should be able to use a vreg here, but need a way to prevent it
2196 // from being allcoated to a CSR.
2197
2198 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2199 MVT::i64);
2200
2201 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
2202 Flag = Chain.getValue(1);
2203
2204 RetOps.push_back(PhysReturnAddrReg);
2205 }
2206
Marek Olsak8a0f3352016-01-13 17:23:04 +00002207 // Copy the result values into the output registers.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002208 for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
2209 ++I, ++RealRVLocIdx) {
2210 CCValAssign &VA = RVLocs[I];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002211 assert(VA.isRegLoc() && "Can only return in registers!");
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002212 // TODO: Partially return in registers if return values don't fit.
Matt Arsenault55ab9212018-08-01 19:57:34 +00002213 SDValue Arg = OutVals[RealRVLocIdx];
Marek Olsak8a0f3352016-01-13 17:23:04 +00002214
2215 // Copied from other backends.
2216 switch (VA.getLocInfo()) {
Marek Olsak8a0f3352016-01-13 17:23:04 +00002217 case CCValAssign::Full:
2218 break;
2219 case CCValAssign::BCvt:
2220 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2221 break;
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002222 case CCValAssign::SExt:
2223 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2224 break;
2225 case CCValAssign::ZExt:
2226 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2227 break;
2228 case CCValAssign::AExt:
2229 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2230 break;
2231 default:
2232 llvm_unreachable("Unknown loc info!");
Marek Olsak8a0f3352016-01-13 17:23:04 +00002233 }
2234
2235 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
2236 Flag = Chain.getValue(1);
2237 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2238 }
2239
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002240 // FIXME: Does sret work properly?
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002241 if (!Info->isEntryFunction()) {
Tom Stellardc5a154d2018-06-28 23:47:12 +00002242 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002243 const MCPhysReg *I =
2244 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2245 if (I) {
2246 for (; *I; ++I) {
2247 if (AMDGPU::SReg_64RegClass.contains(*I))
2248 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2249 else if (AMDGPU::SReg_32RegClass.contains(*I))
2250 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2251 else
2252 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2253 }
2254 }
2255 }
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002256
Marek Olsak8a0f3352016-01-13 17:23:04 +00002257 // Update chain and glue.
2258 RetOps[0] = Chain;
2259 if (Flag.getNode())
2260 RetOps.push_back(Flag);
2261
Matt Arsenault2b1f9aa2017-05-17 21:56:25 +00002262 unsigned Opc = AMDGPUISD::ENDPGM;
2263 if (!IsWaveEnd)
2264 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
Matt Arsenault9babdf42016-06-22 20:15:28 +00002265 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
Marek Olsak8a0f3352016-01-13 17:23:04 +00002266}
2267
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002268SDValue SITargetLowering::LowerCallResult(
2269 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
2270 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2271 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
2272 SDValue ThisVal) const {
2273 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg);
2274
2275 // Assign locations to each value returned by this call.
2276 SmallVector<CCValAssign, 16> RVLocs;
2277 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2278 *DAG.getContext());
2279 CCInfo.AnalyzeCallResult(Ins, RetCC);
2280
2281 // Copy all of the result registers out of their specified physreg.
2282 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2283 CCValAssign VA = RVLocs[i];
2284 SDValue Val;
2285
2286 if (VA.isRegLoc()) {
2287 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
2288 Chain = Val.getValue(1);
2289 InFlag = Val.getValue(2);
2290 } else if (VA.isMemLoc()) {
2291 report_fatal_error("TODO: return values in memory");
2292 } else
2293 llvm_unreachable("unknown argument location type");
2294
2295 switch (VA.getLocInfo()) {
2296 case CCValAssign::Full:
2297 break;
2298 case CCValAssign::BCvt:
2299 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2300 break;
2301 case CCValAssign::ZExt:
2302 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
2303 DAG.getValueType(VA.getValVT()));
2304 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2305 break;
2306 case CCValAssign::SExt:
2307 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
2308 DAG.getValueType(VA.getValVT()));
2309 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2310 break;
2311 case CCValAssign::AExt:
2312 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
2313 break;
2314 default:
2315 llvm_unreachable("Unknown loc info!");
2316 }
2317
2318 InVals.push_back(Val);
2319 }
2320
2321 return Chain;
2322}
2323
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002324 // Add code to pass the special inputs required by the features the callee uses,
 2325// separate from the explicit user arguments present in the IR.
2326void SITargetLowering::passSpecialInputs(
2327 CallLoweringInfo &CLI,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002328 CCState &CCInfo,
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002329 const SIMachineFunctionInfo &Info,
2330 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
2331 SmallVectorImpl<SDValue> &MemOpChains,
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002332 SDValue Chain) const {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002333 // If we don't have a call site, this was a call inserted by
2334 // legalization. These can never use special inputs.
2335 if (!CLI.CS)
2336 return;
2337
2338 const Function *CalleeFunc = CLI.CS.getCalledFunction();
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002339 assert(CalleeFunc);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002340
2341 SelectionDAG &DAG = CLI.DAG;
2342 const SDLoc &DL = CLI.DL;
2343
Tom Stellardc5a154d2018-06-28 23:47:12 +00002344 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002345
2346 auto &ArgUsageInfo =
2347 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
2348 const AMDGPUFunctionArgInfo &CalleeArgInfo
2349 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
2350
2351 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
2352
2353 // TODO: Unify with private memory register handling. This is complicated by
2354 // the fact that at least in kernels, the input argument is not necessarily
2355 // in the same location as the input.
2356 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
2357 AMDGPUFunctionArgInfo::DISPATCH_PTR,
2358 AMDGPUFunctionArgInfo::QUEUE_PTR,
2359 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR,
2360 AMDGPUFunctionArgInfo::DISPATCH_ID,
2361 AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
2362 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
2363 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
2364 AMDGPUFunctionArgInfo::WORKITEM_ID_X,
2365 AMDGPUFunctionArgInfo::WORKITEM_ID_Y,
Matt Arsenault817c2532017-08-03 23:12:44 +00002366 AMDGPUFunctionArgInfo::WORKITEM_ID_Z,
2367 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002368 };
2369
2370 for (auto InputID : InputRegs) {
2371 const ArgDescriptor *OutgoingArg;
2372 const TargetRegisterClass *ArgRC;
2373
2374 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
2375 if (!OutgoingArg)
2376 continue;
2377
2378 const ArgDescriptor *IncomingArg;
2379 const TargetRegisterClass *IncomingArgRC;
2380 std::tie(IncomingArg, IncomingArgRC)
2381 = CallerArgInfo.getPreloadedValue(InputID);
2382 assert(IncomingArgRC == ArgRC);
2383
2384 // All special arguments are ints for now.
2385 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32;
Matt Arsenault817c2532017-08-03 23:12:44 +00002386 SDValue InputReg;
2387
2388 if (IncomingArg) {
2389 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg);
2390 } else {
2391 // The implicit arg ptr is special because it doesn't have a corresponding
2392 // input for kernels, and is computed from the kernarg segment pointer.
2393 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
2394 InputReg = getImplicitArgPtr(DAG, DL);
2395 }
2396
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002397 if (OutgoingArg->isRegister()) {
2398 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
2399 } else {
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002400 unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
2401 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
2402 SpecialArgOffset);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002403 MemOpChains.push_back(ArgStore);
2404 }
2405 }
2406}
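// Editor's note, sketching the effect of the routine above (an assumption
// about a typical case, not taken from the original source): if the callee
// was compiled expecting, say, WORKGROUP_ID_X, the caller looks up its own
// copy of that value; when the callee's ArgDescriptor names a register, the
// pair is appended to RegsToPass, and when the callee instead expects the
// value on the stack, CCInfo allocates a slot and the resulting store is
// appended to MemOpChains.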
2407
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002408static bool canGuaranteeTCO(CallingConv::ID CC) {
2409 return CC == CallingConv::Fast;
2410}
2411
2412/// Return true if we might ever do TCO for calls with this calling convention.
2413static bool mayTailCallThisCC(CallingConv::ID CC) {
2414 switch (CC) {
2415 case CallingConv::C:
2416 return true;
2417 default:
2418 return canGuaranteeTCO(CC);
2419 }
2420}
2421
2422bool SITargetLowering::isEligibleForTailCallOptimization(
2423 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
2424 const SmallVectorImpl<ISD::OutputArg> &Outs,
2425 const SmallVectorImpl<SDValue> &OutVals,
2426 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
2427 if (!mayTailCallThisCC(CalleeCC))
2428 return false;
2429
2430 MachineFunction &MF = DAG.getMachineFunction();
Matthias Braunf1caa282017-12-15 22:22:58 +00002431 const Function &CallerF = MF.getFunction();
2432 CallingConv::ID CallerCC = CallerF.getCallingConv();
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002433 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2434 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2435
 2436 // Kernels aren't callable, and don't have a live-in return address, so it
2437 // doesn't make sense to do a tail call with entry functions.
2438 if (!CallerPreserved)
2439 return false;
2440
2441 bool CCMatch = CallerCC == CalleeCC;
2442
2443 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
2444 if (canGuaranteeTCO(CalleeCC) && CCMatch)
2445 return true;
2446 return false;
2447 }
2448
2449 // TODO: Can we handle var args?
2450 if (IsVarArg)
2451 return false;
2452
Matthias Braunf1caa282017-12-15 22:22:58 +00002453 for (const Argument &Arg : CallerF.args()) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002454 if (Arg.hasByValAttr())
2455 return false;
2456 }
2457
2458 LLVMContext &Ctx = *DAG.getContext();
2459
2460 // Check that the call results are passed in the same way.
2461 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
2462 CCAssignFnForCall(CalleeCC, IsVarArg),
2463 CCAssignFnForCall(CallerCC, IsVarArg)))
2464 return false;
2465
2466 // The callee has to preserve all registers the caller needs to preserve.
2467 if (!CCMatch) {
2468 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2469 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2470 return false;
2471 }
2472
2473 // Nothing more to check if the callee is taking no arguments.
2474 if (Outs.empty())
2475 return true;
2476
2477 SmallVector<CCValAssign, 16> ArgLocs;
2478 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx);
2479
2480 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg));
2481
2482 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
2483 // If the stack arguments for this call do not fit into our own save area then
 2484 // the call cannot be lowered as a tail call.
2485 // TODO: Is this really necessary?
2486 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
2487 return false;
2488
2489 const MachineRegisterInfo &MRI = MF.getRegInfo();
2490 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals);
2491}
2492
2493bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2494 if (!CI->isTailCall())
2495 return false;
2496
2497 const Function *ParentFn = CI->getParent()->getParent();
2498 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv()))
2499 return false;
2500
2501 auto Attr = ParentFn->getFnAttribute("disable-tail-calls");
2502 return (Attr.getValueAsString() != "true");
2503}
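// Editor's sketch of the IR this check reacts to (illustrative only; @foo is
// a hypothetical function, not from this file):
//
//   define void @foo() #0 { ... }
//   attributes #0 = { "disable-tail-calls"="true" }
//
// Calls inside such a function are never emitted as tail calls.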
2504
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002505// The wave scratch offset register is used as the global base pointer.
2506SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
2507 SmallVectorImpl<SDValue> &InVals) const {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002508 SelectionDAG &DAG = CLI.DAG;
2509 const SDLoc &DL = CLI.DL;
2510 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2511 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2512 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2513 SDValue Chain = CLI.Chain;
2514 SDValue Callee = CLI.Callee;
2515 bool &IsTailCall = CLI.IsTailCall;
2516 CallingConv::ID CallConv = CLI.CallConv;
2517 bool IsVarArg = CLI.IsVarArg;
2518 bool IsSibCall = false;
2519 bool IsThisReturn = false;
2520 MachineFunction &MF = DAG.getMachineFunction();
2521
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002522 if (IsVarArg) {
2523 return lowerUnhandledCall(CLI, InVals,
2524 "unsupported call to variadic function ");
2525 }
2526
Matt Arsenault935f3b72018-08-08 16:58:39 +00002527 if (!CLI.CS.getInstruction())
2528 report_fatal_error("unsupported libcall legalization");
2529
Matt Arsenaulta176cc52017-08-03 23:32:41 +00002530 if (!CLI.CS.getCalledFunction()) {
2531 return lowerUnhandledCall(CLI, InVals,
2532 "unsupported indirect call to function ");
2533 }
2534
2535 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) {
2536 return lowerUnhandledCall(CLI, InVals,
2537 "unsupported required tail call to function ");
2538 }
2539
Matt Arsenault1fb90132018-06-28 10:18:36 +00002540 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) {
2541 // Note the issue is with the CC of the calling function, not of the call
2542 // itself.
2543 return lowerUnhandledCall(CLI, InVals,
2544 "unsupported call from graphics shader of function ");
2545 }
2546
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002547 if (IsTailCall) {
2548 IsTailCall = isEligibleForTailCallOptimization(
2549 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
2550 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) {
2551 report_fatal_error("failed to perform tail call elimination on a call "
2552 "site marked musttail");
2553 }
2554
2555 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
2556
2557 // A sibling call is one where we're under the usual C ABI and not planning
2558 // to change that but can still do a tail call:
2559 if (!TailCallOpt && IsTailCall)
2560 IsSibCall = true;
2561
2562 if (IsTailCall)
2563 ++NumTailCalls;
2564 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002565
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002566 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
2567
2568 // Analyze operands of the call, assigning locations to each operand.
2569 SmallVector<CCValAssign, 16> ArgLocs;
2570 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2571 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg);
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002572
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002573 CCInfo.AnalyzeCallOperands(Outs, AssignFn);
2574
2575 // Get a count of how many bytes are to be pushed on the stack.
2576 unsigned NumBytes = CCInfo.getNextStackOffset();
2577
2578 if (IsSibCall) {
2579 // Since we're not changing the ABI to make this a tail call, the memory
2580 // operands are already available in the caller's incoming argument space.
2581 NumBytes = 0;
2582 }
2583
2584 // FPDiff is the byte offset of the call's argument area from the callee's.
2585 // Stores to callee stack arguments will be placed in FixedStackSlots offset
2586 // by this amount for a tail call. In a sibling call it must be 0 because the
2587 // caller will deallocate the entire stack and the callee still expects its
2588 // arguments to begin at SP+0. Completely unused for non-tail calls.
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002589 int32_t FPDiff = 0;
2590 MachineFrameInfo &MFI = MF.getFrameInfo();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002591 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2592
Matt Arsenault6efd0822017-09-14 17:14:57 +00002593 SDValue CallerSavedFP;
2594
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002595 // Adjust the stack pointer for the new arguments...
2596 // These operations are automatically eliminated by the prolog/epilog pass
2597 if (!IsSibCall) {
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002598 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002599
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002600 SmallVector<SDValue, 4> CopyFromChains;
2601
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002602 unsigned OffsetReg = Info->getScratchWaveOffsetReg();
2603
2604 // In the HSA case, this should be an identity copy.
2605 SDValue ScratchRSrcReg
2606 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32);
2607 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002608 CopyFromChains.push_back(ScratchRSrcReg.getValue(1));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002609
2610 // TODO: Don't hardcode these registers and get from the callee function.
2611 SDValue ScratchWaveOffsetReg
2612 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32);
2613 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg);
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002614 CopyFromChains.push_back(ScratchWaveOffsetReg.getValue(1));
Matt Arsenault6efd0822017-09-14 17:14:57 +00002615
2616 if (!Info->isEntryFunction()) {
2617 // Avoid clobbering this function's FP value. In the current convention
 2618 // the callee will overwrite this, so save/restore it around the call site.
2619 CallerSavedFP = DAG.getCopyFromReg(Chain, DL,
2620 Info->getFrameOffsetReg(), MVT::i32);
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002621 CopyFromChains.push_back(CallerSavedFP.getValue(1));
Matt Arsenault6efd0822017-09-14 17:14:57 +00002622 }
Matt Arsenault99e6f4d2019-05-16 15:10:27 +00002623
2624 Chain = DAG.getTokenFactor(DL, CopyFromChains);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002625 }
2626
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002627 SmallVector<SDValue, 8> MemOpChains;
2628 MVT PtrVT = MVT::i32;
2629
2630 // Walk the register/memloc assignments, inserting copies/loads.
2631 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
2632 ++i, ++realArgIdx) {
2633 CCValAssign &VA = ArgLocs[i];
2634 SDValue Arg = OutVals[realArgIdx];
2635
2636 // Promote the value if needed.
2637 switch (VA.getLocInfo()) {
2638 case CCValAssign::Full:
2639 break;
2640 case CCValAssign::BCvt:
2641 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
2642 break;
2643 case CCValAssign::ZExt:
2644 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
2645 break;
2646 case CCValAssign::SExt:
2647 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
2648 break;
2649 case CCValAssign::AExt:
2650 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
2651 break;
2652 case CCValAssign::FPExt:
2653 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
2654 break;
2655 default:
2656 llvm_unreachable("Unknown loc info!");
2657 }
2658
2659 if (VA.isRegLoc()) {
2660 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2661 } else {
2662 assert(VA.isMemLoc());
2663
2664 SDValue DstAddr;
2665 MachinePointerInfo DstInfo;
2666
2667 unsigned LocMemOffset = VA.getLocMemOffset();
2668 int32_t Offset = LocMemOffset;
Matt Arsenaultb655fa92017-11-29 01:25:12 +00002669
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002670 SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002671 unsigned Align = 0;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002672
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002673 if (IsTailCall) {
2674 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2675 unsigned OpSize = Flags.isByVal() ?
2676 Flags.getByValSize() : VA.getValVT().getStoreSize();
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002677
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002678 // FIXME: We could use better than the minimum required byval alignment.
2679 Align = Flags.isByVal() ? Flags.getByValAlign() :
2680 MinAlign(Subtarget->getStackAlignment(), Offset);
2681
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002682 Offset = Offset + FPDiff;
2683 int FI = MFI.CreateFixedObject(OpSize, Offset, true);
2684
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002685 DstAddr = DAG.getFrameIndex(FI, PtrVT);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002686 DstInfo = MachinePointerInfo::getFixedStack(MF, FI);
2687
2688 // Make sure any stack arguments overlapping with where we're storing
2689 // are loaded before this eventual operation. Otherwise they'll be
2690 // clobbered.
2691
2692 // FIXME: Why is this really necessary? This seems to just result in a
 2693 // lot of code to copy the stack arguments and write them back to the same
2694 // locations, which are supposed to be immutable?
2695 Chain = addTokenForArgument(Chain, DAG, MFI, FI);
2696 } else {
2697 DstAddr = PtrOff;
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002698 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset);
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002699 Align = MinAlign(Subtarget->getStackAlignment(), LocMemOffset);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002700 }
2701
2702 if (Outs[i].Flags.isByVal()) {
2703 SDValue SizeNode =
2704 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32);
2705 SDValue Cpy = DAG.getMemcpy(
2706 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
2707 /*isVol = */ false, /*AlwaysInline = */ true,
Yaxun Liuc5962262017-11-22 16:13:35 +00002708 /*isTailCall = */ false, DstInfo,
2709 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy(
Matt Arsenault0da63502018-08-31 05:49:54 +00002710 *DAG.getContext(), AMDGPUAS::PRIVATE_ADDRESS))));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002711
2712 MemOpChains.push_back(Cpy);
2713 } else {
Matt Arsenaultff987ac2018-09-13 12:14:31 +00002714 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Align);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002715 MemOpChains.push_back(Store);
2716 }
2717 }
2718 }
2719
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002720 // Copy special input registers after user input arguments.
Matt Arsenaultbb8e64e2018-08-22 11:09:45 +00002721 passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00002722
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002723 if (!MemOpChains.empty())
2724 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2725
2726 // Build a sequence of copy-to-reg nodes chained together with token chain
2727 // and flag operands which copy the outgoing args into the appropriate regs.
2728 SDValue InFlag;
2729 for (auto &RegToPass : RegsToPass) {
2730 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2731 RegToPass.second, InFlag);
2732 InFlag = Chain.getValue(1);
2733 }
2734
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002735
2736 SDValue PhysReturnAddrReg;
2737 if (IsTailCall) {
2738 // Since the return is being combined with the call, we need to pass on the
2739 // return address.
2740
2741 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2742 SDValue ReturnAddrReg = CreateLiveInRegister(
2743 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2744
2745 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2746 MVT::i64);
2747 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2748 InFlag = Chain.getValue(1);
2749 }
2750
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002751 // We don't usually want to end the call-sequence here because we would tidy
 2752 // the frame up *after* the call; however, in the ABI-changing tail-call case
2753 // we've carefully laid out the parameters so that when sp is reset they'll be
2754 // in the correct location.
2755 if (IsTailCall && !IsSibCall) {
2756 Chain = DAG.getCALLSEQ_END(Chain,
2757 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2758 DAG.getTargetConstant(0, DL, MVT::i32),
2759 InFlag, DL);
2760 InFlag = Chain.getValue(1);
2761 }
2762
2763 std::vector<SDValue> Ops;
2764 Ops.push_back(Chain);
2765 Ops.push_back(Callee);
Scott Linderd19d1972019-02-04 20:00:07 +00002766 // Add a redundant copy of the callee global which will not be legalized, as
2767 // we need direct access to the callee later.
2768 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
2769 const GlobalValue *GV = GSD->getGlobal();
2770 Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002771
2772 if (IsTailCall) {
2773 // Each tail call may have to adjust the stack by a different amount, so
2774 // this information must travel along with the operation for eventual
2775 // consumption by emitEpilogue.
2776 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002777
2778 Ops.push_back(PhysReturnAddrReg);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002779 }
2780
2781 // Add argument registers to the end of the list so that they are known live
2782 // into the call.
2783 for (auto &RegToPass : RegsToPass) {
2784 Ops.push_back(DAG.getRegister(RegToPass.first,
2785 RegToPass.second.getValueType()));
2786 }
2787
2788 // Add a register mask operand representing the call-preserved registers.
2789
Tom Stellardc5a154d2018-06-28 23:47:12 +00002790 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002791 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2792 assert(Mask && "Missing call preserved mask for calling convention");
2793 Ops.push_back(DAG.getRegisterMask(Mask));
2794
2795 if (InFlag.getNode())
2796 Ops.push_back(InFlag);
2797
2798 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2799
 2800 // If we're doing a tail call, use a TC_RETURN here rather than an
2801 // actual call instruction.
2802 if (IsTailCall) {
Matt Arsenault71bcbd42017-08-11 20:42:08 +00002803 MFI.setHasTailCall();
2804 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002805 }
2806
2807 // Returns a chain and a flag for retval copy to use.
2808 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops);
2809 Chain = Call.getValue(0);
2810 InFlag = Call.getValue(1);
2811
Matt Arsenault6efd0822017-09-14 17:14:57 +00002812 if (CallerSavedFP) {
2813 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32);
2814 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag);
2815 InFlag = Chain.getValue(1);
2816 }
2817
Matt Arsenaultdefe3712017-09-14 17:37:40 +00002818 uint64_t CalleePopBytes = NumBytes;
2819 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32),
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00002820 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32),
2821 InFlag, DL);
2822 if (!Ins.empty())
2823 InFlag = Chain.getValue(1);
2824
2825 // Handle result values, copying them out of physregs into vregs that we
2826 // return.
2827 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2828 InVals, IsThisReturn,
2829 IsThisReturn ? OutVals[0] : SDValue());
2830}
2831
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002832unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
2833 SelectionDAG &DAG) const {
2834 unsigned Reg = StringSwitch<unsigned>(RegName)
2835 .Case("m0", AMDGPU::M0)
2836 .Case("exec", AMDGPU::EXEC)
2837 .Case("exec_lo", AMDGPU::EXEC_LO)
2838 .Case("exec_hi", AMDGPU::EXEC_HI)
2839 .Case("flat_scratch", AMDGPU::FLAT_SCR)
2840 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
2841 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
2842 .Default(AMDGPU::NoRegister);
2843
2844 if (Reg == AMDGPU::NoRegister) {
2845 report_fatal_error(Twine("invalid register name \""
2846 + StringRef(RegName) + "\"."));
2847
2848 }
2849
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00002850 if ((Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||
2851 Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) &&
2852 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
Matt Arsenault9a10cea2016-01-26 04:29:24 +00002853 report_fatal_error(Twine("invalid register \""
2854 + StringRef(RegName) + "\" for subtarget."));
2855 }
2856
2857 switch (Reg) {
2858 case AMDGPU::M0:
2859 case AMDGPU::EXEC_LO:
2860 case AMDGPU::EXEC_HI:
2861 case AMDGPU::FLAT_SCR_LO:
2862 case AMDGPU::FLAT_SCR_HI:
2863 if (VT.getSizeInBits() == 32)
2864 return Reg;
2865 break;
2866 case AMDGPU::EXEC:
2867 case AMDGPU::FLAT_SCR:
2868 if (VT.getSizeInBits() == 64)
2869 return Reg;
2870 break;
2871 default:
2872 llvm_unreachable("missing register type checking");
2873 }
2874
2875 report_fatal_error(Twine("invalid type for register \""
2876 + StringRef(RegName) + "\"."));
2877}
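// Editor's note (assumption about how this hook is reached, not stated in
// this file): it serves the llvm.read_register / llvm.write_register
// intrinsics, e.g. IR along the lines of
//
//   %m0 = call i32 @llvm.read_register.i32(metadata !0)
//   !0 = !{!"m0"}
//
// Only the SI register names matched above are accepted; anything else, or a
// mismatched type width, ends in report_fatal_error.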
2878
Matt Arsenault786724a2016-07-12 21:41:32 +00002879// If kill is not the last instruction, split the block so kill is always a
2880// proper terminator.
2881MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
2882 MachineBasicBlock *BB) const {
2883 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
2884
2885 MachineBasicBlock::iterator SplitPoint(&MI);
2886 ++SplitPoint;
2887
2888 if (SplitPoint == BB->end()) {
2889 // Don't bother with a new block.
Marek Olsakce76ea02017-10-24 10:27:13 +00002890 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002891 return BB;
2892 }
2893
2894 MachineFunction *MF = BB->getParent();
2895 MachineBasicBlock *SplitBB
2896 = MF->CreateMachineBasicBlock(BB->getBasicBlock());
2897
Matt Arsenault786724a2016-07-12 21:41:32 +00002898 MF->insert(++MachineFunction::iterator(BB), SplitBB);
2899 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
2900
Matt Arsenaultd40ded62016-07-22 17:01:15 +00002901 SplitBB->transferSuccessorsAndUpdatePHIs(BB);
Matt Arsenault786724a2016-07-12 21:41:32 +00002902 BB->addSuccessor(SplitBB);
2903
Marek Olsakce76ea02017-10-24 10:27:13 +00002904 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
Matt Arsenault786724a2016-07-12 21:41:32 +00002905 return SplitBB;
2906}
2907
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002908// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
2909// wavefront. If the value is uniform and just happens to be in a VGPR, this
2910// will only do one iteration. In the worst case, this will loop 64 times.
2911//
2912// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
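// Rough shape of the emitted loop (editor's sketch; the registers below are
// virtual registers created at build time, and the M0 / GPR-index-mode write
// depends on the operands passed in):
//
//   loop:
//     %phi     = PHI %init, <entry>, %result, <loop>
//     %curidx  = V_READFIRSTLANE_B32 %idx_vgpr
//     %cond    = V_CMP_EQ_U32 %curidx, %idx_vgpr
//     %oldexec = S_AND_SAVEEXEC_B64 %cond   ; only matching lanes stay active
//     ...use %curidx (write M0 or S_SET_GPR_IDX_ON)...
//     EXEC     = S_XOR_B64 EXEC, %oldexec   ; re-enable the not-yet-done lanes
//     S_CBRANCH_EXECNZ loop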
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002913static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
2914 const SIInstrInfo *TII,
2915 MachineRegisterInfo &MRI,
2916 MachineBasicBlock &OrigBB,
2917 MachineBasicBlock &LoopBB,
2918 const DebugLoc &DL,
2919 const MachineOperand &IdxReg,
2920 unsigned InitReg,
2921 unsigned ResultReg,
2922 unsigned PhiReg,
2923 unsigned InitSaveExecReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002924 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002925 bool UseGPRIdxMode,
2926 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002927 MachineBasicBlock::iterator I = LoopBB.begin();
2928
2929 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2930 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2931 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2932 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2933
2934 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
2935 .addReg(InitReg)
2936 .addMBB(&OrigBB)
2937 .addReg(ResultReg)
2938 .addMBB(&LoopBB);
2939
2940 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
2941 .addReg(InitSaveExecReg)
2942 .addMBB(&OrigBB)
2943 .addReg(NewExec)
2944 .addMBB(&LoopBB);
2945
2946 // Read the next variant <- also loop target.
2947 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
2948 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
2949
2950 // Compare the just read M0 value to all possible Idx values.
2951 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
2952 .addReg(CurrentIdxReg)
Matt Arsenaultf0ba86a2016-07-21 09:40:57 +00002953 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002954
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002955 // Update EXEC, save the original EXEC value to NewExec.
2956 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
2957 .addReg(CondReg, RegState::Kill);
2958
2959 MRI.setSimpleHint(NewExec, CondReg);
2960
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002961 if (UseGPRIdxMode) {
2962 unsigned IdxReg;
2963 if (Offset == 0) {
2964 IdxReg = CurrentIdxReg;
2965 } else {
2966 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2967 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
2968 .addReg(CurrentIdxReg, RegState::Kill)
2969 .addImm(Offset);
2970 }
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002971 unsigned IdxMode = IsIndirectSrc ?
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00002972 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
Changpeng Fangda38b5f2018-02-16 16:31:30 +00002973 MachineInstr *SetOn =
2974 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
2975 .addReg(IdxReg, RegState::Kill)
2976 .addImm(IdxMode);
2977 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002978 } else {
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00002979 // Move index from CurrentIdxReg into M0
2980 if (Offset == 0) {
2981 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2982 .addReg(CurrentIdxReg, RegState::Kill);
2983 } else {
2984 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
2985 .addReg(CurrentIdxReg, RegState::Kill)
2986 .addImm(Offset);
2987 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002988 }
2989
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002990 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00002991 MachineInstr *InsertPt =
Scott Lindere2c58472019-02-05 19:50:32 +00002992 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00002993 .addReg(AMDGPU::EXEC)
2994 .addReg(NewExec);
2995
2996 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
2997 // s_cbranch_scc0?
2998
2999 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
3000 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
3001 .addMBB(&LoopBB);
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003002
3003 return InsertPt->getIterator();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003004}
3005
 3006// This has slightly sub-optimal regalloc when the source vector is killed by
 3007// the read. The register allocator does not understand that the kill is
 3008// per-workitem, so the vector is kept alive for the whole loop and we end up
 3009// not reusing a subregister from it, costing one more VGPR than necessary.
 3010// That extra VGPR was avoided when this was expanded after register allocation.
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003011static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
3012 MachineBasicBlock &MBB,
3013 MachineInstr &MI,
3014 unsigned InitResultReg,
3015 unsigned PhiReg,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003016 int Offset,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003017 bool UseGPRIdxMode,
3018 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003019 MachineFunction *MF = MBB.getParent();
3020 MachineRegisterInfo &MRI = MF->getRegInfo();
3021 const DebugLoc &DL = MI.getDebugLoc();
3022 MachineBasicBlock::iterator I(&MI);
3023
3024 unsigned DstReg = MI.getOperand(0).getReg();
Matt Arsenault301162c2017-11-15 21:51:43 +00003025 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3026 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003027
3028 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);
3029
3030 // Save the EXEC mask
3031 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
3032 .addReg(AMDGPU::EXEC);
3033
3034 // To insert the loop we need to split the block. Move everything after this
3035 // point to a new block, and insert a new empty block between the two.
3036 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
3037 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
3038 MachineFunction::iterator MBBI(MBB);
3039 ++MBBI;
3040
3041 MF->insert(MBBI, LoopBB);
3042 MF->insert(MBBI, RemainderBB);
3043
3044 LoopBB->addSuccessor(LoopBB);
3045 LoopBB->addSuccessor(RemainderBB);
3046
3047 // Move the rest of the block into a new block.
Matt Arsenaultd40ded62016-07-22 17:01:15 +00003048 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003049 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
3050
3051 MBB.addSuccessor(LoopBB);
3052
3053 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3054
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003055 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
3056 InitResultReg, DstReg, PhiReg, TmpExec,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003057 Offset, UseGPRIdxMode, IsIndirectSrc);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003058
3059 MachineBasicBlock::iterator First = RemainderBB->begin();
3060 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
3061 .addReg(SaveExec);
3062
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003063 return InsPt;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003064}
3065
3066// Returns subreg index, offset
3067static std::pair<unsigned, int>
3068computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
3069 const TargetRegisterClass *SuperRC,
3070 unsigned VecReg,
3071 int Offset) {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003072 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003073
3074 // Skip out of bounds offsets, or else we would end up using an undefined
3075 // register.
3076 if (Offset >= NumElts || Offset < 0)
3077 return std::make_pair(AMDGPU::sub0, Offset);
3078
3079 return std::make_pair(AMDGPU::sub0 + Offset, 0);
3080}
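// Worked example (editor's note): for a 128-bit super-register class
// (4 x 32-bit lanes), Offset 2 yields (AMDGPU::sub2, 0), folding the constant
// into the subregister index, while an out-of-bounds Offset 5 yields
// (AMDGPU::sub0, 5) and leaves the offset for the dynamic indexing code.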
3081
3082// Return true if the index is an SGPR and was set.
3083static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
3084 MachineRegisterInfo &MRI,
3085 MachineInstr &MI,
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003086 int Offset,
3087 bool UseGPRIdxMode,
3088 bool IsIndirectSrc) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003089 MachineBasicBlock *MBB = MI.getParent();
3090 const DebugLoc &DL = MI.getDebugLoc();
3091 MachineBasicBlock::iterator I(&MI);
3092
3093 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3094 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());
3095
3096 assert(Idx->getReg() != AMDGPU::NoRegister);
3097
3098 if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
3099 return false;
3100
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003101 if (UseGPRIdxMode) {
3102 unsigned IdxMode = IsIndirectSrc ?
Dmitry Preobrazhenskyef920352019-02-27 13:12:12 +00003103 AMDGPU::VGPRIndexMode::SRC0_ENABLE : AMDGPU::VGPRIndexMode::DST_ENABLE;
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003104 if (Offset == 0) {
3105 MachineInstr *SetOn =
Diana Picus116bbab2017-01-13 09:58:52 +00003106 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3107 .add(*Idx)
3108 .addImm(IdxMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003109
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003110 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003111 } else {
3112 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3113 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
Diana Picus116bbab2017-01-13 09:58:52 +00003114 .add(*Idx)
3115 .addImm(Offset);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003116 MachineInstr *SetOn =
3117 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
3118 .addReg(Tmp, RegState::Kill)
3119 .addImm(IdxMode);
3120
Matt Arsenaultdac31db2016-10-13 12:45:16 +00003121 SetOn->getOperand(3).setIsUndef();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003122 }
3123
3124 return true;
3125 }
3126
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003127 if (Offset == 0) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003128 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
3129 .add(*Idx);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003130 } else {
3131 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00003132 .add(*Idx)
3133 .addImm(Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003134 }
3135
3136 return true;
3137}
3138
3139// Control flow needs to be inserted if indexing with a VGPR.
3140static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
3141 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003142 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003143 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003144 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3145 MachineFunction *MF = MBB.getParent();
3146 MachineRegisterInfo &MRI = MF->getRegInfo();
3147
3148 unsigned Dst = MI.getOperand(0).getReg();
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003149 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003150 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3151
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003152 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003153
3154 unsigned SubReg;
3155 std::tie(SubReg, Offset)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003156 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003157
Marek Olsake22fdb92017-03-21 17:00:32 +00003158 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003159
3160 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003161 MachineBasicBlock::iterator I(&MI);
3162 const DebugLoc &DL = MI.getDebugLoc();
3163
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003164 if (UseGPRIdxMode) {
3165 // TODO: Look at the uses to avoid the copy. This may require rescheduling
3166 // to avoid interfering with other uses, so probably requires a new
3167 // optimization pass.
3168 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003169 .addReg(SrcReg, RegState::Undef, SubReg)
3170 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003171 .addReg(AMDGPU::M0, RegState::Implicit);
3172 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3173 } else {
3174 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003175 .addReg(SrcReg, RegState::Undef, SubReg)
3176 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003177 }
3178
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003179 MI.eraseFromParent();
3180
3181 return &MBB;
3182 }
3183
3184 const DebugLoc &DL = MI.getDebugLoc();
3185 MachineBasicBlock::iterator I(&MI);
3186
3187 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3188 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3189
3190 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);
3191
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003192 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
3193 Offset, UseGPRIdxMode, true);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003194 MachineBasicBlock *LoopBB = InsPt->getParent();
3195
3196 if (UseGPRIdxMode) {
3197 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003198 .addReg(SrcReg, RegState::Undef, SubReg)
3199 .addReg(SrcReg, RegState::Implicit)
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003200 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003201 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003202 } else {
3203 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003204 .addReg(SrcReg, RegState::Undef, SubReg)
3205 .addReg(SrcReg, RegState::Implicit);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003206 }
3207
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003208 MI.eraseFromParent();
3209
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003210 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003211}
3212
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003213static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
3214 const TargetRegisterClass *VecRC) {
3215 switch (TRI.getRegSizeInBits(*VecRC)) {
3216 case 32: // 4 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003217 return AMDGPU::V_MOVRELD_B32_V1;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003218 case 64: // 8 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003219 return AMDGPU::V_MOVRELD_B32_V2;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003220 case 128: // 16 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003221 return AMDGPU::V_MOVRELD_B32_V4;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003222 case 256: // 32 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003223 return AMDGPU::V_MOVRELD_B32_V8;
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003224 case 512: // 64 bytes
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003225 return AMDGPU::V_MOVRELD_B32_V16;
3226 default:
3227 llvm_unreachable("unsupported size for MOVRELD pseudos");
3228 }
3229}
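// For instance (editor's note), a 128-bit vector register class such as the
// one used for v4i32 maps to AMDGPU::V_MOVRELD_B32_V4.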
3230
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003231static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
3232 MachineBasicBlock &MBB,
Tom Stellard5bfbae52018-07-11 20:59:01 +00003233 const GCNSubtarget &ST) {
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003234 const SIInstrInfo *TII = ST.getInstrInfo();
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003235 const SIRegisterInfo &TRI = TII->getRegisterInfo();
3236 MachineFunction *MF = MBB.getParent();
3237 MachineRegisterInfo &MRI = MF->getRegInfo();
3238
3239 unsigned Dst = MI.getOperand(0).getReg();
3240 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
3241 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
3242 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
3243 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
3244 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());
3245
3246 // This can be an immediate, but will be folded later.
3247 assert(Val->getReg());
3248
3249 unsigned SubReg;
3250 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3251 SrcVec->getReg(),
3252 Offset);
Marek Olsake22fdb92017-03-21 17:00:32 +00003253 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003254
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003255 if (Idx->getReg() == AMDGPU::NoRegister) {
3256 MachineBasicBlock::iterator I(&MI);
3257 const DebugLoc &DL = MI.getDebugLoc();
3258
3259 assert(Offset == 0);
3260
3261 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
Diana Picus116bbab2017-01-13 09:58:52 +00003262 .add(*SrcVec)
3263 .add(*Val)
3264 .addImm(SubReg);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003265
3266 MI.eraseFromParent();
3267 return &MBB;
3268 }
3269
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003270 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003271 MachineBasicBlock::iterator I(&MI);
3272 const DebugLoc &DL = MI.getDebugLoc();
3273
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003274 if (UseGPRIdxMode) {
3275 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003276 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3277 .add(*Val)
3278 .addReg(Dst, RegState::ImplicitDefine)
3279 .addReg(SrcVec->getReg(), RegState::Implicit)
3280 .addReg(AMDGPU::M0, RegState::Implicit);
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003281
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003282 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3283 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003284 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003285
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003286 BuildMI(MBB, I, DL, MovRelDesc)
3287 .addReg(Dst, RegState::Define)
3288 .addReg(SrcVec->getReg())
Diana Picus116bbab2017-01-13 09:58:52 +00003289 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003290 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003291 }
3292
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003293 MI.eraseFromParent();
3294 return &MBB;
3295 }
3296
3297 if (Val->isReg())
3298 MRI.clearKillFlags(Val->getReg());
3299
3300 const DebugLoc &DL = MI.getDebugLoc();
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003301
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003302 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3303
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003304 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003305 Offset, UseGPRIdxMode, false);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003306 MachineBasicBlock *LoopBB = InsPt->getParent();
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003307
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003308 if (UseGPRIdxMode) {
3309 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
Diana Picus116bbab2017-01-13 09:58:52 +00003310 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3311 .add(*Val) // src0
3312 .addReg(Dst, RegState::ImplicitDefine)
3313 .addReg(PhiReg, RegState::Implicit)
3314 .addReg(AMDGPU::M0, RegState::Implicit);
Changpeng Fangda38b5f2018-02-16 16:31:30 +00003315 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003316 } else {
Krzysztof Parzyszek44e25f32017-04-24 18:55:33 +00003317 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003318
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003319 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3320 .addReg(Dst, RegState::Define)
3321 .addReg(PhiReg)
Diana Picus116bbab2017-01-13 09:58:52 +00003322 .add(*Val)
Nicolai Haehnlea7852092016-10-24 14:56:02 +00003323 .addImm(SubReg - AMDGPU::sub0);
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003324 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003325
Nicolai Haehnlebd15c322016-10-14 09:03:04 +00003326 MI.eraseFromParent();
3327
Matt Arsenaultd486d3f2016-10-12 18:49:05 +00003328 return LoopBB;
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003329}
3330
Matt Arsenault786724a2016-07-12 21:41:32 +00003331MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3332 MachineInstr &MI, MachineBasicBlock *BB) const {
Tom Stellard244891d2016-12-20 15:52:17 +00003333
3334 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3335 MachineFunction *MF = BB->getParent();
3336 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3337
3338 if (TII->isMIMG(MI)) {
Matt Arsenault905f3512017-12-29 17:18:14 +00003339 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3340 report_fatal_error("missing mem operand from MIMG instruction");
3341 }
Tom Stellard244891d2016-12-20 15:52:17 +00003342 // Add a memoperand for mimg instructions so that they aren't assumed to
 3343 // be ordered memory instructions.
3344
Tom Stellard244891d2016-12-20 15:52:17 +00003345 return BB;
3346 }
3347
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003348 switch (MI.getOpcode()) {
Matt Arsenault301162c2017-11-15 21:51:43 +00003349 case AMDGPU::S_ADD_U64_PSEUDO:
3350 case AMDGPU::S_SUB_U64_PSEUDO: {
3351 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3352 const DebugLoc &DL = MI.getDebugLoc();
3353
3354 MachineOperand &Dest = MI.getOperand(0);
3355 MachineOperand &Src0 = MI.getOperand(1);
3356 MachineOperand &Src1 = MI.getOperand(2);
3357
3358 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3359 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
3360
3361 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3362 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3363 &AMDGPU::SReg_32_XM0RegClass);
3364 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3365 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3366 &AMDGPU::SReg_32_XM0RegClass);
3367
3368 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI,
3369 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0,
3370 &AMDGPU::SReg_32_XM0RegClass);
3371 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI,
3372 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1,
3373 &AMDGPU::SReg_32_XM0RegClass);
3374
3375 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
3376
3377 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
3378 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
3379 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0)
3380 .add(Src0Sub0)
3381 .add(Src1Sub0);
3382 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1)
3383 .add(Src0Sub1)
3384 .add(Src1Sub1);
3385 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
3386 .addReg(DestSub0)
3387 .addImm(AMDGPU::sub0)
3388 .addReg(DestSub1)
3389 .addImm(AMDGPU::sub1);
3390 MI.eraseFromParent();
3391 return BB;
3392 }
3393 case AMDGPU::SI_INIT_M0: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003394 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
Matt Arsenault4ac341c2016-04-14 21:58:15 +00003395 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
Diana Picus116bbab2017-01-13 09:58:52 +00003396 .add(MI.getOperand(0));
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003397 MI.eraseFromParent();
Matt Arsenault20711b72015-02-20 22:10:45 +00003398 return BB;
Matt Arsenault301162c2017-11-15 21:51:43 +00003399 }
Marek Olsak2d825902017-04-28 20:21:58 +00003400 case AMDGPU::SI_INIT_EXEC:
3401 // This should be before all vector instructions.
3402 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
3403 AMDGPU::EXEC)
3404 .addImm(MI.getOperand(0).getImm());
3405 MI.eraseFromParent();
3406 return BB;
3407
3408 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
3409 // Extract the thread count from an SGPR input and set EXEC accordingly.
3410 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
3411 //
3412 // S_BFE_U32 count, input, {shift, 7}
3413 // S_BFM_B64 exec, count, 0
3414 // S_CMP_EQ_U32 count, 64
3415 // S_CMOV_B64 exec, -1
3416 MachineInstr *FirstMI = &*BB->begin();
3417 MachineRegisterInfo &MRI = MF->getRegInfo();
3418 unsigned InputReg = MI.getOperand(0).getReg();
3419 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
3420 bool Found = false;
3421
3422 // Move the COPY of the input reg to the beginning, so that we can use it.
3423 for (auto I = BB->begin(); I != &MI; I++) {
3424 if (I->getOpcode() != TargetOpcode::COPY ||
3425 I->getOperand(0).getReg() != InputReg)
3426 continue;
3427
3428 if (I == FirstMI) {
3429 FirstMI = &*++BB->begin();
3430 } else {
3431 I->removeFromParent();
3432 BB->insert(FirstMI, &*I);
3433 }
3434 Found = true;
3435 break;
3436 }
3437 assert(Found);
Davide Italiano0dcc0152017-05-11 19:58:52 +00003438 (void)Found;
Marek Olsak2d825902017-04-28 20:21:58 +00003439
3440 // This should be before all vector instructions.
3441 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3442 .addReg(InputReg)
3443 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3444 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3445 AMDGPU::EXEC)
3446 .addReg(CountReg)
3447 .addImm(0);
3448 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3449 .addReg(CountReg, RegState::Kill)
3450 .addImm(64);
3451 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3452 AMDGPU::EXEC)
3453 .addImm(-1);
3454 MI.eraseFromParent();
3455 return BB;
3456 }
3457
Changpeng Fang01f60622016-03-15 17:28:44 +00003458 case AMDGPU::GET_GROUPSTATICSIZE: {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003459 DebugLoc DL = MI.getDebugLoc();
Matt Arsenault3c07c812016-07-22 17:01:33 +00003460 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
Diana Picus116bbab2017-01-13 09:58:52 +00003461 .add(MI.getOperand(0))
3462 .addImm(MFI->getLDSSize());
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00003463 MI.eraseFromParent();
Changpeng Fang01f60622016-03-15 17:28:44 +00003464 return BB;
3465 }
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003466 case AMDGPU::SI_INDIRECT_SRC_V1:
3467 case AMDGPU::SI_INDIRECT_SRC_V2:
3468 case AMDGPU::SI_INDIRECT_SRC_V4:
3469 case AMDGPU::SI_INDIRECT_SRC_V8:
3470 case AMDGPU::SI_INDIRECT_SRC_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003471 return emitIndirectSrc(MI, *BB, *getSubtarget());
Matt Arsenaultcb540bc2016-07-19 00:35:03 +00003472 case AMDGPU::SI_INDIRECT_DST_V1:
3473 case AMDGPU::SI_INDIRECT_DST_V2:
3474 case AMDGPU::SI_INDIRECT_DST_V4:
3475 case AMDGPU::SI_INDIRECT_DST_V8:
3476 case AMDGPU::SI_INDIRECT_DST_V16:
Matt Arsenaultdcf0cfc2016-10-04 01:41:05 +00003477 return emitIndirectDst(MI, *BB, *getSubtarget());
Marek Olsakce76ea02017-10-24 10:27:13 +00003478 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3479 case AMDGPU::SI_KILL_I1_PSEUDO:
Matt Arsenault786724a2016-07-12 21:41:32 +00003480 return splitKillBlock(MI, BB);
Matt Arsenault22e41792016-08-27 01:00:37 +00003481 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3482 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
Matt Arsenault22e41792016-08-27 01:00:37 +00003483
3484 unsigned Dst = MI.getOperand(0).getReg();
3485 unsigned Src0 = MI.getOperand(1).getReg();
3486 unsigned Src1 = MI.getOperand(2).getReg();
3487 const DebugLoc &DL = MI.getDebugLoc();
3488 unsigned SrcCond = MI.getOperand(3).getReg();
3489
3490 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3491 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003492 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
Matt Arsenault22e41792016-08-27 01:00:37 +00003493
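    // Expand the 64-bit select into two v_cndmask_b32_e64 on the sub0/sub1
    // halves, both keyed off the same copied condition, and recombine the
    // halves with a REG_SEQUENCE.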
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003494 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3495 .addReg(SrcCond);
Matt Arsenault22e41792016-08-27 01:00:37 +00003496 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003497 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003498 .addReg(Src0, 0, AMDGPU::sub0)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003499 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003500 .addReg(Src1, 0, AMDGPU::sub0)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003501 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003502 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003503 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003504 .addReg(Src0, 0, AMDGPU::sub1)
Tim Renouf2e94f6e2019-03-18 19:25:39 +00003505 .addImm(0)
Matt Arsenault22e41792016-08-27 01:00:37 +00003506 .addReg(Src1, 0, AMDGPU::sub1)
Nicolai Haehnlece4ddd02017-09-29 15:37:31 +00003507 .addReg(SrcCondCopy);
Matt Arsenault22e41792016-08-27 01:00:37 +00003508
3509 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3510 .addReg(DstLo)
3511 .addImm(AMDGPU::sub0)
3512 .addReg(DstHi)
3513 .addImm(AMDGPU::sub1);
3514 MI.eraseFromParent();
3515 return BB;
3516 }
Matt Arsenault327188a2016-12-15 21:57:11 +00003517 case AMDGPU::SI_BR_UNDEF: {
3518 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3519 const DebugLoc &DL = MI.getDebugLoc();
3520 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
Diana Picus116bbab2017-01-13 09:58:52 +00003521 .add(MI.getOperand(0));
Matt Arsenault327188a2016-12-15 21:57:11 +00003522 Br->getOperand(1).setIsUndef(true); // read undef SCC
3523 MI.eraseFromParent();
3524 return BB;
3525 }
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003526 case AMDGPU::ADJCALLSTACKUP:
3527 case AMDGPU::ADJCALLSTACKDOWN: {
3528 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3529 MachineInstrBuilder MIB(*MF, &MI);
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003530
3531 // Add an implicit use of the frame offset reg to prevent the restore copy
3532 // inserted after the call from being reordered after stack operations in
3533 // the caller's frame.
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003534 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine)
Matt Arsenaulte9f36792018-03-27 18:38:51 +00003535 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit)
3536 .addReg(Info->getFrameOffsetReg(), RegState::Implicit);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003537 return BB;
3538 }
Scott Linderd19d1972019-02-04 20:00:07 +00003539 case AMDGPU::SI_CALL_ISEL: {
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003540 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3541 const DebugLoc &DL = MI.getDebugLoc();
Scott Linderd19d1972019-02-04 20:00:07 +00003542
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003543 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF);
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003544
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003545 MachineInstrBuilder MIB;
Scott Linderd19d1972019-02-04 20:00:07 +00003546 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
Matt Arsenault71bcbd42017-08-11 20:42:08 +00003547
Scott Linderd19d1972019-02-04 20:00:07 +00003548 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003549 MIB.add(MI.getOperand(I));
Matt Arsenault6ed7b9b2017-08-02 01:31:28 +00003550
Chandler Carruthc73c0302018-08-16 21:30:05 +00003551 MIB.cloneMemRefs(MI);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00003552 MI.eraseFromParent();
3553 return BB;
3554 }
Stanislav Mekhanoshin64399da2019-05-02 04:26:35 +00003555 case AMDGPU::V_ADD_I32_e32:
3556 case AMDGPU::V_SUB_I32_e32:
3557 case AMDGPU::V_SUBREV_I32_e32: {
3558 // TODO: Define distinct V_*_I32_Pseudo instructions instead.
3559 const DebugLoc &DL = MI.getDebugLoc();
3560 unsigned Opc = MI.getOpcode();
3561
3562 bool NeedClampOperand = false;
3563 if (TII->pseudoToMCOpcode(Opc) == -1) {
3564 Opc = AMDGPU::getVOPe64(Opc);
3565 NeedClampOperand = true;
3566 }
3567
3568 auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg());
3569 if (TII->isVOP3(*I)) {
3570 I.addReg(AMDGPU::VCC, RegState::Define);
3571 }
3572 I.add(MI.getOperand(1))
3573 .add(MI.getOperand(2));
3574 if (NeedClampOperand)
3575 I.addImm(0); // clamp bit for e64 encoding
3576
3577 TII->legalizeOperands(*I);
3578
3579 MI.eraseFromParent();
3580 return BB;
3581 }
Changpeng Fang01f60622016-03-15 17:28:44 +00003582 default:
3583 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
Tom Stellard75aadc22012-12-11 21:25:42 +00003584 }
Tom Stellard75aadc22012-12-11 21:25:42 +00003585}
3586
Matt Arsenaulte11d8ac2017-10-13 21:10:22 +00003587bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const {
3588 return isTypeLegal(VT.getScalarType());
3589}
3590
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003591bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
3592 // This currently forces unfolding various combinations of fsub into fma with
3593 // free fneg'd operands. As long as we have fast FMA (controlled by
3594 // isFMAFasterThanFMulAndFAdd), we should perform these.
3595
3596 // When fma is quarter rate, for f64 where add / sub are at best half rate,
3597 // most of these combines appear to be cycle neutral but save on instruction
3598 // count / code size.
3599 return true;
3600}
3601
Mehdi Amini44ede332015-07-09 02:09:04 +00003602EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
3603 EVT VT) const {
Tom Stellard83747202013-07-18 21:43:53 +00003604 if (!VT.isVector()) {
3605 return MVT::i1;
3606 }
Matt Arsenault8596f712014-11-28 22:51:38 +00003607 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
Tom Stellard75aadc22012-12-11 21:25:42 +00003608}
3609
Matt Arsenault94163282016-12-22 16:36:25 +00003610MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
3611 // TODO: Should i16 be used always if legal? For now it would force VALU
3612 // shifts.
3613 return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
Christian Konig082a14a2013-03-18 11:34:05 +00003614}
3615
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003616// Answering this is somewhat tricky and depends on the specific device, since
3617// different devices have different rates for fma or for all f64 operations.
3618//
3619// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
3620// regardless of which device (although the number of cycles differs between
3621// devices), so it is always profitable for f64.
3622//
3623// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
3624// only on full rate devices. Normally, we should prefer selecting v_mad_f32
3625// which we can always do even without fused FP ops since it returns the same
3626// result as the separate operations and since it is always full
3627// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
3628// however does not support denormals, so we do report fma as faster if we have
3629// a fast fma device and require denormals.
3630//
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003631bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3632 VT = VT.getScalarType();
3633
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003634 switch (VT.getSimpleVT().SimpleTy) {
Matt Arsenault0084adc2018-04-30 19:08:16 +00003635 case MVT::f32: {
Matt Arsenault423bf3f2015-01-29 19:34:32 +00003636 // This is as fast on some subtargets. However, we always have full rate f32
3637 // mad available, which returns the same result as the separate operations
Matt Arsenault8d630032015-02-20 22:10:41 +00003638 // and which we should prefer over fma. We can't use mad if we want to support
3639 // denormals, so only report fma as faster in that case.
Matt Arsenault0084adc2018-04-30 19:08:16 +00003640 if (Subtarget->hasFP32Denormals())
3641 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts();
3642
3643 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32.
3644 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts();
3645 }
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003646 case MVT::f64:
3647 return true;
Matt Arsenault9e22bc22016-12-22 03:21:48 +00003648 case MVT::f16:
3649 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
Niels Ole Salscheiderd3a039f2013-08-10 10:38:54 +00003650 default:
3651 break;
3652 }
3653
3654 return false;
3655}
3656
Tom Stellard75aadc22012-12-11 21:25:42 +00003657//===----------------------------------------------------------------------===//
3658// Custom DAG Lowering Operations
3659//===----------------------------------------------------------------------===//
3660
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003661// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3662// wider vector type is legal.
3663SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
3664 SelectionDAG &DAG) const {
3665 unsigned Opc = Op.getOpcode();
3666 EVT VT = Op.getValueType();
3667 assert(VT == MVT::v4f16);
3668
3669 SDValue Lo, Hi;
3670 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3671
3672 SDLoc SL(Op);
3673 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
3674 Op->getFlags());
3675 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
3676 Op->getFlags());
3677
3678 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3679}
3680
3681// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
3682// wider vector type is legal.
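// For example (illustrative), an fadd of v4f16 becomes two fadds of v2f16 on
// the low and high halves, with the results rejoined by CONCAT_VECTORS.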
3683SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3684 SelectionDAG &DAG) const {
3685 unsigned Opc = Op.getOpcode();
3686 EVT VT = Op.getValueType();
3687 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3688
3689 SDValue Lo0, Hi0;
3690 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3691 SDValue Lo1, Hi1;
3692 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3693
3694 SDLoc SL(Op);
3695
3696 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3697 Op->getFlags());
3698 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3699 Op->getFlags());
3700
3701 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3702}
3703
Tom Stellard75aadc22012-12-11 21:25:42 +00003704SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3705 switch (Op.getOpcode()) {
3706 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
Tom Stellardf8794352012-12-19 22:10:31 +00003707 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
Aakanksha Patild5443f82019-05-29 18:20:11 +00003708 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
Tom Stellard35bb18c2013-08-26 15:06:04 +00003709 case ISD::LOAD: {
Tom Stellarde812f2f2014-07-21 15:45:06 +00003710 SDValue Result = LowerLOAD(Op, DAG);
3711 assert((!Result.getNode() ||
3712 Result.getNode()->getNumValues() == 2) &&
3713 "Load should return a value and a chain");
3714 return Result;
Tom Stellard35bb18c2013-08-26 15:06:04 +00003715 }
Tom Stellardaf775432013-10-23 00:44:32 +00003716
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003717 case ISD::FSIN:
3718 case ISD::FCOS:
3719 return LowerTrig(Op, DAG);
Tom Stellard0ec134f2014-02-04 17:18:40 +00003720 case ISD::SELECT: return LowerSELECT(Op, DAG);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003721 case ISD::FDIV: return LowerFDIV(Op, DAG);
Tom Stellard354a43c2016-04-01 18:27:37 +00003722 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
Tom Stellard81d871d2013-11-13 23:36:50 +00003723 case ISD::STORE: return LowerSTORE(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003724 case ISD::GlobalAddress: {
3725 MachineFunction &MF = DAG.getMachineFunction();
3726 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3727 return LowerGlobalAddress(MFI, Op, DAG);
Tom Stellard94593ee2013-06-03 17:40:18 +00003728 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003729 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003730 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00003731 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00003732 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
Matt Arsenault3aef8092017-01-23 23:09:58 +00003733 case ISD::INSERT_VECTOR_ELT:
3734 return lowerINSERT_VECTOR_ELT(Op, DAG);
3735 case ISD::EXTRACT_VECTOR_ELT:
3736 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
Matt Arsenault67a98152018-05-16 11:47:30 +00003737 case ISD::BUILD_VECTOR:
3738 return lowerBUILD_VECTOR(Op, DAG);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00003739 case ISD::FP_ROUND:
3740 return lowerFP_ROUND(Op, DAG);
Matt Arsenault3e025382017-04-24 17:49:13 +00003741 case ISD::TRAP:
Matt Arsenault3e025382017-04-24 17:49:13 +00003742 return lowerTRAP(Op, DAG);
Tony Tye43259df2018-05-16 16:19:34 +00003743 case ISD::DEBUGTRAP:
3744 return lowerDEBUGTRAP(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003745 case ISD::FABS:
3746 case ISD::FNEG:
Matt Arsenault36cdcfa2018-08-02 13:43:42 +00003747 case ISD::FCANONICALIZE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003748 return splitUnaryVectorOp(Op, DAG);
Matt Arsenault687ec752018-10-22 16:27:27 +00003749 case ISD::FMINNUM:
3750 case ISD::FMAXNUM:
3751 return lowerFMINNUM_FMAXNUM(Op, DAG);
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003752 case ISD::SHL:
3753 case ISD::SRA:
3754 case ISD::SRL:
3755 case ISD::ADD:
3756 case ISD::SUB:
3757 case ISD::MUL:
3758 case ISD::SMIN:
3759 case ISD::SMAX:
3760 case ISD::UMIN:
3761 case ISD::UMAX:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003762 case ISD::FADD:
3763 case ISD::FMUL:
Matt Arsenault687ec752018-10-22 16:27:27 +00003764 case ISD::FMINNUM_IEEE:
3765 case ISD::FMAXNUM_IEEE:
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003766 return splitBinaryVectorOp(Op, DAG);
Tom Stellard75aadc22012-12-11 21:25:42 +00003767 }
3768 return SDValue();
3769}
3770
Matt Arsenault1349a042018-05-22 06:32:10 +00003771static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3772 const SDLoc &DL,
3773 SelectionDAG &DAG, bool Unpacked) {
3774 if (!LoadVT.isVector())
3775 return Result;
3776
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003777 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3778 // Truncate to v2i16/v4i16.
3779 EVT IntLoadVT = LoadVT.changeTypeToInteger();
Matt Arsenault1349a042018-05-22 06:32:10 +00003780
3781 // Work around the legalizer not scalarizing the truncate after vector op
3782 // legalization by not creating an intermediate vector trunc.
3783 SmallVector<SDValue, 4> Elts;
3784 DAG.ExtractVectorElements(Result, Elts);
3785 for (SDValue &Elt : Elts)
3786 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);
3787
3788 Result = DAG.getBuildVector(IntLoadVT, DL, Elts);
3789
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003790 // Bitcast to original type (v2f16/v4f16).
Matt Arsenault1349a042018-05-22 06:32:10 +00003791 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003792 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003793
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003794 // Cast back to the original packed type.
3795 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result);
3796}
3797
Matt Arsenault1349a042018-05-22 06:32:10 +00003798SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
3799 MemSDNode *M,
3800 SelectionDAG &DAG,
Tim Renouf366a49d2018-08-02 23:33:01 +00003801 ArrayRef<SDValue> Ops,
Matt Arsenault1349a042018-05-22 06:32:10 +00003802 bool IsIntrinsic) const {
3803 SDLoc DL(M);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003804
3805 bool Unpacked = Subtarget->hasUnpackedD16VMem();
Matt Arsenault1349a042018-05-22 06:32:10 +00003806 EVT LoadVT = M->getValueType(0);
3807
Matt Arsenault1349a042018-05-22 06:32:10 +00003808 EVT EquivLoadVT = LoadVT;
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003809 if (Unpacked && LoadVT.isVector()) {
3810 EquivLoadVT = LoadVT.isVector() ?
3811 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3812 LoadVT.getVectorNumElements()) : LoadVT;
Matt Arsenault1349a042018-05-22 06:32:10 +00003813 }
3814
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003815 // Change from v4f16/v2f16 to EquivLoadVT.
3816 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);
3817
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003818 SDValue Load
3819 = DAG.getMemIntrinsicNode(
3820 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
3821 VTList, Ops, M->getMemoryVT(),
3822 M->getMemOperand());
3823 if (!Unpacked) // Just adjusted the opcode.
3824 return Load;
Changpeng Fang4737e892018-01-18 22:08:53 +00003825
Matt Arsenault1349a042018-05-22 06:32:10 +00003826 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);
Changpeng Fang4737e892018-01-18 22:08:53 +00003827
Matt Arsenault1349a042018-05-22 06:32:10 +00003828 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003829}
3830
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003831static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
3832 SDNode *N, SelectionDAG &DAG) {
3833 EVT VT = N->getValueType(0);
Matt Arsenaultcaf13162019-03-12 21:02:54 +00003834 const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003835 int CondCode = CD->getSExtValue();
3836 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
3837 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
3838 return DAG.getUNDEF(VT);
3839
3840 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
3841
3843 SDValue LHS = N->getOperand(1);
3844 SDValue RHS = N->getOperand(2);
3845
3846 SDLoc DL(N);
3847
3848 EVT CmpVT = LHS.getValueType();
3849 if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
3850 unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
3851 ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3852 LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
3853 RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
3854 }
3855
3856 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
3857
3858 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS,
3859 DAG.getCondCode(CCOpcode));
3860}
3861
3862static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
3863 SDNode *N, SelectionDAG &DAG) {
3864 EVT VT = N->getValueType(0);
Matt Arsenaultcaf13162019-03-12 21:02:54 +00003865 const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00003866
3867 int CondCode = CD->getSExtValue();
3868 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
3869 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) {
3870 return DAG.getUNDEF(VT);
3871 }
3872
3873 SDValue Src0 = N->getOperand(1);
3874 SDValue Src1 = N->getOperand(2);
3875 EVT CmpVT = Src0.getValueType();
3876 SDLoc SL(N);
3877
3878 if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
3879 Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3880 Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3881 }
3882
3883 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
3884 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
3885 return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0,
3886 Src1, DAG.getCondCode(CCOpcode));
3887}
3888
Matt Arsenault3aef8092017-01-23 23:09:58 +00003889void SITargetLowering::ReplaceNodeResults(SDNode *N,
3890 SmallVectorImpl<SDValue> &Results,
3891 SelectionDAG &DAG) const {
3892 switch (N->getOpcode()) {
3893 case ISD::INSERT_VECTOR_ELT: {
3894 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
3895 Results.push_back(Res);
3896 return;
3897 }
3898 case ISD::EXTRACT_VECTOR_ELT: {
3899 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
3900 Results.push_back(Res);
3901 return;
3902 }
Matt Arsenault1f17c662017-02-22 00:27:34 +00003903 case ISD::INTRINSIC_WO_CHAIN: {
3904 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
Marek Olsak13e47412018-01-31 20:18:04 +00003905 switch (IID) {
3906 case Intrinsic::amdgcn_cvt_pkrtz: {
Matt Arsenault1f17c662017-02-22 00:27:34 +00003907 SDValue Src0 = N->getOperand(1);
3908 SDValue Src1 = N->getOperand(2);
3909 SDLoc SL(N);
3910 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
3911 Src0, Src1);
Matt Arsenault1f17c662017-02-22 00:27:34 +00003912 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
3913 return;
3914 }
Marek Olsak13e47412018-01-31 20:18:04 +00003915 case Intrinsic::amdgcn_cvt_pknorm_i16:
3916 case Intrinsic::amdgcn_cvt_pknorm_u16:
3917 case Intrinsic::amdgcn_cvt_pk_i16:
3918 case Intrinsic::amdgcn_cvt_pk_u16: {
3919 SDValue Src0 = N->getOperand(1);
3920 SDValue Src1 = N->getOperand(2);
3921 SDLoc SL(N);
3922 unsigned Opcode;
3923
3924 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
3925 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
3926 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
3927 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
3928 else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
3929 Opcode = AMDGPUISD::CVT_PK_I16_I32;
3930 else
3931 Opcode = AMDGPUISD::CVT_PK_U16_U32;
3932
Matt Arsenault709374d2018-08-01 20:13:58 +00003933 EVT VT = N->getValueType(0);
3934 if (isTypeLegal(VT))
3935 Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
3936 else {
3937 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
3938 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
3939 }
Marek Olsak13e47412018-01-31 20:18:04 +00003940 return;
3941 }
3942 }
Simon Pilgrimd362d272017-07-08 19:50:03 +00003943 break;
Matt Arsenault1f17c662017-02-22 00:27:34 +00003944 }
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003945 case ISD::INTRINSIC_W_CHAIN: {
Matt Arsenault1349a042018-05-22 06:32:10 +00003946 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003947 Results.push_back(Res);
Matt Arsenault1349a042018-05-22 06:32:10 +00003948 Results.push_back(Res.getValue(1));
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003949 return;
3950 }
Matt Arsenault1349a042018-05-22 06:32:10 +00003951
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00003952 break;
3953 }
Matt Arsenault4a486232017-04-19 20:53:07 +00003954 case ISD::SELECT: {
3955 SDLoc SL(N);
3956 EVT VT = N->getValueType(0);
3957 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3958 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
3959 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));
3960
3961 EVT SelectVT = NewVT;
3962 if (NewVT.bitsLT(MVT::i32)) {
3963 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
3964 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
3965 SelectVT = MVT::i32;
3966 }
3967
3968 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
3969 N->getOperand(0), LHS, RHS);
3970
3971 if (NewVT != SelectVT)
3972 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
3973 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
3974 return;
3975 }
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003976 case ISD::FNEG: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003977 if (N->getValueType(0) != MVT::v2f16)
3978 break;
3979
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003980 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003981 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3982
3983 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
3984 BC,
3985 DAG.getConstant(0x80008000, SL, MVT::i32));
3986 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
3987 return;
3988 }
3989 case ISD::FABS: {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00003990 if (N->getValueType(0) != MVT::v2f16)
3991 break;
3992
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003993 SDLoc SL(N);
Matt Arsenaulte9524f12018-06-06 21:28:11 +00003994 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));
3995
3996 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
3997 BC,
3998 DAG.getConstant(0x7fff7fff, SL, MVT::i32));
3999 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
4000 return;
4001 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004002 default:
4003 break;
4004 }
4005}
4006
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00004007/// Helper function for LowerBRCOND
Tom Stellardf8794352012-12-19 22:10:31 +00004008static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00004009
Tom Stellardf8794352012-12-19 22:10:31 +00004010 SDNode *Parent = Value.getNode();
4011 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
4012 I != E; ++I) {
4013
4014 if (I.getUse().get() != Value)
4015 continue;
4016
4017 if (I->getOpcode() == Opcode)
4018 return *I;
4019 }
Craig Topper062a2ba2014-04-25 05:30:21 +00004020 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00004021}
4022
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004023unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
Matt Arsenault6408c912016-09-16 22:11:18 +00004024 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4025 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004026 case Intrinsic::amdgcn_if:
4027 return AMDGPUISD::IF;
4028 case Intrinsic::amdgcn_else:
4029 return AMDGPUISD::ELSE;
4030 case Intrinsic::amdgcn_loop:
4031 return AMDGPUISD::LOOP;
4032 case Intrinsic::amdgcn_end_cf:
4033 llvm_unreachable("should not occur");
Matt Arsenault6408c912016-09-16 22:11:18 +00004034 default:
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004035 return 0;
Matt Arsenault6408c912016-09-16 22:11:18 +00004036 }
Tom Stellardbc4497b2016-02-12 23:45:29 +00004037 }
Matt Arsenault6408c912016-09-16 22:11:18 +00004038
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004039 // break, if_break, else_break are all only used as inputs to loop, not
4040 // directly as branch conditions.
4041 return 0;
Tom Stellardbc4497b2016-02-12 23:45:29 +00004042}
4043
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004044bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
4045 const Triple &TT = getTargetMachine().getTargetTriple();
Matt Arsenault0da63502018-08-31 05:49:54 +00004046 return (GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4047 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004048 AMDGPU::shouldEmitConstantsToTextSection(TT);
4049}
4050
4051bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
Scott Linderd19d1972019-02-04 20:00:07 +00004052 // FIXME: Either avoid relying on address space here or change the default
4053 // address space for functions to avoid the explicit check.
4054 return (GV->getValueType()->isFunctionTy() ||
4055 GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
Matt Arsenault0da63502018-08-31 05:49:54 +00004056 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4057 GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004058 !shouldEmitFixup(GV) &&
4059 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
4060}
4061
4062bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
4063 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
4064}
4065
Tom Stellardf8794352012-12-19 22:10:31 +00004066/// This transforms the control flow intrinsics to get the branch destination as
4067/// the last parameter, and also switches the branch target with BR if the need arises.
4068SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
4069 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00004070 SDLoc DL(BRCOND);
Tom Stellardf8794352012-12-19 22:10:31 +00004071
4072 SDNode *Intr = BRCOND.getOperand(1).getNode();
4073 SDValue Target = BRCOND.getOperand(2);
Craig Topper062a2ba2014-04-25 05:30:21 +00004074 SDNode *BR = nullptr;
Tom Stellardbc4497b2016-02-12 23:45:29 +00004075 SDNode *SetCC = nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00004076
4077 if (Intr->getOpcode() == ISD::SETCC) {
4078 // As long as we negate the condition everything is fine
Tom Stellardbc4497b2016-02-12 23:45:29 +00004079 SetCC = Intr;
Tom Stellardf8794352012-12-19 22:10:31 +00004080 Intr = SetCC->getOperand(0).getNode();
4081
4082 } else {
4083 // Get the target from BR if we don't negate the condition
4084 BR = findUser(BRCOND, ISD::BR);
4085 Target = BR->getOperand(1);
4086 }
4087
Matt Arsenault6408c912016-09-16 22:11:18 +00004088 // FIXME: This changes the types of the intrinsics instead of introducing new
4089 // nodes with the correct types.
4090 // e.g. llvm.amdgcn.loop
4091
4092 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
4093 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
4094
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004095 unsigned CFNode = isCFIntrinsic(Intr);
4096 if (CFNode == 0) {
Tom Stellardbc4497b2016-02-12 23:45:29 +00004097 // This is a uniform branch so we don't need to legalize.
4098 return BRCOND;
4099 }
4100
Matt Arsenault6408c912016-09-16 22:11:18 +00004101 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
4102 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;
4103
Tom Stellardbc4497b2016-02-12 23:45:29 +00004104 assert(!SetCC ||
4105 (SetCC->getConstantOperandVal(1) == 1 &&
Tom Stellardbc4497b2016-02-12 23:45:29 +00004106 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
4107 ISD::SETNE));
Tom Stellardf8794352012-12-19 22:10:31 +00004108
Tom Stellardf8794352012-12-19 22:10:31 +00004109 // operands of the new intrinsic call
4110 SmallVector<SDValue, 4> Ops;
Matt Arsenault6408c912016-09-16 22:11:18 +00004111 if (HaveChain)
4112 Ops.push_back(BRCOND.getOperand(0));
4113
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004114 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
Tom Stellardf8794352012-12-19 22:10:31 +00004115 Ops.push_back(Target);
4116
Matt Arsenault6408c912016-09-16 22:11:18 +00004117 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
4118
Tom Stellardf8794352012-12-19 22:10:31 +00004119 // build the new intrinsic call
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00004120 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004121
Matt Arsenault6408c912016-09-16 22:11:18 +00004122 if (!HaveChain) {
4123 SDValue Ops[] = {
4124 SDValue(Result, 0),
4125 BRCOND.getOperand(0)
4126 };
4127
4128 Result = DAG.getMergeValues(Ops, DL).getNode();
4129 }
4130
Tom Stellardf8794352012-12-19 22:10:31 +00004131 if (BR) {
4132 // Give the branch instruction our target
4133 SDValue Ops[] = {
4134 BR->getOperand(0),
4135 BRCOND.getOperand(2)
4136 };
Chandler Carruth356665a2014-08-01 22:09:43 +00004137 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
4138 DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
4139 BR = NewBR.getNode();
Tom Stellardf8794352012-12-19 22:10:31 +00004140 }
4141
4142 SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
4143
4144 // Copy the intrinsic results to registers
4145 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
4146 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
4147 if (!CopyToReg)
4148 continue;
4149
4150 Chain = DAG.getCopyToReg(
4151 Chain, DL,
4152 CopyToReg->getOperand(1),
4153 SDValue(Result, i - 1),
4154 SDValue());
4155
4156 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
4157 }
4158
4159 // Remove the old intrinsic from the chain
4160 DAG.ReplaceAllUsesOfValueWith(
4161 SDValue(Intr, Intr->getNumValues() - 1),
4162 Intr->getOperand(0));
4163
4164 return Chain;
Tom Stellard75aadc22012-12-11 21:25:42 +00004165}
4166
Aakanksha Patild5443f82019-05-29 18:20:11 +00004167SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
4168 SelectionDAG &DAG) const {
4169 MVT VT = Op.getSimpleValueType();
4170 SDLoc DL(Op);
4171 // Only a depth of zero (the current frame's return address) is supported.
4172 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
4173 return DAG.getConstant(0, DL, VT);
4174
4175 MachineFunction &MF = DAG.getMachineFunction();
4176 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4177 // Check for kernel and shader functions
4178 if (Info->isEntryFunction())
4179 return DAG.getConstant(0, DL, VT);
4180
4181 MachineFrameInfo &MFI = MF.getFrameInfo();
4182 // There is a call to @llvm.returnaddress in this function
4183 MFI.setReturnAddressIsTaken(true);
4184
4185 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
4186 // Get the return address reg and mark it as an implicit live-in
4187 unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent()));
4188
4189 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
4190}
4191
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004192SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
4193 SDValue Op,
4194 const SDLoc &DL,
4195 EVT VT) const {
4196 return Op.getValueType().bitsLE(VT) ?
4197 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
4198 DAG.getNode(ISD::FTRUNC, DL, VT, Op);
4199}
4200
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004201SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004202 assert(Op.getValueType() == MVT::f16 &&
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004203 "Do not know how to custom lower FP_ROUND for non-f16 type");
4204
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004205 SDValue Src = Op.getOperand(0);
4206 EVT SrcVT = Src.getValueType();
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004207 if (SrcVT != MVT::f64)
4208 return Op;
4209
4210 SDLoc DL(Op);
Matt Arsenaultafe614c2016-11-18 18:33:36 +00004211
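  // f64 is first converted to the half bits in an i32 (FP_TO_FP16); the low 16
  // bits are then truncated out and bitcast to f16.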
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004212 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
4213 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
Mandeep Singh Grang5e1697e2017-06-06 05:08:36 +00004214 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
Konstantin Zhuravlyovd709efb2016-11-17 04:28:37 +00004215}
4216
Matt Arsenault687ec752018-10-22 16:27:27 +00004217SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
4218 SelectionDAG &DAG) const {
4219 EVT VT = Op.getValueType();
Matt Arsenault055e4dc2019-03-29 19:14:54 +00004220 const MachineFunction &MF = DAG.getMachineFunction();
4221 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4222 bool IsIEEEMode = Info->getMode().IEEE;
Matt Arsenault687ec752018-10-22 16:27:27 +00004223
4224 // FIXME: Assert during selection that this is only selected for
4225 // ieee_mode. Currently a combine can produce the ieee version for non-ieee
4226 // mode functions, but this happens to be OK since it's only done in cases
4227 // where it is known there are no sNaNs.
4228 if (IsIEEEMode)
4229 return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);
4230
4231 if (VT == MVT::v4f16)
4232 return splitBinaryVectorOp(Op, DAG);
4233 return Op;
4234}
4235
Matt Arsenault3e025382017-04-24 17:49:13 +00004236SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
4237 SDLoc SL(Op);
Matt Arsenault3e025382017-04-24 17:49:13 +00004238 SDValue Chain = Op.getOperand(0);
4239
Tom Stellard5bfbae52018-07-11 20:59:01 +00004240 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004241 !Subtarget->isTrapHandlerEnabled())
Matt Arsenault3e025382017-04-24 17:49:13 +00004242 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
Tony Tye43259df2018-05-16 16:19:34 +00004243
4244 MachineFunction &MF = DAG.getMachineFunction();
4245 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4246 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4247 assert(UserSGPR != AMDGPU::NoRegister);
4248 SDValue QueuePtr = CreateLiveInRegister(
4249 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
4250 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
4251 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
4252 QueuePtr, SDValue());
4253 SDValue Ops[] = {
4254 ToReg,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004255 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16),
Tony Tye43259df2018-05-16 16:19:34 +00004256 SGPR01,
4257 ToReg.getValue(1)
4258 };
4259 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
4260}
4261
4262SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
4263 SDLoc SL(Op);
4264 SDValue Chain = Op.getOperand(0);
4265 MachineFunction &MF = DAG.getMachineFunction();
4266
Tom Stellard5bfbae52018-07-11 20:59:01 +00004267 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
Tony Tye43259df2018-05-16 16:19:34 +00004268 !Subtarget->isTrapHandlerEnabled()) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004269 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
Matt Arsenault3e025382017-04-24 17:49:13 +00004270 "debugtrap handler not supported",
4271 Op.getDebugLoc(),
4272 DS_Warning);
Matthias Braunf1caa282017-12-15 22:22:58 +00004273 LLVMContext &Ctx = MF.getFunction().getContext();
Matt Arsenault3e025382017-04-24 17:49:13 +00004274 Ctx.diagnose(NoTrap);
4275 return Chain;
4276 }
Matt Arsenault3e025382017-04-24 17:49:13 +00004277
Tony Tye43259df2018-05-16 16:19:34 +00004278 SDValue Ops[] = {
4279 Chain,
Tom Stellard5bfbae52018-07-11 20:59:01 +00004280 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16)
Tony Tye43259df2018-05-16 16:19:34 +00004281 };
4282 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
Matt Arsenault3e025382017-04-24 17:49:13 +00004283}
4284
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004285SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
Matt Arsenault99c14522016-04-25 19:27:24 +00004286 SelectionDAG &DAG) const {
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004287 // FIXME: Use inline constants (src_{shared, private}_base) instead.
4288 if (Subtarget->hasApertureRegs()) {
Matt Arsenault0da63502018-08-31 05:49:54 +00004289 unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004290 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
4291 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
Matt Arsenault0da63502018-08-31 05:49:54 +00004292 unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004293 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
4294 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
4295 unsigned Encoding =
4296 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
4297 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
4298 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
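    // This forms the hwreg(HW_REG_MEM_BASES, Offset, WidthM1 + 1) immediate
    // for S_GETREG_B32; the field read below is shifted left by its width to
    // recover the 32-bit aperture base.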
Matt Arsenaulte823d922017-02-18 18:29:53 +00004299
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004300 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
4301 SDValue ApertureReg = SDValue(
4302 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
4303 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
4304 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
Matt Arsenaulte823d922017-02-18 18:29:53 +00004305 }
4306
Matt Arsenault99c14522016-04-25 19:27:24 +00004307 MachineFunction &MF = DAG.getMachineFunction();
4308 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004309 unsigned UserSGPR = Info->getQueuePtrUserSGPR();
4310 assert(UserSGPR != AMDGPU::NoRegister);
4311
Matt Arsenault99c14522016-04-25 19:27:24 +00004312 SDValue QueuePtr = CreateLiveInRegister(
Matt Arsenault3b2e2a52016-06-06 20:03:31 +00004313 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
Matt Arsenault99c14522016-04-25 19:27:24 +00004314
4315 // Offset into amd_queue_t for group_segment_aperture_base_hi /
4316 // private_segment_aperture_base_hi.
Matt Arsenault0da63502018-08-31 05:49:54 +00004317 uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
Matt Arsenault99c14522016-04-25 19:27:24 +00004318
Matt Arsenaultb655fa92017-11-29 01:25:12 +00004319 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset);
Matt Arsenault99c14522016-04-25 19:27:24 +00004320
4321 // TODO: Use custom target PseudoSourceValue.
4322 // TODO: We should use the value from the IR intrinsic call, but it might not
4323 // be available, and it is unclear how we would get it.
4324 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
Matt Arsenault0da63502018-08-31 05:49:54 +00004325 AMDGPUAS::CONSTANT_ADDRESS));
Matt Arsenault99c14522016-04-25 19:27:24 +00004326
4327 MachinePointerInfo PtrInfo(V, StructOffset);
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004328 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
Justin Lebar9c375812016-07-15 18:27:10 +00004329 MinAlign(64, StructOffset),
Justin Lebaradbf09e2016-09-11 01:38:58 +00004330 MachineMemOperand::MODereferenceable |
4331 MachineMemOperand::MOInvariant);
Matt Arsenault99c14522016-04-25 19:27:24 +00004332}
4333
4334SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
4335 SelectionDAG &DAG) const {
4336 SDLoc SL(Op);
4337 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
4338
4339 SDValue Src = ASC->getOperand(0);
Matt Arsenault99c14522016-04-25 19:27:24 +00004340 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
4341
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004342 const AMDGPUTargetMachine &TM =
4343 static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
4344
Matt Arsenault99c14522016-04-25 19:27:24 +00004345 // flat -> local/private
Matt Arsenault0da63502018-08-31 05:49:54 +00004346 if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004347 unsigned DestAS = ASC->getDestAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004348
Matt Arsenault0da63502018-08-31 05:49:54 +00004349 if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
4350 DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004351 unsigned NullVal = TM.getNullPointerValue(DestAS);
4352 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault99c14522016-04-25 19:27:24 +00004353 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
4354 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
4355
4356 return DAG.getNode(ISD::SELECT, SL, MVT::i32,
4357 NonNull, Ptr, SegmentNullPtr);
4358 }
4359 }
4360
4361 // local/private -> flat
Matt Arsenault0da63502018-08-31 05:49:54 +00004362 if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenault971c85e2017-03-13 19:47:31 +00004363 unsigned SrcAS = ASC->getSrcAddressSpace();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00004364
Matt Arsenault0da63502018-08-31 05:49:54 +00004365 if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
4366 SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenault747bf8a2017-03-13 20:18:14 +00004367 unsigned NullVal = TM.getNullPointerValue(SrcAS);
4368 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
Matt Arsenault971c85e2017-03-13 19:47:31 +00004369
Matt Arsenault99c14522016-04-25 19:27:24 +00004370 SDValue NonNull
4371 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
4372
Konstantin Zhuravlyov4b3847e2017-04-06 23:02:33 +00004373 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
Matt Arsenault99c14522016-04-25 19:27:24 +00004374 SDValue CvtPtr
4375 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
4376
4377 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
4378 DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
4379 FlatNullPtr);
4380 }
4381 }
4382
4383 // global <-> flat are no-ops and never emitted.
4384
4385 const MachineFunction &MF = DAG.getMachineFunction();
4386 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
Matthias Braunf1caa282017-12-15 22:22:58 +00004387 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
Matt Arsenault99c14522016-04-25 19:27:24 +00004388 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
4389
4390 return DAG.getUNDEF(ASC->getValueType(0));
4391}
4392
Matt Arsenault3aef8092017-01-23 23:09:58 +00004393SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4394 SelectionDAG &DAG) const {
Matt Arsenault67a98152018-05-16 11:47:30 +00004395 SDValue Vec = Op.getOperand(0);
4396 SDValue InsVal = Op.getOperand(1);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004397 SDValue Idx = Op.getOperand(2);
Matt Arsenault67a98152018-05-16 11:47:30 +00004398 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004399 EVT EltVT = VecVT.getVectorElementType();
4400 unsigned VecSize = VecVT.getSizeInBits();
4401 unsigned EltSize = EltVT.getSizeInBits();
Matt Arsenault67a98152018-05-16 11:47:30 +00004402
Matt Arsenault9224c002018-06-05 19:52:46 +00004403
4404 assert(VecSize <= 64);
Matt Arsenault67a98152018-05-16 11:47:30 +00004405
4406 unsigned NumElts = VecVT.getVectorNumElements();
4407 SDLoc SL(Op);
4408 auto KIdx = dyn_cast<ConstantSDNode>(Idx);
4409
Matt Arsenault9224c002018-06-05 19:52:46 +00004410 if (NumElts == 4 && EltSize == 16 && KIdx) {
Matt Arsenault67a98152018-05-16 11:47:30 +00004411 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);
4412
4413 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4414 DAG.getConstant(0, SL, MVT::i32));
4415 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
4416 DAG.getConstant(1, SL, MVT::i32));
4417
4418 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
4419 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);
4420
4421 unsigned Idx = KIdx->getZExtValue();
4422 bool InsertLo = Idx < 2;
4423 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
4424 InsertLo ? LoVec : HiVec,
4425 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
4426 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));
4427
4428 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);
4429
4430 SDValue Concat = InsertLo ?
4431 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
4432 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });
4433
4434 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
4435 }
4436
Matt Arsenault3aef8092017-01-23 23:09:58 +00004437 if (isa<ConstantSDNode>(Idx))
4438 return SDValue();
4439
Matt Arsenault9224c002018-06-05 19:52:46 +00004440 MVT IntVT = MVT::getIntegerVT(VecSize);
Matt Arsenault67a98152018-05-16 11:47:30 +00004441
Matt Arsenault3aef8092017-01-23 23:09:58 +00004442 // Avoid stack access for dynamic indexing.
Matt Arsenault3aef8092017-01-23 23:09:58 +00004443 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
Tim Corringhamfa3e4e52019-02-01 16:51:09 +00004444
4445 // Create a congruent vector with the target value in each element so that
4446 // the required element can be masked and ORed into the target vector.
4447 SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT,
4448 DAG.getSplatBuildVector(VecVT, SL, InsVal));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004449
Matt Arsenault9224c002018-06-05 19:52:46 +00004450 assert(isPowerOf2_32(EltSize));
4451 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4452
Matt Arsenault3aef8092017-01-23 23:09:58 +00004453 // Convert vector index to bit-index.
Matt Arsenault9224c002018-06-05 19:52:46 +00004454 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
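  // For example (illustrative), inserting into a v2i16 at Idx = 1 gives
  // ScaledIdx = 16; the code below then computes BFM = 0xffff << 16 and
  // result = (splat(InsVal) & BFM) | (Vec & ~BFM).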
Matt Arsenault3aef8092017-01-23 23:09:58 +00004455
Matt Arsenault67a98152018-05-16 11:47:30 +00004456 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4457 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT,
4458 DAG.getConstant(0xffff, SL, IntVT),
Matt Arsenault3aef8092017-01-23 23:09:58 +00004459 ScaledIdx);
4460
Matt Arsenault67a98152018-05-16 11:47:30 +00004461 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal);
4462 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT,
4463 DAG.getNOT(SL, BFM, IntVT), BCVec);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004464
Matt Arsenault67a98152018-05-16 11:47:30 +00004465 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS);
4466 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004467}
4468
4469SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4470 SelectionDAG &DAG) const {
4471 SDLoc SL(Op);
4472
4473 EVT ResultVT = Op.getValueType();
4474 SDValue Vec = Op.getOperand(0);
4475 SDValue Idx = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004476 EVT VecVT = Vec.getValueType();
Matt Arsenault9224c002018-06-05 19:52:46 +00004477 unsigned VecSize = VecVT.getSizeInBits();
4478 EVT EltVT = VecVT.getVectorElementType();
4479 assert(VecSize <= 64);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004480
Matt Arsenault98f29462017-05-17 20:30:58 +00004481 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
4482
Hiroshi Inoue372ffa12018-04-13 11:37:06 +00004483 // Make sure we do any optimizations that will make it easier to fold
Matt Arsenault98f29462017-05-17 20:30:58 +00004484 // source modifiers before obscuring it with bit operations.
4485
4486 // XXX - Why doesn't this get called when vector_shuffle is expanded?
4487 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
4488 return Combined;
4489
Matt Arsenault9224c002018-06-05 19:52:46 +00004490 unsigned EltSize = EltVT.getSizeInBits();
4491 assert(isPowerOf2_32(EltSize));
Matt Arsenault3aef8092017-01-23 23:09:58 +00004492
Matt Arsenault9224c002018-06-05 19:52:46 +00004493 MVT IntVT = MVT::getIntegerVT(VecSize);
4494 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32);
4495
4496 // Convert vector index to bit-index (* EltSize)
4497 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004498
Matt Arsenault67a98152018-05-16 11:47:30 +00004499 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec);
4500 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx);
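  // For example (illustrative), extracting element 2 of a v4i16 shifts the
  // bitcast i64 value right by 32; the result is then truncated or extended
  // to the result type below.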
Matt Arsenault3aef8092017-01-23 23:09:58 +00004501
Matt Arsenault67a98152018-05-16 11:47:30 +00004502 if (ResultVT == MVT::f16) {
4503 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt);
4504 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
4505 }
Matt Arsenault3aef8092017-01-23 23:09:58 +00004506
Matt Arsenault67a98152018-05-16 11:47:30 +00004507 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT);
4508}
4509
4510SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
4511 SelectionDAG &DAG) const {
4512 SDLoc SL(Op);
4513 EVT VT = Op.getValueType();
Matt Arsenault67a98152018-05-16 11:47:30 +00004514
Matt Arsenault02dc7e12018-06-15 15:15:46 +00004515 if (VT == MVT::v4i16 || VT == MVT::v4f16) {
4516 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2);
4517
4518 // Turn into pair of packed build_vectors.
4519 // TODO: Special case for constants that can be materialized with s_mov_b64.
4520 SDValue Lo = DAG.getBuildVector(HalfVT, SL,
4521 { Op.getOperand(0), Op.getOperand(1) });
4522 SDValue Hi = DAG.getBuildVector(HalfVT, SL,
4523 { Op.getOperand(2), Op.getOperand(3) });
4524
4525 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo);
4526 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi);
4527
4528 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi });
4529 return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
4530 }
4531
Matt Arsenault1349a042018-05-22 06:32:10 +00004532 assert(VT == MVT::v2f16 || VT == MVT::v2i16);
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004533 assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
Matt Arsenault67a98152018-05-16 11:47:30 +00004534
Matt Arsenault1349a042018-05-22 06:32:10 +00004535 SDValue Lo = Op.getOperand(0);
4536 SDValue Hi = Op.getOperand(1);
Matt Arsenault67a98152018-05-16 11:47:30 +00004537
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004538 // Avoid adding defined bits with the zero_extend.
4539 if (Hi.isUndef()) {
4540 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4541 SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo);
4542 return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo);
4543 }
Matt Arsenault67a98152018-05-16 11:47:30 +00004544
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004545 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004546 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi);
4547
4548 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi,
4549 DAG.getConstant(16, SL, MVT::i32));
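  // The packed value is (zext(Hi) << 16) | zext(Lo) in an i32, which is then
  // bitcast back to the v2i16/v2f16 result type; an undef half is simply
  // omitted from the pack.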
Matt Arsenault3ead7d72018-08-12 08:42:46 +00004550 if (Lo.isUndef())
4551 return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi);
4552
4553 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo);
4554 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo);
Matt Arsenault1349a042018-05-22 06:32:10 +00004555
4556 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi);
Matt Arsenault1349a042018-05-22 06:32:10 +00004557 return DAG.getNode(ISD::BITCAST, SL, VT, Or);
Matt Arsenault3aef8092017-01-23 23:09:58 +00004558}
4559
Tom Stellard418beb72016-07-13 14:23:33 +00004560bool
4561SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4562 // We can fold offsets for anything that doesn't require a GOT relocation.
Matt Arsenault0da63502018-08-31 05:49:54 +00004563 return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
4564 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
4565 GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004566 !shouldEmitGOTReloc(GA->getGlobal());
Tom Stellard418beb72016-07-13 14:23:33 +00004567}
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004568
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004569static SDValue
4570buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
4571 const SDLoc &DL, unsigned Offset, EVT PtrVT,
4572 unsigned GAFlags = SIInstrInfo::MO_NONE) {
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004573 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
4574 // lowered to the following code sequence:
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004575 //
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004576 // For constant address space:
4577 // s_getpc_b64 s[0:1]
4578 // s_add_u32 s0, s0, $symbol
4579 // s_addc_u32 s1, s1, 0
4580 //
4581 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4582 // a fixup or relocation is emitted to replace $symbol with a literal
4583 // constant, which is a pc-relative offset from the encoding of the $symbol
4584 // operand to the global variable.
4585 //
4586 // For global address space:
4587 // s_getpc_b64 s[0:1]
4588 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
4589 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
4590 //
4591 // s_getpc_b64 returns the address of the s_add_u32 instruction and then
4592 // fixups or relocations are emitted to replace $symbol@*@lo and
4593 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
4594 // which is a 64-bit pc-relative offset from the encoding of the $symbol
4595 // operand to the global variable.
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004596 //
4597 // What we want here is an offset from the value returned by s_getpc
4598 // (which is the address of the s_add_u32 instruction) to the global
4599 // variable, but since the encoding of $symbol starts 4 bytes after the start
4600 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
4601 // small. This requires us to add 4 to the global variable offset in order to
4602 // compute the correct address.
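  // Illustrative arithmetic (restating the comment above, not from the
  // original source): let Q be the value returned by s_getpc_b64 (the address
  // of the s_add_u32) and assume the $symbol literal is encoded at Q + 4. A
  // pc-relative fixup of the usual S + A - P form then resolves the literal to
  //   L = GV + (Offset + 4) - (Q + 4)
  // so the final sum is Q + L = GV + Offset, the address we want.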
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004603 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4604 GAFlags);
4605 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
4606 GAFlags == SIInstrInfo::MO_NONE ?
4607 GAFlags : GAFlags + 1);
4608 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
Tom Stellardbf3e6e52016-06-14 20:29:59 +00004609}
4610
Tom Stellard418beb72016-07-13 14:23:33 +00004611SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
4612 SDValue Op,
4613 SelectionDAG &DAG) const {
4614 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
Matt Arsenaultb62a4eb2017-08-01 19:54:18 +00004615 const GlobalValue *GV = GSD->getGlobal();
Matt Arsenaultd1f45712018-09-10 12:16:11 +00004616 if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
4617 GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS ||
4618 GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS)
Tom Stellard418beb72016-07-13 14:23:33 +00004619 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
4620
4621 SDLoc DL(GSD);
Tom Stellard418beb72016-07-13 14:23:33 +00004622 EVT PtrVT = Op.getValueType();
4623
Matt Arsenaultd1f45712018-09-10 12:16:11 +00004624 // FIXME: Should not make address space based decisions here.
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004625 if (shouldEmitFixup(GV))
Tom Stellard418beb72016-07-13 14:23:33 +00004626 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +00004627 else if (shouldEmitPCReloc(GV))
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004628 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
4629 SIInstrInfo::MO_REL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004630
4631 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +00004632 SIInstrInfo::MO_GOTPCREL32);
Tom Stellard418beb72016-07-13 14:23:33 +00004633
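  // Illustrative note (not from the original source): when neither a direct
  // fixup nor a pc-relative relocation applies, the pc-relative sequence above
  // materializes the address of the GOT slot, and the constant-address-space
  // load built below fetches the actual pointer value from that slot.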
4634 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
Matt Arsenault0da63502018-08-31 05:49:54 +00004635 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
Tom Stellard418beb72016-07-13 14:23:33 +00004636 const DataLayout &DataLayout = DAG.getDataLayout();
4637 unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
Matt Arsenaultd77fcc22018-09-10 02:23:39 +00004638 MachinePointerInfo PtrInfo
4639 = MachinePointerInfo::getGOT(DAG.getMachineFunction());
Tom Stellard418beb72016-07-13 14:23:33 +00004640
Justin Lebar9c375812016-07-15 18:27:10 +00004641 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
Justin Lebaradbf09e2016-09-11 01:38:58 +00004642 MachineMemOperand::MODereferenceable |
4643 MachineMemOperand::MOInvariant);
Tom Stellard418beb72016-07-13 14:23:33 +00004644}
4645
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004646SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
4647 const SDLoc &DL, SDValue V) const {
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004648 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
4649 // the destination register.
4650 //
Tom Stellardfc92e772015-05-12 14:18:14 +00004651 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
4652 // so we will end up with redundant moves to m0.
4653 //
Matt Arsenault4ac341c2016-04-14 21:58:15 +00004654 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
4655
4656 // A Null SDValue creates a glue result.
4657 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
4658 V, Chain);
4659 return SDValue(M0, 0);
Tom Stellardfc92e772015-05-12 14:18:14 +00004660}
4661
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004662SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
4663 SDValue Op,
4664 MVT VT,
4665 unsigned Offset) const {
4666 SDLoc SL(Op);
Matt Arsenaulte622dc32017-04-11 22:29:24 +00004667 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
Matt Arsenault7b4826e2018-05-30 16:17:51 +00004668 DAG.getEntryNode(), Offset, 4, false);
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00004669 // The local size values will have the high 16 bits as zero.
4670 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
4671 DAG.getValueType(VT));
4672}
4673
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004674static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4675 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004676 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004677 "non-hsa intrinsic with hsa target",
4678 DL.getDebugLoc());
4679 DAG.getContext()->diagnose(BadIntrin);
4680 return DAG.getUNDEF(VT);
4681}
4682
Benjamin Kramer061f4a52017-01-13 14:39:03 +00004683static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
4684 EVT VT) {
Matthias Braunf1caa282017-12-15 22:22:58 +00004685 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00004686 "intrinsic not supported on subtarget",
4687 DL.getDebugLoc());
Matt Arsenaulte0132462016-01-30 05:19:45 +00004688 DAG.getContext()->diagnose(BadIntrin);
4689 return DAG.getUNDEF(VT);
4690}
4691
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004692static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
4693 ArrayRef<SDValue> Elts) {
4694 assert(!Elts.empty());
4695 MVT Type;
4696 unsigned NumElts;
4697
4698 if (Elts.size() == 1) {
4699 Type = MVT::f32;
4700 NumElts = 1;
4701 } else if (Elts.size() == 2) {
4702 Type = MVT::v2f32;
4703 NumElts = 2;
4704 } else if (Elts.size() <= 4) {
4705 Type = MVT::v4f32;
4706 NumElts = 4;
4707 } else if (Elts.size() <= 8) {
4708 Type = MVT::v8f32;
4709 NumElts = 8;
4710 } else {
4711 assert(Elts.size() <= 16);
4712 Type = MVT::v16f32;
4713 NumElts = 16;
4714 }
4715
4716 SmallVector<SDValue, 16> VecElts(NumElts);
4717 for (unsigned i = 0; i < Elts.size(); ++i) {
4718 SDValue Elt = Elts[i];
4719 if (Elt.getValueType() != MVT::f32)
4720 Elt = DAG.getBitcast(MVT::f32, Elt);
4721 VecElts[i] = Elt;
4722 }
4723 for (unsigned i = Elts.size(); i < NumElts; ++i)
4724 VecElts[i] = DAG.getUNDEF(MVT::f32);
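  // For example (illustrative only): three 32-bit elements are widened to a
  // v4f32 build_vector whose last lane is undef, while a single element is
  // simply returned as an f32 scalar below.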
4725
4726 if (NumElts == 1)
4727 return VecElts[0];
4728 return DAG.getBuildVector(Type, DL, VecElts);
4729}
4730
4731static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG,
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004732 SDValue *GLC, SDValue *SLC, SDValue *DLC) {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00004733 auto CachePolicyConst = cast<ConstantSDNode>(CachePolicy.getNode());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004734
4735 uint64_t Value = CachePolicyConst->getZExtValue();
4736 SDLoc DL(CachePolicy);
4737 if (GLC) {
4738 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4739 Value &= ~(uint64_t)0x1;
4740 }
4741 if (SLC) {
4742 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4743 Value &= ~(uint64_t)0x2;
4744 }
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004745 if (DLC) {
4746 *DLC = DAG.getTargetConstant((Value & 0x4) ? 1 : 0, DL, MVT::i32);
4747 Value &= ~(uint64_t)0x4;
4748 }
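  // Illustrative example (not from the original source): a cachepolicy
  // immediate of 0x3 decodes to GLC = 1 and SLC = 1 (and DLC = 0 when it is
  // requested); any unrecognized bit left over makes this helper return false,
  // and the callers simply return the original operation unlowered.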
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004749
4750 return Value == 0;
4751}
4752
David Stuttardf77079f2019-01-14 11:55:24 +00004753// Reconstruct the required return value for an image load intrinsic.
4754// This is more complicated due to the optional use of TexFailCtrl, which
4755// means the required return type is an aggregate.
4756static SDValue constructRetValue(SelectionDAG &DAG,
4757 MachineSDNode *Result,
4758 ArrayRef<EVT> ResultTypes,
4759 bool IsTexFail, bool Unpacked, bool IsD16,
4760 int DMaskPop, int NumVDataDwords,
4761 const SDLoc &DL, LLVMContext &Context) {
4762 // Determine the required return type. This is the same regardless of the IsTexFail flag.
4763 EVT ReqRetVT = ResultTypes[0];
4764 EVT ReqRetEltVT = ReqRetVT.isVector() ? ReqRetVT.getVectorElementType() : ReqRetVT;
4765 int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1;
4766 EVT AdjEltVT = Unpacked && IsD16 ? MVT::i32 : ReqRetEltVT;
4767 EVT AdjVT = Unpacked ? ReqRetNumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, ReqRetNumElts)
4768 : AdjEltVT
4769 : ReqRetVT;
4770
4771 // Extract the data part of the result and bitcast it to the same type as
4772 // the required return type.
4773 int NumElts;
4774 if (IsD16 && !Unpacked)
4775 NumElts = NumVDataDwords << 1;
4776 else
4777 NumElts = NumVDataDwords;
4778
4779 EVT CastVT = NumElts > 1 ? EVT::getVectorVT(Context, AdjEltVT, NumElts)
4780 : AdjEltVT;
4781
Tim Renouf6f0191a2019-03-22 15:21:11 +00004782 // Special case for v6f16. Rather than add support for this, use v3i32 to
David Stuttardf77079f2019-01-14 11:55:24 +00004783 // extract the data elements
Tim Renouf6f0191a2019-03-22 15:21:11 +00004784 bool V6F16Special = false;
4785 if (NumElts == 6) {
4786 CastVT = EVT::getVectorVT(Context, MVT::i32, NumElts / 2);
David Stuttardf77079f2019-01-14 11:55:24 +00004787 DMaskPop >>= 1;
4788 ReqRetNumElts >>= 1;
Tim Renouf6f0191a2019-03-22 15:21:11 +00004789 V6F16Special = true;
David Stuttardf77079f2019-01-14 11:55:24 +00004790 AdjVT = MVT::v2i32;
4791 }
4792
4793 SDValue N = SDValue(Result, 0);
4794 SDValue CastRes = DAG.getNode(ISD::BITCAST, DL, CastVT, N);
4795
4796 // Iterate over the result
4797 SmallVector<SDValue, 4> BVElts;
4798
4799 if (CastVT.isVector()) {
4800 DAG.ExtractVectorElements(CastRes, BVElts, 0, DMaskPop);
4801 } else {
4802 BVElts.push_back(CastRes);
4803 }
4804 int ExtraElts = ReqRetNumElts - DMaskPop;
4805 while(ExtraElts--)
4806 BVElts.push_back(DAG.getUNDEF(AdjEltVT));
4807
4808 SDValue PreTFCRes;
4809 if (ReqRetNumElts > 1) {
4810 SDValue NewVec = DAG.getBuildVector(AdjVT, DL, BVElts);
4811 if (IsD16 && Unpacked)
4812 PreTFCRes = adjustLoadValueTypeImpl(NewVec, ReqRetVT, DL, DAG, Unpacked);
4813 else
4814 PreTFCRes = NewVec;
4815 } else {
4816 PreTFCRes = BVElts[0];
4817 }
4818
Tim Renouf6f0191a2019-03-22 15:21:11 +00004819 if (V6F16Special)
David Stuttardf77079f2019-01-14 11:55:24 +00004820 PreTFCRes = DAG.getNode(ISD::BITCAST, DL, MVT::v4f16, PreTFCRes);
4821
4822 if (!IsTexFail) {
4823 if (Result->getNumValues() > 1)
4824 return DAG.getMergeValues({PreTFCRes, SDValue(Result, 1)}, DL);
4825 else
4826 return PreTFCRes;
4827 }
4828
4829 // Extract the TexFail result and insert into aggregate return
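  // Illustrative example (not from the original source): for a non-d16 load
  // with dmask = 0xf and TFE enabled, five dwords come back; the first four
  // form the v4f32 data value and the fifth becomes the i32 texfail value
  // merged in below.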
4830 SmallVector<SDValue, 1> TFCElt;
4831 DAG.ExtractVectorElements(N, TFCElt, DMaskPop, 1);
4832 SDValue TFCRes = DAG.getNode(ISD::BITCAST, DL, ResultTypes[1], TFCElt[0]);
4833 return DAG.getMergeValues({PreTFCRes, TFCRes, SDValue(Result, 1)}, DL);
4834}
4835
4836static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
4837 SDValue *LWE, bool &IsTexFail) {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00004838 auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode());
David Stuttardf77079f2019-01-14 11:55:24 +00004839
4840 uint64_t Value = TexFailCtrlConst->getZExtValue();
4841 if (Value) {
4842 IsTexFail = true;
4843 }
4844
4845 SDLoc DL(TexFailCtrlConst);
4846 *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32);
4847 Value &= ~(uint64_t)0x1;
4848 *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32);
4849 Value &= ~(uint64_t)0x2;
4850
4851 return Value == 0;
4852}
4853
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004854SDValue SITargetLowering::lowerImage(SDValue Op,
4855 const AMDGPU::ImageDimIntrinsicInfo *Intr,
4856 SelectionDAG &DAG) const {
4857 SDLoc DL(Op);
Ryan Taylor1f334d02018-08-28 15:07:30 +00004858 MachineFunction &MF = DAG.getMachineFunction();
4859 const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004860 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4861 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
4862 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004863 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
4864 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
Piotr Sobczak9b11e932019-06-10 15:58:51 +00004865 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
4866 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004867 unsigned IntrOpcode = Intr->BaseOpcode;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00004868 bool IsGFX10 = Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004869
David Stuttardf77079f2019-01-14 11:55:24 +00004870 SmallVector<EVT, 3> ResultTypes(Op->value_begin(), Op->value_end());
4871 SmallVector<EVT, 3> OrigResultTypes(Op->value_begin(), Op->value_end());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004872 bool IsD16 = false;
Ryan Taylor1f334d02018-08-28 15:07:30 +00004873 bool IsA16 = false;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004874 SDValue VData;
4875 int NumVDataDwords;
David Stuttardf77079f2019-01-14 11:55:24 +00004876 bool AdjustRetType = false;
4877
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004878 unsigned AddrIdx; // Index of first address argument
4879 unsigned DMask;
David Stuttardf77079f2019-01-14 11:55:24 +00004880 unsigned DMaskLanes = 0;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004881
4882 if (BaseOpcode->Atomic) {
4883 VData = Op.getOperand(2);
4884
4885 bool Is64Bit = VData.getValueType() == MVT::i64;
4886 if (BaseOpcode->AtomicX2) {
4887 SDValue VData2 = Op.getOperand(3);
4888 VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL,
4889 {VData, VData2});
4890 if (Is64Bit)
4891 VData = DAG.getBitcast(MVT::v4i32, VData);
4892
4893 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32;
4894 DMask = Is64Bit ? 0xf : 0x3;
4895 NumVDataDwords = Is64Bit ? 4 : 2;
4896 AddrIdx = 4;
4897 } else {
4898 DMask = Is64Bit ? 0x3 : 0x1;
4899 NumVDataDwords = Is64Bit ? 2 : 1;
4900 AddrIdx = 3;
4901 }
4902 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00004903 unsigned DMaskIdx = BaseOpcode->Store ? 3 : isa<MemSDNode>(Op) ? 2 : 1;
Matt Arsenaultcaf13162019-03-12 21:02:54 +00004904 auto DMaskConst = cast<ConstantSDNode>(Op.getOperand(DMaskIdx));
David Stuttardf77079f2019-01-14 11:55:24 +00004905 DMask = DMaskConst->getZExtValue();
4906 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004907
4908 if (BaseOpcode->Store) {
4909 VData = Op.getOperand(2);
4910
4911 MVT StoreVT = VData.getSimpleValueType();
4912 if (StoreVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004913 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004914 !BaseOpcode->HasD16)
4915 return Op; // D16 is unsupported for this instruction
4916
4917 IsD16 = true;
4918 VData = handleD16VData(VData, DAG);
4919 }
4920
4921 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004922 } else {
David Stuttardf77079f2019-01-14 11:55:24 +00004923 // Work out the number of dwords based on the dmask popcount, the underlying
4924 // type, and whether packing is supported.
4925 MVT LoadVT = ResultTypes[0].getSimpleVT();
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004926 if (LoadVT.getScalarType() == MVT::f16) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00004927 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ||
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004928 !BaseOpcode->HasD16)
4929 return Op; // D16 is unsupported for this instruction
4930
4931 IsD16 = true;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004932 }
4933
David Stuttardf77079f2019-01-14 11:55:24 +00004934 // Confirm that the return type is large enough for the specified dmask.
4935 if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) ||
4936 (!LoadVT.isVector() && DMaskLanes > 1))
4937 return Op;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004938
David Stuttardf77079f2019-01-14 11:55:24 +00004939 if (IsD16 && !Subtarget->hasUnpackedD16VMem())
4940 NumVDataDwords = (DMaskLanes + 1) / 2;
4941 else
4942 NumVDataDwords = DMaskLanes;
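      // Illustrative example (not from the original source): a d16 load with
      // dmask = 0b0111 has DMaskLanes = 3, so a packed-d16 target needs
      // (3 + 1) / 2 = 2 return dwords while an unpacked-d16 target needs 3.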
4943
4944 AdjustRetType = true;
4945 }
David Stuttardc6603862018-11-29 20:14:17 +00004946
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004947 AddrIdx = DMaskIdx + 1;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004948 }
4949
Ryan Taylor1f334d02018-08-28 15:07:30 +00004950 unsigned NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
4951 unsigned NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
4952 unsigned NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
4953 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + NumGradients +
4954 NumCoords + NumLCM;
4955 unsigned NumMIVAddrs = NumVAddrs;
4956
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00004957 SmallVector<SDValue, 4> VAddrs;
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004958
4959 // Optimize _L to _LZ when the 'lod' operand is a constant zero (or negative).
4960 if (LZMappingInfo) {
4961 if (auto ConstantLod =
Ryan Taylor1f334d02018-08-28 15:07:30 +00004962 dyn_cast<ConstantFPSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004963 if (ConstantLod->isZero() || ConstantLod->isNegative()) {
4964 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
Ryan Taylor1f334d02018-08-28 15:07:30 +00004965 NumMIVAddrs--; // remove 'lod'
Ryan Taylor894c8fd2018-08-01 12:12:01 +00004966 }
4967 }
4968 }
4969
Piotr Sobczak9b11e932019-06-10 15:58:51 +00004970 // Optimize _mip away when 'lod' is a constant zero.
4971 if (MIPMappingInfo) {
4972 if (auto ConstantLod =
4973 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx+NumVAddrs-1))) {
4974 if (ConstantLod->isNullValue()) {
4975 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
4976 NumMIVAddrs--; // remove 'lod'
4977 }
4978 }
4979 }
4980
Ryan Taylor1f334d02018-08-28 15:07:30 +00004981 // Check for 16-bit addresses and pack them if found.
4982 unsigned DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
4983 MVT VAddrVT = Op.getOperand(DimIdx).getSimpleValueType();
Neil Henning63718b22018-10-31 10:34:48 +00004984 const MVT VAddrScalarVT = VAddrVT.getScalarType();
4985 if (((VAddrScalarVT == MVT::f16) || (VAddrScalarVT == MVT::i16)) &&
Ryan Taylor1f334d02018-08-28 15:07:30 +00004986 ST->hasFeature(AMDGPU::FeatureR128A16)) {
4987 IsA16 = true;
Neil Henning63718b22018-10-31 10:34:48 +00004988 const MVT VectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16;
Ryan Taylor1f334d02018-08-28 15:07:30 +00004989 for (unsigned i = AddrIdx; i < (AddrIdx + NumMIVAddrs); ++i) {
4990 SDValue AddrLo, AddrHi;
4991 // Push back extra arguments.
4992 if (i < DimIdx) {
4993 AddrLo = Op.getOperand(i);
4994 } else {
4995 AddrLo = Op.getOperand(i);
4996 // Dz/dh, dz/dv and the last odd coord are packed with undef. Also,
4997 // in 1D, derivatives dx/dh and dx/dv are packed with undef.
4998 if (((i + 1) >= (AddrIdx + NumMIVAddrs)) ||
Matt Arsenault0da63502018-08-31 05:49:54 +00004999 ((NumGradients / 2) % 2 == 1 &&
5000 (i == DimIdx + (NumGradients / 2) - 1 ||
Ryan Taylor1f334d02018-08-28 15:07:30 +00005001 i == DimIdx + NumGradients - 1))) {
5002 AddrHi = DAG.getUNDEF(MVT::f16);
5003 } else {
5004 AddrHi = Op.getOperand(i + 1);
5005 i++;
5006 }
Neil Henning63718b22018-10-31 10:34:48 +00005007 AddrLo = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorVT,
Ryan Taylor1f334d02018-08-28 15:07:30 +00005008 {AddrLo, AddrHi});
5009 AddrLo = DAG.getBitcast(MVT::i32, AddrLo);
5010 }
5011 VAddrs.push_back(AddrLo);
5012 }
5013 } else {
5014 for (unsigned i = 0; i < NumMIVAddrs; ++i)
5015 VAddrs.push_back(Op.getOperand(AddrIdx + i));
5016 }
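  // Illustrative example (not from the original source): for a 2D sample with
  // f16 coordinates, the (s, t) pair is packed above into a single v2f16
  // dword; when the number of packable values is odd, the last one is paired
  // with undef in its high half.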
5017
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005018 // If the register allocator cannot place the address registers contiguously
5019 // without introducing moves, then using the non-sequential address encoding
5020 // is always preferable, since it saves VALU instructions and is usually a
5021 // wash in terms of code size or even better.
5022 //
5023 // However, we currently have no way of hinting to the register allocator that
5024 // MIMG addresses should be placed contiguously when it is possible to do so,
5025 // so force non-NSA for the common 2-address case as a heuristic.
5026 //
5027 // SIShrinkInstructions will convert NSA encodings to non-NSA after register
5028 // allocation when possible.
5029 bool UseNSA =
5030 ST->hasFeature(AMDGPU::FeatureNSAEncoding) && VAddrs.size() >= 3;
5031 SDValue VAddr;
5032 if (!UseNSA)
5033 VAddr = getBuildDwordsVector(DAG, DL, VAddrs);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005034
5035 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1);
5036 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1);
5037 unsigned CtrlIdx; // Index of texfailctrl argument
5038 SDValue Unorm;
5039 if (!BaseOpcode->Sampler) {
5040 Unorm = True;
5041 CtrlIdx = AddrIdx + NumVAddrs + 1;
5042 } else {
5043 auto UnormConst =
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005044 cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2));
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005045
5046 Unorm = UnormConst->getZExtValue() ? True : False;
5047 CtrlIdx = AddrIdx + NumVAddrs + 3;
5048 }
5049
David Stuttardf77079f2019-01-14 11:55:24 +00005050 SDValue TFE;
5051 SDValue LWE;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005052 SDValue TexFail = Op.getOperand(CtrlIdx);
David Stuttardf77079f2019-01-14 11:55:24 +00005053 bool IsTexFail = false;
5054 if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005055 return Op;
5056
David Stuttardf77079f2019-01-14 11:55:24 +00005057 if (IsTexFail) {
5058 if (!DMaskLanes) {
5059 // Expecting to get an error flag since TFC is on and dmask is 0.
5060 // Force dmask to be at least 1, otherwise the instruction will fail.
5061 DMask = 0x1;
5062 DMaskLanes = 1;
5063 NumVDataDwords = 1;
5064 }
5065 NumVDataDwords += 1;
5066 AdjustRetType = true;
5067 }
5068
5069 // Something earlier has tagged the return type as needing adjustment.
5070 // This happens if the instruction is a load or has TexFailCtrl flags set.
5071 if (AdjustRetType) {
5072 // NumVDataDwords reflects the true number of dwords required in the return type
5073 if (DMaskLanes == 0 && !BaseOpcode->Store) {
5074 // This is a no-op load. It can be eliminated.
5075 SDValue Undef = DAG.getUNDEF(Op.getValueType());
5076 if (isa<MemSDNode>(Op))
5077 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL);
5078 return Undef;
5079 }
5080
David Stuttardf77079f2019-01-14 11:55:24 +00005081 EVT NewVT = NumVDataDwords > 1 ?
5082 EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumVDataDwords)
5083 : MVT::f32;
5084
5085 ResultTypes[0] = NewVT;
5086 if (ResultTypes.size() == 3) {
5087 // The original result was an aggregate type used for TexFailCtrl results.
5088 // The actual instruction returns as a vector type, which has now been
5089 // created. Remove the aggregate result.
5090 ResultTypes.erase(&ResultTypes[1]);
5091 }
5092 }
5093
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005094 SDValue GLC;
5095 SDValue SLC;
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005096 SDValue DLC;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005097 if (BaseOpcode->Atomic) {
5098 GLC = True; // TODO no-return optimization
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005099 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC,
5100 IsGFX10 ? &DLC : nullptr))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005101 return Op;
5102 } else {
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005103 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC,
5104 IsGFX10 ? &DLC : nullptr))
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005105 return Op;
5106 }
5107
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005108 SmallVector<SDValue, 26> Ops;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005109 if (BaseOpcode->Store || BaseOpcode->Atomic)
5110 Ops.push_back(VData); // vdata
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005111 if (UseNSA) {
5112 for (const SDValue &Addr : VAddrs)
5113 Ops.push_back(Addr);
5114 } else {
5115 Ops.push_back(VAddr);
5116 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005117 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc
5118 if (BaseOpcode->Sampler)
5119 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler
5120 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32));
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005121 if (IsGFX10)
5122 Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32));
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005123 Ops.push_back(Unorm);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005124 if (IsGFX10)
5125 Ops.push_back(DLC);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005126 Ops.push_back(GLC);
5127 Ops.push_back(SLC);
Ryan Taylor1f334d02018-08-28 15:07:30 +00005128 Ops.push_back(IsA16 && // a16 or r128
5129 ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False);
David Stuttardf77079f2019-01-14 11:55:24 +00005130 Ops.push_back(TFE); // tfe
5131 Ops.push_back(LWE); // lwe
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005132 if (!IsGFX10)
5133 Ops.push_back(DimInfo->DA ? True : False);
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005134 if (BaseOpcode->HasD16)
5135 Ops.push_back(IsD16 ? True : False);
5136 if (isa<MemSDNode>(Op))
5137 Ops.push_back(Op.getOperand(0)); // chain
5138
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005139 int NumVAddrDwords =
5140 UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32;
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005141 int Opcode = -1;
5142
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005143 if (IsGFX10) {
5144 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
5145 UseNSA ? AMDGPU::MIMGEncGfx10NSA
5146 : AMDGPU::MIMGEncGfx10Default,
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005147 NumVDataDwords, NumVAddrDwords);
Stanislav Mekhanoshin692560d2019-05-01 16:32:58 +00005148 } else {
5149 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5150 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
5151 NumVDataDwords, NumVAddrDwords);
5152 if (Opcode == -1)
5153 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
5154 NumVDataDwords, NumVAddrDwords);
5155 }
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005156 assert(Opcode != -1);
5157
5158 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops);
5159 if (auto MemOp = dyn_cast<MemSDNode>(Op)) {
Chandler Carruth66654b72018-08-14 23:30:32 +00005160 MachineMemOperand *MemRef = MemOp->getMemOperand();
5161 DAG.setNodeMemRefs(NewNode, {MemRef});
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005162 }
5163
5164 if (BaseOpcode->AtomicX2) {
5165 SmallVector<SDValue, 1> Elt;
5166 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1);
5167 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL);
David Stuttardf77079f2019-01-14 11:55:24 +00005168 } else if (!BaseOpcode->Store) {
5169 return constructRetValue(DAG, NewNode,
5170 OrigResultTypes, IsTexFail,
5171 Subtarget->hasUnpackedD16VMem(), IsD16,
5172 DMaskLanes, NumVDataDwords, DL,
5173 *DAG.getContext());
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005174 }
5175
5176 return SDValue(NewNode, 0);
5177}
5178
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005179SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
5180 SDValue Offset, SDValue GLC,
5181 SelectionDAG &DAG) const {
5182 MachineFunction &MF = DAG.getMachineFunction();
5183 MachineMemOperand *MMO = MF.getMachineMemOperand(
5184 MachinePointerInfo(),
5185 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
5186 MachineMemOperand::MOInvariant,
5187 VT.getStoreSize(), VT.getStoreSize());
5188
5189 if (!Offset->isDivergent()) {
5190 SDValue Ops[] = {
5191 Rsrc,
5192 Offset, // Offset
5193 GLC // glc
5194 };
5195 return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
5196 DAG.getVTList(VT), Ops, VT, MMO);
5197 }
5198
5199 // We have a divergent offset. Emit a MUBUF buffer load instead. We can
5200 // assume that the buffer is unswizzled.
5201 SmallVector<SDValue, 4> Loads;
5202 unsigned NumLoads = 1;
5203 MVT LoadVT = VT.getSimpleVT();
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005204 unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
Simon Pilgrim44dfd812018-12-07 21:44:25 +00005205 assert((LoadVT.getScalarType() == MVT::i32 ||
5206 LoadVT.getScalarType() == MVT::f32) &&
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005207 isPowerOf2_32(NumElts));
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005208
Matt Arsenaultce2e0532018-12-07 18:41:39 +00005209 if (NumElts == 8 || NumElts == 16) {
5210 NumLoads = NumElts == 16 ? 4 : 2;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005211 LoadVT = MVT::v4i32;
5212 }
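  // Illustrative example (not from the original source): a divergent v8i32
  // s_buffer_load becomes NumLoads = 2 v4i32 buffer loads at immediate offsets
  // InstOffset and InstOffset + 16, recombined below with CONCAT_VECTORS.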
5213
5214 SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue});
5215 unsigned CachePolicy = cast<ConstantSDNode>(GLC)->getZExtValue();
5216 SDValue Ops[] = {
5217 DAG.getEntryNode(), // Chain
5218 Rsrc, // rsrc
5219 DAG.getConstant(0, DL, MVT::i32), // vindex
5220 {}, // voffset
5221 {}, // soffset
5222 {}, // offset
5223 DAG.getConstant(CachePolicy, DL, MVT::i32), // cachepolicy
5224 DAG.getConstant(0, DL, MVT::i1), // idxen
5225 };
5226
5227 // Use the alignment to ensure that the required offsets will fit into the
5228 // immediate offsets.
5229 setBufferOffsets(Offset, DAG, &Ops[3], NumLoads > 1 ? 16 * NumLoads : 4);
5230
5231 uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
5232 for (unsigned i = 0; i < NumLoads; ++i) {
5233 Ops[5] = DAG.getConstant(InstOffset + 16 * i, DL, MVT::i32);
5234 Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
5235 Ops, LoadVT, MMO));
5236 }
5237
5238 if (VT == MVT::v8i32 || VT == MVT::v16i32)
5239 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
5240
5241 return Loads[0];
5242}
5243
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005244SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
5245 SelectionDAG &DAG) const {
5246 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellarddcb9f092015-07-09 21:20:37 +00005247 auto MFI = MF.getInfo<SIMachineFunctionInfo>();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005248
5249 EVT VT = Op.getValueType();
5250 SDLoc DL(Op);
5251 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5252
Sanjay Patela2607012015-09-16 16:31:21 +00005253 // TODO: Should this propagate fast-math-flags?
5254
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005255 switch (IntrinsicID) {
Tom Stellard2f3f9852017-01-25 01:25:13 +00005256 case Intrinsic::amdgcn_implicit_buffer_ptr: {
Konstantin Zhuravlyovaa067cb2018-10-04 21:02:16 +00005257 if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction()))
Matt Arsenault10fc0622017-06-26 03:01:31 +00005258 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005259 return getPreloadedValue(DAG, *MFI, VT,
5260 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
Tom Stellard2f3f9852017-01-25 01:25:13 +00005261 }
Tom Stellard48f29f22015-11-26 00:43:29 +00005262 case Intrinsic::amdgcn_dispatch_ptr:
Matt Arsenault48ab5262016-04-25 19:27:18 +00005263 case Intrinsic::amdgcn_queue_ptr: {
Konstantin Zhuravlyovaa067cb2018-10-04 21:02:16 +00005264 if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) {
Oliver Stannard7e7d9832016-02-02 13:52:43 +00005265 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00005266 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
Oliver Stannard7e7d9832016-02-02 13:52:43 +00005267 DL.getDebugLoc());
Matt Arsenault800fecf2016-01-11 21:18:33 +00005268 DAG.getContext()->diagnose(BadIntrin);
5269 return DAG.getUNDEF(VT);
5270 }
5271
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005272 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
5273 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR;
5274 return getPreloadedValue(DAG, *MFI, VT, RegID);
Matt Arsenault48ab5262016-04-25 19:27:18 +00005275 }
Jan Veselyfea814d2016-06-21 20:46:20 +00005276 case Intrinsic::amdgcn_implicitarg_ptr: {
Matt Arsenault9166ce82017-07-28 15:52:08 +00005277 if (MFI->isEntryFunction())
5278 return getImplicitArgPtr(DAG, DL);
Matt Arsenault817c2532017-08-03 23:12:44 +00005279 return getPreloadedValue(DAG, *MFI, VT,
5280 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
Jan Veselyfea814d2016-06-21 20:46:20 +00005281 }
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00005282 case Intrinsic::amdgcn_kernarg_segment_ptr: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005283 return getPreloadedValue(DAG, *MFI, VT,
5284 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
Matt Arsenaultdc4ebad2016-04-29 21:16:52 +00005285 }
Matt Arsenault8d718dc2016-07-22 17:01:30 +00005286 case Intrinsic::amdgcn_dispatch_id: {
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005287 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID);
Matt Arsenault8d718dc2016-07-22 17:01:30 +00005288 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005289 case Intrinsic::amdgcn_rcp:
5290 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
5291 case Intrinsic::amdgcn_rsq:
5292 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00005293 case Intrinsic::amdgcn_rsq_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00005294 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005295 return emitRemovedIntrinsicError(DAG, DL, VT);
5296
5297 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
Eugene Zelenko66203762017-01-21 00:53:49 +00005298 case Intrinsic::amdgcn_rcp_legacy:
Tom Stellard5bfbae52018-07-11 20:59:01 +00005299 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault32fc5272016-07-26 16:45:45 +00005300 return emitRemovedIntrinsicError(DAG, DL, VT);
5301 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
Matt Arsenault09b2c4a2016-07-15 21:26:52 +00005302 case Intrinsic::amdgcn_rsq_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005303 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenault79963e82016-02-13 01:03:00 +00005304 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
Tom Stellard48f29f22015-11-26 00:43:29 +00005305
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005306 Type *Type = VT.getTypeForEVT(*DAG.getContext());
5307 APFloat Max = APFloat::getLargest(Type->getFltSemantics());
5308 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
5309
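    // Illustrative restatement (not from the original source): on subtargets
    // without the clamped instruction this emulates
    //   rsq_clamp(x) ~= fmax(fmin(rsq(x), +largest), -largest)
    // where 'largest' is the largest finite value of the type.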
5310 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
5311 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
5312 DAG.getConstantFP(Max, DL, VT));
5313 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
5314 DAG.getConstantFP(Min, DL, VT));
5315 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005316 case Intrinsic::r600_read_ngroups_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005317 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005318 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005319
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005320 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005321 SI::KernelInputOffsets::NGROUPS_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005322 case Intrinsic::r600_read_ngroups_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005323 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005324 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005325
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005326 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005327 SI::KernelInputOffsets::NGROUPS_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005328 case Intrinsic::r600_read_ngroups_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005329 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005330 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005331
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005332 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005333 SI::KernelInputOffsets::NGROUPS_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005334 case Intrinsic::r600_read_global_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005335 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005336 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005337
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005338 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005339 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005340 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005341 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005342 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005343
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005344 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005345 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005346 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005347 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005348 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005349
Matt Arsenaulte622dc32017-04-11 22:29:24 +00005350 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
Matt Arsenault7b4826e2018-05-30 16:17:51 +00005351 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005352 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005353 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005354 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005355
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005356 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5357 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005358 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005359 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005360 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005361
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005362 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5363 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005364 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00005365 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00005366 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00005367
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00005368 return lowerImplicitZextParam(DAG, Op, MVT::i16,
5369 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00005370 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005371 case Intrinsic::r600_read_tgid_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005372 return getPreloadedValue(DAG, *MFI, VT,
5373 AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
Matt Arsenault43976df2016-01-30 04:25:19 +00005374 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005375 case Intrinsic::r600_read_tgid_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005376 return getPreloadedValue(DAG, *MFI, VT,
5377 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
Matt Arsenault43976df2016-01-30 04:25:19 +00005378 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005379 case Intrinsic::r600_read_tgid_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005380 return getPreloadedValue(DAG, *MFI, VT,
5381 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
Reid Kleckner4dc0b1a2018-11-01 19:54:45 +00005382 case Intrinsic::amdgcn_workitem_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005383 case Intrinsic::r600_read_tidig_x:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005384 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5385 SDLoc(DAG.getEntryNode()),
5386 MFI->getArgInfo().WorkItemIDX);
Matt Arsenault43976df2016-01-30 04:25:19 +00005387 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005388 case Intrinsic::r600_read_tidig_y:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005389 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5390 SDLoc(DAG.getEntryNode()),
5391 MFI->getArgInfo().WorkItemIDY);
Matt Arsenault43976df2016-01-30 04:25:19 +00005392 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005393 case Intrinsic::r600_read_tidig_z:
Matt Arsenault8623e8d2017-08-03 23:00:29 +00005394 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32,
5395 SDLoc(DAG.getEntryNode()),
5396 MFI->getArgInfo().WorkItemIDZ);
Tim Renouf904343f2018-08-25 14:53:17 +00005397 case Intrinsic::amdgcn_s_buffer_load: {
5398 unsigned Cache = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00005399 return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2),
5400 DAG.getTargetConstant(Cache & 1, DL, MVT::i1), DAG);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005401 }
Matt Arsenaultc5b641a2017-03-17 20:41:45 +00005402 case Intrinsic::amdgcn_fdiv_fast:
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00005403 return lowerFDIV_FAST(Op, DAG);
Tom Stellard2187bb82016-12-06 23:52:13 +00005404 case Intrinsic::amdgcn_interp_mov: {
5405 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5406 SDValue Glue = M0.getValue(1);
5407 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
5408 Op.getOperand(2), Op.getOperand(3), Glue);
5409 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00005410 case Intrinsic::amdgcn_interp_p1: {
5411 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
5412 SDValue Glue = M0.getValue(1);
5413 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
5414 Op.getOperand(2), Op.getOperand(3), Glue);
5415 }
5416 case Intrinsic::amdgcn_interp_p2: {
5417 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5418 SDValue Glue = SDValue(M0.getNode(), 1);
5419 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
5420 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
5421 Glue);
5422 }
Tim Corringham824ca3f2019-01-28 13:48:59 +00005423 case Intrinsic::amdgcn_interp_p1_f16: {
5424 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
5425 SDValue Glue = M0.getValue(1);
5426 if (getSubtarget()->getLDSBankCount() == 16) {
5427 // 16 bank LDS
5428 SDValue S = DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
5429 DAG.getConstant(2, DL, MVT::i32), // P0
5430 Op.getOperand(2), // Attrchan
5431 Op.getOperand(3), // Attr
5432 Glue);
5433 SDValue Ops[] = {
5434 Op.getOperand(1), // Src0
5435 Op.getOperand(2), // Attrchan
5436 Op.getOperand(3), // Attr
5437 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5438 S, // Src2 - holds two f16 values selected by high
5439 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5440 Op.getOperand(4), // high
5441 DAG.getConstant(0, DL, MVT::i1), // $clamp
5442 DAG.getConstant(0, DL, MVT::i32) // $omod
5443 };
5444 return DAG.getNode(AMDGPUISD::INTERP_P1LV_F16, DL, MVT::f32, Ops);
5445 } else {
5446 // 32 bank LDS
5447 SDValue Ops[] = {
5448 Op.getOperand(1), // Src0
5449 Op.getOperand(2), // Attrchan
5450 Op.getOperand(3), // Attr
5451 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5452 Op.getOperand(4), // high
5453 DAG.getConstant(0, DL, MVT::i1), // $clamp
5454 DAG.getConstant(0, DL, MVT::i32), // $omod
5455 Glue
5456 };
5457 return DAG.getNode(AMDGPUISD::INTERP_P1LL_F16, DL, MVT::f32, Ops);
5458 }
5459 }
5460 case Intrinsic::amdgcn_interp_p2_f16: {
5461 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(6));
5462 SDValue Glue = SDValue(M0.getNode(), 1);
5463 SDValue Ops[] = {
5464 Op.getOperand(2), // Src0
5465 Op.getOperand(3), // Attrchan
5466 Op.getOperand(4), // Attr
5467 DAG.getConstant(0, DL, MVT::i32), // $src0_modifiers
5468 Op.getOperand(1), // Src2
5469 DAG.getConstant(0, DL, MVT::i32), // $src2_modifiers
5470 Op.getOperand(5), // high
5471 DAG.getConstant(0, DL, MVT::i1), // $clamp
5472 Glue
5473 };
5474 return DAG.getNode(AMDGPUISD::INTERP_P2_F16, DL, MVT::f16, Ops);
5475 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005476 case Intrinsic::amdgcn_sin:
5477 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
5478
5479 case Intrinsic::amdgcn_cos:
5480 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
5481
5482 case Intrinsic::amdgcn_log_clamp: {
Tom Stellard5bfbae52018-07-11 20:59:01 +00005483 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005484 return SDValue();
5485
5486 DiagnosticInfoUnsupported BadIntrin(
Matthias Braunf1caa282017-12-15 22:22:58 +00005487 MF.getFunction(), "intrinsic not supported on subtarget",
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00005488 DL.getDebugLoc());
5489 DAG.getContext()->diagnose(BadIntrin);
5490 return DAG.getUNDEF(VT);
5491 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005492 case Intrinsic::amdgcn_ldexp:
5493 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
5494 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00005495
5496 case Intrinsic::amdgcn_fract:
5497 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
5498
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005499 case Intrinsic::amdgcn_class:
5500 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
5501 Op.getOperand(1), Op.getOperand(2));
5502 case Intrinsic::amdgcn_div_fmas:
5503 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
5504 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5505 Op.getOperand(4));
5506
5507 case Intrinsic::amdgcn_div_fixup:
5508 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
5509 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5510
5511 case Intrinsic::amdgcn_trig_preop:
5512 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
5513 Op.getOperand(1), Op.getOperand(2));
5514 case Intrinsic::amdgcn_div_scale: {
Matt Arsenaultcaf13162019-03-12 21:02:54 +00005515 const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3));
Matt Arsenaultf75257a2016-01-23 05:32:20 +00005516
5517 // Translate to the operands expected by the machine instruction. The
5518 // first parameter must be the same as the first instruction.
5519 SDValue Numerator = Op.getOperand(1);
5520 SDValue Denominator = Op.getOperand(2);
5521
5522 // Note this order is opposite of the machine instruction's operations,
5523 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
5524 // intrinsic has the numerator as the first operand to match a normal
5525 // division operation.
5526
5527 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
5528
5529 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
5530 Denominator, Numerator);
5531 }
Wei Ding07e03712016-07-28 16:42:13 +00005532 case Intrinsic::amdgcn_icmp: {
Marek Olsak33eb4d92019-01-15 02:13:18 +00005533 // There is a Pat that handles this variant, so return it as-is.
5534 if (Op.getOperand(1).getValueType() == MVT::i1 &&
5535 Op.getConstantOperandVal(2) == 0 &&
5536 Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE)
5537 return Op;
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00005538 return lowerICMPIntrinsic(*this, Op.getNode(), DAG);
Wei Ding07e03712016-07-28 16:42:13 +00005539 }
5540 case Intrinsic::amdgcn_fcmp: {
Matt Arsenaultb3a80e52018-08-15 21:25:20 +00005541 return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
Wei Ding07e03712016-07-28 16:42:13 +00005542 }
Matt Arsenaultf84e5d92017-01-31 03:07:46 +00005543 case Intrinsic::amdgcn_fmed3:
5544 return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
5545 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Farhana Aleenc370d7b2018-07-16 18:19:59 +00005546 case Intrinsic::amdgcn_fdot2:
5547 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT,
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00005548 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
5549 Op.getOperand(4));
Matt Arsenault32fc5272016-07-26 16:45:45 +00005550 case Intrinsic::amdgcn_fmul_legacy:
5551 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
5552 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00005553 case Intrinsic::amdgcn_sffbh:
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00005554 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaultf5262252017-02-22 23:04:58 +00005555 case Intrinsic::amdgcn_sbfe:
5556 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
5557 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
5558 case Intrinsic::amdgcn_ubfe:
5559 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
5560 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
Marek Olsak13e47412018-01-31 20:18:04 +00005561 case Intrinsic::amdgcn_cvt_pkrtz:
5562 case Intrinsic::amdgcn_cvt_pknorm_i16:
5563 case Intrinsic::amdgcn_cvt_pknorm_u16:
5564 case Intrinsic::amdgcn_cvt_pk_i16:
5565 case Intrinsic::amdgcn_cvt_pk_u16: {
5566 // FIXME: Stop adding cast if v2f16/v2i16 are legal.
Matt Arsenault1f17c662017-02-22 00:27:34 +00005567 EVT VT = Op.getValueType();
Marek Olsak13e47412018-01-31 20:18:04 +00005568 unsigned Opcode;
5569
5570 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz)
5571 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32;
5572 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16)
5573 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
5574 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16)
5575 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
5576 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16)
5577 Opcode = AMDGPUISD::CVT_PK_I16_I32;
5578 else
5579 Opcode = AMDGPUISD::CVT_PK_U16_U32;
5580
Matt Arsenault709374d2018-08-01 20:13:58 +00005581 if (isTypeLegal(VT))
5582 return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2));
5583
Marek Olsak13e47412018-01-31 20:18:04 +00005584 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32,
Matt Arsenault1f17c662017-02-22 00:27:34 +00005585 Op.getOperand(1), Op.getOperand(2));
5586 return DAG.getNode(ISD::BITCAST, DL, VT, Node);
5587 }
Connor Abbott8c217d02017-08-04 18:36:49 +00005588 case Intrinsic::amdgcn_wqm: {
5589 SDValue Src = Op.getOperand(1);
5590 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src),
5591 0);
5592 }
Connor Abbott92638ab2017-08-04 18:36:52 +00005593 case Intrinsic::amdgcn_wwm: {
5594 SDValue Src = Op.getOperand(1);
5595 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src),
5596 0);
5597 }
Stanislav Mekhanoshindacda792018-06-26 20:04:19 +00005598 case Intrinsic::amdgcn_fmad_ftz:
5599 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1),
5600 Op.getOperand(2), Op.getOperand(3));
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005601 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00005602 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
5603 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
5604 return lowerImage(Op, ImageDimIntr, DAG);
5605
Matt Arsenault754dd3e2017-04-03 18:08:08 +00005606 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00005607 }
5608}
5609
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005610SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
5611 SelectionDAG &DAG) const {
5612 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
Tom Stellard6f9ef142016-12-20 17:19:44 +00005613 SDLoc DL(Op);
David Stuttard70e8bc12017-06-22 16:29:22 +00005614
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005615 switch (IntrID) {
Marek Olsakc5cec5e2019-01-16 15:43:53 +00005616 case Intrinsic::amdgcn_ds_ordered_add:
5617 case Intrinsic::amdgcn_ds_ordered_swap: {
5618 MemSDNode *M = cast<MemSDNode>(Op);
5619 SDValue Chain = M->getOperand(0);
5620 SDValue M0 = M->getOperand(2);
5621 SDValue Value = M->getOperand(3);
5622 unsigned OrderedCountIndex = M->getConstantOperandVal(7);
5623 unsigned WaveRelease = M->getConstantOperandVal(8);
5624 unsigned WaveDone = M->getConstantOperandVal(9);
5625 unsigned ShaderType;
5626 unsigned Instruction;
5627
5628 switch (IntrID) {
5629 case Intrinsic::amdgcn_ds_ordered_add:
5630 Instruction = 0;
5631 break;
5632 case Intrinsic::amdgcn_ds_ordered_swap:
5633 Instruction = 1;
5634 break;
5635 }
5636
5637 if (WaveDone && !WaveRelease)
5638 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
5639
5640 switch (DAG.getMachineFunction().getFunction().getCallingConv()) {
5641 case CallingConv::AMDGPU_CS:
5642 case CallingConv::AMDGPU_KERNEL:
5643 ShaderType = 0;
5644 break;
5645 case CallingConv::AMDGPU_PS:
5646 ShaderType = 1;
5647 break;
5648 case CallingConv::AMDGPU_VS:
5649 ShaderType = 2;
5650 break;
5651 case CallingConv::AMDGPU_GS:
5652 ShaderType = 3;
5653 break;
5654 default:
5655 report_fatal_error("ds_ordered_count unsupported for this calling conv");
5656 }
5657
5658 unsigned Offset0 = OrderedCountIndex << 2;
5659 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
5660 (Instruction << 4);
5661 unsigned Offset = Offset0 | (Offset1 << 8);
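    // Illustrative example (not from the original source): for a compute
    // shader ds_ordered_add with index 1, wave_release = 1, wave_done = 0
    // (ShaderType = 0, Instruction = 0):
    //   Offset0 = 1 << 2 = 0x4
    //   Offset1 = 1 | (0 << 1) | (0 << 2) | (0 << 4) = 0x1
    //   Offset  = 0x4 | (0x1 << 8) = 0x104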
5662
5663 SDValue Ops[] = {
5664 Chain,
5665 Value,
5666 DAG.getTargetConstant(Offset, DL, MVT::i16),
5667 copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue
5668 };
5669 return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL,
5670 M->getVTList(), Ops, M->getMemoryVT(),
5671 M->getMemOperand());
5672 }
Matt Arsenaulta5840c32019-01-22 18:36:06 +00005673 case Intrinsic::amdgcn_ds_fadd: {
5674 MemSDNode *M = cast<MemSDNode>(Op);
5675 unsigned Opc;
5676 switch (IntrID) {
5677 case Intrinsic::amdgcn_ds_fadd:
5678 Opc = ISD::ATOMIC_LOAD_FADD;
5679 break;
5680 }
5681
5682 return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
5683 M->getOperand(0), M->getOperand(2), M->getOperand(3),
5684 M->getMemOperand());
5685 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005686 case Intrinsic::amdgcn_atomic_inc:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005687 case Intrinsic::amdgcn_atomic_dec:
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005688 case Intrinsic::amdgcn_ds_fmin:
5689 case Intrinsic::amdgcn_ds_fmax: {
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005690 MemSDNode *M = cast<MemSDNode>(Op);
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005691 unsigned Opc;
5692 switch (IntrID) {
5693 case Intrinsic::amdgcn_atomic_inc:
5694 Opc = AMDGPUISD::ATOMIC_INC;
5695 break;
5696 case Intrinsic::amdgcn_atomic_dec:
5697 Opc = AMDGPUISD::ATOMIC_DEC;
5698 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005699 case Intrinsic::amdgcn_ds_fmin:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005700 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
5701 break;
Daniil Fukalov6e1dc682018-01-26 11:09:38 +00005702 case Intrinsic::amdgcn_ds_fmax:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00005703 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX;
5704 break;
5705 default:
5706 llvm_unreachable("Unknown intrinsic!");
5707 }
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00005708 SDValue Ops[] = {
5709 M->getOperand(0), // Chain
5710 M->getOperand(2), // Ptr
5711 M->getOperand(3) // Value
5712 };
5713
5714 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
5715 M->getMemoryVT(), M->getMemOperand());
5716 }
Tom Stellard6f9ef142016-12-20 17:19:44 +00005717 case Intrinsic::amdgcn_buffer_load:
5718 case Intrinsic::amdgcn_buffer_load_format: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005719 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
5720 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5721 unsigned IdxEn = 1;
5722 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5723 IdxEn = Idx->getZExtValue() != 0;
Tom Stellard6f9ef142016-12-20 17:19:44 +00005724 SDValue Ops[] = {
5725 Op.getOperand(0), // Chain
5726 Op.getOperand(2), // rsrc
5727 Op.getOperand(3), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005728 SDValue(), // voffset -- will be set by setBufferOffsets
5729 SDValue(), // soffset -- will be set by setBufferOffsets
5730 SDValue(), // offset -- will be set by setBufferOffsets
5731 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5732 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Tom Stellard6f9ef142016-12-20 17:19:44 +00005733 };
Tom Stellard6f9ef142016-12-20 17:19:44 +00005734
Tim Renouf4f703f52018-08-21 11:07:10 +00005735 setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
Tom Stellard6f9ef142016-12-20 17:19:44 +00005736 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
5737 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
Tim Renouf4f703f52018-08-21 11:07:10 +00005738
5739 EVT VT = Op.getValueType();
5740 EVT IntVT = VT.changeTypeToInteger();
5741 auto *M = cast<MemSDNode>(Op);
5742 EVT LoadVT = Op.getValueType();
5743
5744 if (LoadVT.getScalarType() == MVT::f16)
5745 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5746 M, DAG, Ops);
Ryan Taylor00e063a2019-03-19 16:07:00 +00005747
5748 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
5749 if (LoadVT.getScalarType() == MVT::i8 ||
5750 LoadVT.getScalarType() == MVT::i16)
5751 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
5752
Tim Renouf677387d2019-03-22 14:58:02 +00005753 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5754 M->getMemOperand(), DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00005755 }
5756 case Intrinsic::amdgcn_raw_buffer_load:
5757 case Intrinsic::amdgcn_raw_buffer_load_format: {
5758 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5759 SDValue Ops[] = {
5760 Op.getOperand(0), // Chain
5761 Op.getOperand(2), // rsrc
5762 DAG.getConstant(0, DL, MVT::i32), // vindex
5763 Offsets.first, // voffset
5764 Op.getOperand(4), // soffset
5765 Offsets.second, // offset
5766 Op.getOperand(5), // cachepolicy
5767 DAG.getConstant(0, DL, MVT::i1), // idxen
5768 };
5769
5770 unsigned Opc = (IntrID == Intrinsic::amdgcn_raw_buffer_load) ?
5771 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5772
5773 EVT VT = Op.getValueType();
5774 EVT IntVT = VT.changeTypeToInteger();
5775 auto *M = cast<MemSDNode>(Op);
5776 EVT LoadVT = Op.getValueType();
5777
5778 if (LoadVT.getScalarType() == MVT::f16)
5779 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5780 M, DAG, Ops);
Ryan Taylor00e063a2019-03-19 16:07:00 +00005781
5782 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
5783 if (LoadVT.getScalarType() == MVT::i8 ||
5784 LoadVT.getScalarType() == MVT::i16)
5785 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
5786
Tim Renouf677387d2019-03-22 14:58:02 +00005787 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5788 M->getMemOperand(), DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00005789 }
5790 case Intrinsic::amdgcn_struct_buffer_load:
5791 case Intrinsic::amdgcn_struct_buffer_load_format: {
5792 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5793 SDValue Ops[] = {
5794 Op.getOperand(0), // Chain
5795 Op.getOperand(2), // rsrc
5796 Op.getOperand(3), // vindex
5797 Offsets.first, // voffset
5798 Op.getOperand(5), // soffset
5799 Offsets.second, // offset
5800 Op.getOperand(6), // cachepolicy
5801 DAG.getConstant(1, DL, MVT::i1), // idxen
5802 };
5803
5804 unsigned Opc = (IntrID == Intrinsic::amdgcn_struct_buffer_load) ?
5805 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
5806
Tom Stellard6f9ef142016-12-20 17:19:44 +00005807 EVT VT = Op.getValueType();
5808 EVT IntVT = VT.changeTypeToInteger();
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005809 auto *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005810 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005811
Tim Renouf366a49d2018-08-02 23:33:01 +00005812 if (LoadVT.getScalarType() == MVT::f16)
5813 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
5814 M, DAG, Ops);
Ryan Taylor00e063a2019-03-19 16:07:00 +00005815
5816 // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
5817 if (LoadVT.getScalarType() == MVT::i8 ||
5818 LoadVT.getScalarType() == MVT::i16)
5819 return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);
5820
Tim Renouf677387d2019-03-22 14:58:02 +00005821 return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
5822 M->getMemOperand(), DAG);
Tom Stellard6f9ef142016-12-20 17:19:44 +00005823 }
David Stuttard70e8bc12017-06-22 16:29:22 +00005824 case Intrinsic::amdgcn_tbuffer_load: {
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005825 MemSDNode *M = cast<MemSDNode>(Op);
Matt Arsenault1349a042018-05-22 06:32:10 +00005826 EVT LoadVT = Op.getValueType();
Matt Arsenault1349a042018-05-22 06:32:10 +00005827
Tim Renouf35484c92018-08-21 11:06:05 +00005828 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
5829 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
5830 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
5831 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
5832 unsigned IdxEn = 1;
5833 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
5834 IdxEn = Idx->getZExtValue() != 0;
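    // The legacy tbuffer intrinsic takes separate dfmt and nfmt operands;
    // merge them into the combined format operand (dfmt in bits 0-3, nfmt
    // starting at bit 4) carried by the TBUFFER_LOAD_FORMAT node.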
David Stuttard70e8bc12017-06-22 16:29:22 +00005835 SDValue Ops[] = {
5836 Op.getOperand(0), // Chain
5837 Op.getOperand(2), // rsrc
5838 Op.getOperand(3), // vindex
5839 Op.getOperand(4), // voffset
5840 Op.getOperand(5), // soffset
5841 Op.getOperand(6), // offset
Tim Renouf35484c92018-08-21 11:06:05 +00005842 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
5843 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
5844 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
5845 };
5846
5847 if (LoadVT.getScalarType() == MVT::f16)
5848 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5849 M, DAG, Ops);
Tim Renouf677387d2019-03-22 14:58:02 +00005850 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5851 Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
5852 DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00005853 }
5854 case Intrinsic::amdgcn_raw_tbuffer_load: {
5855 MemSDNode *M = cast<MemSDNode>(Op);
5856 EVT LoadVT = Op.getValueType();
5857 auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG);
5858
5859 SDValue Ops[] = {
5860 Op.getOperand(0), // Chain
5861 Op.getOperand(2), // rsrc
5862 DAG.getConstant(0, DL, MVT::i32), // vindex
5863 Offsets.first, // voffset
5864 Op.getOperand(4), // soffset
5865 Offsets.second, // offset
5866 Op.getOperand(5), // format
5867 Op.getOperand(6), // cachepolicy
5868 DAG.getConstant(0, DL, MVT::i1), // idxen
5869 };
5870
5871 if (LoadVT.getScalarType() == MVT::f16)
5872 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5873 M, DAG, Ops);
Tim Renouf677387d2019-03-22 14:58:02 +00005874 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5875 Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
5876 DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00005877 }
5878 case Intrinsic::amdgcn_struct_tbuffer_load: {
5879 MemSDNode *M = cast<MemSDNode>(Op);
5880 EVT LoadVT = Op.getValueType();
5881 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5882
5883 SDValue Ops[] = {
5884 Op.getOperand(0), // Chain
5885 Op.getOperand(2), // rsrc
5886 Op.getOperand(3), // vindex
5887 Offsets.first, // voffset
5888 Op.getOperand(5), // soffset
5889 Offsets.second, // offset
5890 Op.getOperand(6), // format
5891 Op.getOperand(7), // cachepolicy
5892 DAG.getConstant(1, DL, MVT::i1), // idxen
David Stuttard70e8bc12017-06-22 16:29:22 +00005893 };
5894
Tim Renouf366a49d2018-08-02 23:33:01 +00005895 if (LoadVT.getScalarType() == MVT::f16)
5896 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
5897 M, DAG, Ops);
Tim Renouf677387d2019-03-22 14:58:02 +00005898 return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
5899 Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
5900 DAG);
David Stuttard70e8bc12017-06-22 16:29:22 +00005901 }
Marek Olsak5cec6412017-11-09 01:52:48 +00005902 case Intrinsic::amdgcn_buffer_atomic_swap:
5903 case Intrinsic::amdgcn_buffer_atomic_add:
5904 case Intrinsic::amdgcn_buffer_atomic_sub:
5905 case Intrinsic::amdgcn_buffer_atomic_smin:
5906 case Intrinsic::amdgcn_buffer_atomic_umin:
5907 case Intrinsic::amdgcn_buffer_atomic_smax:
5908 case Intrinsic::amdgcn_buffer_atomic_umax:
5909 case Intrinsic::amdgcn_buffer_atomic_and:
5910 case Intrinsic::amdgcn_buffer_atomic_or:
5911 case Intrinsic::amdgcn_buffer_atomic_xor: {
Tim Renouf4f703f52018-08-21 11:07:10 +00005912 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
5913 unsigned IdxEn = 1;
5914 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
5915 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00005916 SDValue Ops[] = {
5917 Op.getOperand(0), // Chain
5918 Op.getOperand(2), // vdata
5919 Op.getOperand(3), // rsrc
5920 Op.getOperand(4), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00005921 SDValue(), // voffset -- will be set by setBufferOffsets
5922 SDValue(), // soffset -- will be set by setBufferOffsets
5923 SDValue(), // offset -- will be set by setBufferOffsets
5924 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
5925 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00005926 };
Tim Renouf4f703f52018-08-21 11:07:10 +00005927 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005928 EVT VT = Op.getValueType();
5929
5930 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00005931 unsigned Opcode = 0;
5932
5933 switch (IntrID) {
5934 case Intrinsic::amdgcn_buffer_atomic_swap:
5935 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
5936 break;
5937 case Intrinsic::amdgcn_buffer_atomic_add:
5938 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
5939 break;
5940 case Intrinsic::amdgcn_buffer_atomic_sub:
5941 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
5942 break;
5943 case Intrinsic::amdgcn_buffer_atomic_smin:
5944 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
5945 break;
5946 case Intrinsic::amdgcn_buffer_atomic_umin:
5947 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
5948 break;
5949 case Intrinsic::amdgcn_buffer_atomic_smax:
5950 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
5951 break;
5952 case Intrinsic::amdgcn_buffer_atomic_umax:
5953 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
5954 break;
5955 case Intrinsic::amdgcn_buffer_atomic_and:
5956 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
5957 break;
5958 case Intrinsic::amdgcn_buffer_atomic_or:
5959 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
5960 break;
5961 case Intrinsic::amdgcn_buffer_atomic_xor:
5962 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
5963 break;
5964 default:
5965 llvm_unreachable("unhandled atomic opcode");
5966 }
5967
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00005968 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
5969 M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00005970 }
Tim Renouf4f703f52018-08-21 11:07:10 +00005971 case Intrinsic::amdgcn_raw_buffer_atomic_swap:
5972 case Intrinsic::amdgcn_raw_buffer_atomic_add:
5973 case Intrinsic::amdgcn_raw_buffer_atomic_sub:
5974 case Intrinsic::amdgcn_raw_buffer_atomic_smin:
5975 case Intrinsic::amdgcn_raw_buffer_atomic_umin:
5976 case Intrinsic::amdgcn_raw_buffer_atomic_smax:
5977 case Intrinsic::amdgcn_raw_buffer_atomic_umax:
5978 case Intrinsic::amdgcn_raw_buffer_atomic_and:
5979 case Intrinsic::amdgcn_raw_buffer_atomic_or:
5980 case Intrinsic::amdgcn_raw_buffer_atomic_xor: {
5981 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
5982 SDValue Ops[] = {
5983 Op.getOperand(0), // Chain
5984 Op.getOperand(2), // vdata
5985 Op.getOperand(3), // rsrc
5986 DAG.getConstant(0, DL, MVT::i32), // vindex
5987 Offsets.first, // voffset
5988 Op.getOperand(5), // soffset
5989 Offsets.second, // offset
5990 Op.getOperand(6), // cachepolicy
5991 DAG.getConstant(0, DL, MVT::i1), // idxen
5992 };
5993 EVT VT = Op.getValueType();
Marek Olsak5cec6412017-11-09 01:52:48 +00005994
Tim Renouf4f703f52018-08-21 11:07:10 +00005995 auto *M = cast<MemSDNode>(Op);
5996 unsigned Opcode = 0;
5997
5998 switch (IntrID) {
5999 case Intrinsic::amdgcn_raw_buffer_atomic_swap:
6000 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6001 break;
6002 case Intrinsic::amdgcn_raw_buffer_atomic_add:
6003 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6004 break;
6005 case Intrinsic::amdgcn_raw_buffer_atomic_sub:
6006 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6007 break;
6008 case Intrinsic::amdgcn_raw_buffer_atomic_smin:
6009 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6010 break;
6011 case Intrinsic::amdgcn_raw_buffer_atomic_umin:
6012 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6013 break;
6014 case Intrinsic::amdgcn_raw_buffer_atomic_smax:
6015 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6016 break;
6017 case Intrinsic::amdgcn_raw_buffer_atomic_umax:
6018 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6019 break;
6020 case Intrinsic::amdgcn_raw_buffer_atomic_and:
6021 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6022 break;
6023 case Intrinsic::amdgcn_raw_buffer_atomic_or:
6024 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6025 break;
6026 case Intrinsic::amdgcn_raw_buffer_atomic_xor:
6027 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6028 break;
6029 default:
6030 llvm_unreachable("unhandled atomic opcode");
6031 }
6032
6033 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6034 M->getMemOperand());
6035 }
6036 case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6037 case Intrinsic::amdgcn_struct_buffer_atomic_add:
6038 case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6039 case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6040 case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6041 case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6042 case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6043 case Intrinsic::amdgcn_struct_buffer_atomic_and:
6044 case Intrinsic::amdgcn_struct_buffer_atomic_or:
6045 case Intrinsic::amdgcn_struct_buffer_atomic_xor: {
6046 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6047 SDValue Ops[] = {
6048 Op.getOperand(0), // Chain
6049 Op.getOperand(2), // vdata
6050 Op.getOperand(3), // rsrc
6051 Op.getOperand(4), // vindex
6052 Offsets.first, // voffset
6053 Op.getOperand(6), // soffset
6054 Offsets.second, // offset
6055 Op.getOperand(7), // cachepolicy
6056 DAG.getConstant(1, DL, MVT::i1), // idxen
6057 };
6058 EVT VT = Op.getValueType();
6059
6060 auto *M = cast<MemSDNode>(Op);
6061 unsigned Opcode = 0;
6062
6063 switch (IntrID) {
6064 case Intrinsic::amdgcn_struct_buffer_atomic_swap:
6065 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP;
6066 break;
6067 case Intrinsic::amdgcn_struct_buffer_atomic_add:
6068 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD;
6069 break;
6070 case Intrinsic::amdgcn_struct_buffer_atomic_sub:
6071 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB;
6072 break;
6073 case Intrinsic::amdgcn_struct_buffer_atomic_smin:
6074 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN;
6075 break;
6076 case Intrinsic::amdgcn_struct_buffer_atomic_umin:
6077 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN;
6078 break;
6079 case Intrinsic::amdgcn_struct_buffer_atomic_smax:
6080 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX;
6081 break;
6082 case Intrinsic::amdgcn_struct_buffer_atomic_umax:
6083 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX;
6084 break;
6085 case Intrinsic::amdgcn_struct_buffer_atomic_and:
6086 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND;
6087 break;
6088 case Intrinsic::amdgcn_struct_buffer_atomic_or:
6089 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR;
6090 break;
6091 case Intrinsic::amdgcn_struct_buffer_atomic_xor:
6092 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR;
6093 break;
6094 default:
6095 llvm_unreachable("unhandled atomic opcode");
6096 }
6097
6098 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT,
6099 M->getMemOperand());
6100 }
Marek Olsak5cec6412017-11-09 01:52:48 +00006101 case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
Tim Renouf4f703f52018-08-21 11:07:10 +00006102 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6103 unsigned IdxEn = 1;
6104 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
6105 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00006106 SDValue Ops[] = {
6107 Op.getOperand(0), // Chain
6108 Op.getOperand(2), // src
6109 Op.getOperand(3), // cmp
6110 Op.getOperand(4), // rsrc
6111 Op.getOperand(5), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00006112 SDValue(), // voffset -- will be set by setBufferOffsets
6113 SDValue(), // soffset -- will be set by setBufferOffsets
6114 SDValue(), // offset -- will be set by setBufferOffsets
6115 DAG.getConstant(Slc << 1, DL, MVT::i32), // cachepolicy
6116 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6117 };
6118 setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
6119 EVT VT = Op.getValueType();
6120 auto *M = cast<MemSDNode>(Op);
6121
6122 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6123 Op->getVTList(), Ops, VT, M->getMemOperand());
6124 }
6125 case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: {
6126 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6127 SDValue Ops[] = {
6128 Op.getOperand(0), // Chain
6129 Op.getOperand(2), // src
6130 Op.getOperand(3), // cmp
6131 Op.getOperand(4), // rsrc
6132 DAG.getConstant(0, DL, MVT::i32), // vindex
6133 Offsets.first, // voffset
6134 Op.getOperand(6), // soffset
6135 Offsets.second, // offset
6136 Op.getOperand(7), // cachepolicy
6137 DAG.getConstant(0, DL, MVT::i1), // idxen
6138 };
6139 EVT VT = Op.getValueType();
6140 auto *M = cast<MemSDNode>(Op);
6141
6142 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
6143 Op->getVTList(), Ops, VT, M->getMemOperand());
6144 }
6145 case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: {
6146 auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG);
6147 SDValue Ops[] = {
6148 Op.getOperand(0), // Chain
6149 Op.getOperand(2), // src
6150 Op.getOperand(3), // cmp
6151 Op.getOperand(4), // rsrc
6152 Op.getOperand(5), // vindex
6153 Offsets.first, // voffset
6154 Op.getOperand(7), // soffset
6155 Offsets.second, // offset
6156 Op.getOperand(8), // cachepolicy
6157 DAG.getConstant(1, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00006158 };
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00006159 EVT VT = Op.getValueType();
6160 auto *M = cast<MemSDNode>(Op);
Marek Olsak5cec6412017-11-09 01:52:48 +00006161
6162 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
Matt Arsenaulte19bc2e2017-12-29 17:18:21 +00006163 Op->getVTList(), Ops, VT, M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00006164 }
6165
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00006166 default:
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00006167 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6168 AMDGPU::getImageDimIntrinsicInfo(IntrID))
6169 return lowerImage(Op, ImageDimIntr, DAG);
Matt Arsenault1349a042018-05-22 06:32:10 +00006170
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00006171 return SDValue();
6172 }
6173}
6174
Tim Renouf677387d2019-03-22 14:58:02 +00006175// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
6176// dwordx4 if on SI.
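// The widened node's low three elements are extracted back out with
// EXTRACT_SUBVECTOR and merged with the original chain, so callers still see
// the dwordx3 result type they asked for.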
6177SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
6178 SDVTList VTList,
6179 ArrayRef<SDValue> Ops, EVT MemVT,
6180 MachineMemOperand *MMO,
6181 SelectionDAG &DAG) const {
6182 EVT VT = VTList.VTs[0];
6183 EVT WidenedVT = VT;
6184 EVT WidenedMemVT = MemVT;
6185 if (!Subtarget->hasDwordx3LoadStores() &&
6186 (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
6187 WidenedVT = EVT::getVectorVT(*DAG.getContext(),
6188 WidenedVT.getVectorElementType(), 4);
6189 WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
6190 WidenedMemVT.getVectorElementType(), 4);
6191 MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
6192 }
6193
6194 assert(VTList.NumVTs == 2);
6195 SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
6196
6197 auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
6198 WidenedMemVT, MMO);
6199 if (WidenedVT != VT) {
6200 auto Extract = DAG.getNode(
6201 ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
6202 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
6203 NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
6204 }
6205 return NewOp;
6206}
6207
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006208SDValue SITargetLowering::handleD16VData(SDValue VData,
6209 SelectionDAG &DAG) const {
6210 EVT StoreVT = VData.getValueType();
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006211
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006212 // No change for f16 and legal vector D16 types.
Matt Arsenault1349a042018-05-22 06:32:10 +00006213 if (!StoreVT.isVector())
6214 return VData;
6215
6216 SDLoc DL(VData);
6217 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16");
6218
6219 if (Subtarget->hasUnpackedD16VMem()) {
6220 // We need to unpack the packed data to store.
6221 EVT IntStoreVT = StoreVT.changeTypeToInteger();
6222 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData);
6223
6224 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
6225 StoreVT.getVectorNumElements());
6226 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData);
6227 return DAG.UnrollVectorOp(ZExt.getNode());
6228 }
6229
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006230 assert(isTypeLegal(StoreVT));
6231 return VData;
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006232}
6233
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006234SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
6235 SelectionDAG &DAG) const {
Tom Stellardfc92e772015-05-12 14:18:14 +00006236 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006237 SDValue Chain = Op.getOperand(0);
6238 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
David Stuttard70e8bc12017-06-22 16:29:22 +00006239 MachineFunction &MF = DAG.getMachineFunction();
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006240
6241 switch (IntrinsicID) {
Matt Arsenault7d6b71d2017-02-21 22:50:41 +00006242 case Intrinsic::amdgcn_exp: {
Matt Arsenault4165efd2017-01-17 07:26:53 +00006243 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6244 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6245 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
6246 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
6247
6248 const SDValue Ops[] = {
6249 Chain,
6250 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6251 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
6252 Op.getOperand(4), // src0
6253 Op.getOperand(5), // src1
6254 Op.getOperand(6), // src2
6255 Op.getOperand(7), // src3
6256 DAG.getTargetConstant(0, DL, MVT::i1), // compr
6257 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6258 };
6259
6260 unsigned Opc = Done->isNullValue() ?
6261 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6262 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6263 }
6264 case Intrinsic::amdgcn_exp_compr: {
6265 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
6266 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
6267 SDValue Src0 = Op.getOperand(4);
6268 SDValue Src1 = Op.getOperand(5);
6269 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
6270 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
6271
6272 SDValue Undef = DAG.getUNDEF(MVT::f32);
6273 const SDValue Ops[] = {
6274 Chain,
6275 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
6276 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
6277 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
6278 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
6279 Undef, // src2
6280 Undef, // src3
6281 DAG.getTargetConstant(1, DL, MVT::i1), // compr
6282 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
6283 };
6284
6285 unsigned Opc = Done->isNullValue() ?
6286 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
6287 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
6288 }
6289 case Intrinsic::amdgcn_s_sendmsg:
Matt Arsenaultd3e5cb72017-02-16 02:01:17 +00006290 case Intrinsic::amdgcn_s_sendmsghalt: {
6291 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
6292 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
Tom Stellardfc92e772015-05-12 14:18:14 +00006293 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
6294 SDValue Glue = Chain.getValue(1);
Matt Arsenaulta78ca622017-02-15 22:17:09 +00006295 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
Jan Veselyd48445d2017-01-04 18:06:55 +00006296 Op.getOperand(2), Glue);
6297 }
Marek Olsak2d825902017-04-28 20:21:58 +00006298 case Intrinsic::amdgcn_init_exec: {
6299 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
6300 Op.getOperand(2));
6301 }
6302 case Intrinsic::amdgcn_init_exec_from_input: {
6303 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
6304 Op.getOperand(2), Op.getOperand(3));
6305 }
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00006306 case Intrinsic::amdgcn_s_barrier: {
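    // With optimization enabled, a workgroup that fits in a single wave does
    // not need a real s_barrier: the lanes of one wave already execute
    // together, so only the WAVE_BARRIER pseudo is kept.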
6307 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
Tom Stellard5bfbae52018-07-11 20:59:01 +00006308 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
Matthias Braunf1caa282017-12-15 22:22:58 +00006309 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
Stanislav Mekhanoshinea57c382017-04-06 16:48:30 +00006310 if (WGSize <= ST.getWavefrontSize())
6311 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
6312 Op.getOperand(0)), 0);
6313 }
6314 return SDValue();
6315  }
David Stuttard70e8bc12017-06-22 16:29:22 +00006316 case Intrinsic::amdgcn_tbuffer_store: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006317 SDValue VData = Op.getOperand(2);
6318 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6319 if (IsD16)
6320 VData = handleD16VData(VData, DAG);
Tim Renouf35484c92018-08-21 11:06:05 +00006321 unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
6322 unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
6323 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
6324 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
6325 unsigned IdxEn = 1;
6326 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6327 IdxEn = Idx->getZExtValue() != 0;
David Stuttard70e8bc12017-06-22 16:29:22 +00006328 SDValue Ops[] = {
6329 Chain,
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006330 VData, // vdata
David Stuttard70e8bc12017-06-22 16:29:22 +00006331 Op.getOperand(3), // rsrc
6332 Op.getOperand(4), // vindex
6333 Op.getOperand(5), // voffset
6334 Op.getOperand(6), // soffset
6335 Op.getOperand(7), // offset
Tim Renouf35484c92018-08-21 11:06:05 +00006336 DAG.getConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
6337 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6338      DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
6339 };
6340 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6341 AMDGPUISD::TBUFFER_STORE_FORMAT;
6342 MemSDNode *M = cast<MemSDNode>(Op);
6343 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6344 M->getMemoryVT(), M->getMemOperand());
6345 }
6346
6347 case Intrinsic::amdgcn_struct_tbuffer_store: {
6348 SDValue VData = Op.getOperand(2);
6349 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6350 if (IsD16)
6351 VData = handleD16VData(VData, DAG);
6352 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6353 SDValue Ops[] = {
6354 Chain,
6355 VData, // vdata
6356 Op.getOperand(3), // rsrc
6357 Op.getOperand(4), // vindex
6358 Offsets.first, // voffset
6359 Op.getOperand(6), // soffset
6360 Offsets.second, // offset
6361 Op.getOperand(7), // format
6362 Op.getOperand(8), // cachepolicy
6363      DAG.getConstant(1, DL, MVT::i1), // idxen
6364 };
6365 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6366 AMDGPUISD::TBUFFER_STORE_FORMAT;
6367 MemSDNode *M = cast<MemSDNode>(Op);
6368 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6369 M->getMemoryVT(), M->getMemOperand());
6370 }
6371
6372 case Intrinsic::amdgcn_raw_tbuffer_store: {
6373 SDValue VData = Op.getOperand(2);
6374 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6375 if (IsD16)
6376 VData = handleD16VData(VData, DAG);
6377 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6378 SDValue Ops[] = {
6379 Chain,
6380 VData, // vdata
6381 Op.getOperand(3), // rsrc
6382 DAG.getConstant(0, DL, MVT::i32), // vindex
6383 Offsets.first, // voffset
6384 Op.getOperand(5), // soffset
6385 Offsets.second, // offset
6386 Op.getOperand(6), // format
6387 Op.getOperand(7), // cachepolicy
6388      DAG.getConstant(0, DL, MVT::i1), // idxen
David Stuttard70e8bc12017-06-22 16:29:22 +00006389 };
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006390 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
6391 AMDGPUISD::TBUFFER_STORE_FORMAT;
6392 MemSDNode *M = cast<MemSDNode>(Op);
6393 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6394 M->getMemoryVT(), M->getMemOperand());
David Stuttard70e8bc12017-06-22 16:29:22 +00006395 }
6396
Marek Olsak5cec6412017-11-09 01:52:48 +00006397 case Intrinsic::amdgcn_buffer_store:
6398 case Intrinsic::amdgcn_buffer_store_format: {
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006399 SDValue VData = Op.getOperand(2);
6400 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6401 if (IsD16)
6402 VData = handleD16VData(VData, DAG);
Tim Renouf4f703f52018-08-21 11:07:10 +00006403 unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
6404 unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
6405 unsigned IdxEn = 1;
6406 if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
6407 IdxEn = Idx->getZExtValue() != 0;
Marek Olsak5cec6412017-11-09 01:52:48 +00006408 SDValue Ops[] = {
6409 Chain,
Tim Renouf4f703f52018-08-21 11:07:10 +00006410 VData,
Marek Olsak5cec6412017-11-09 01:52:48 +00006411 Op.getOperand(3), // rsrc
6412 Op.getOperand(4), // vindex
Tim Renouf4f703f52018-08-21 11:07:10 +00006413 SDValue(), // voffset -- will be set by setBufferOffsets
6414 SDValue(), // soffset -- will be set by setBufferOffsets
6415 SDValue(), // offset -- will be set by setBufferOffsets
6416 DAG.getConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
6417 DAG.getConstant(IdxEn, DL, MVT::i1), // idxen
Marek Olsak5cec6412017-11-09 01:52:48 +00006418 };
Tim Renouf4f703f52018-08-21 11:07:10 +00006419 setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006420 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
6421 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6422 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6423 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006424
6425 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6426 EVT VDataType = VData.getValueType().getScalarType();
6427 if (VDataType == MVT::i8 || VDataType == MVT::i16)
6428 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6429
Changpeng Fang44dfa1d2018-01-12 21:12:19 +00006430 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6431 M->getMemoryVT(), M->getMemOperand());
Marek Olsak5cec6412017-11-09 01:52:48 +00006432 }
Tim Renouf4f703f52018-08-21 11:07:10 +00006433
6434 case Intrinsic::amdgcn_raw_buffer_store:
6435 case Intrinsic::amdgcn_raw_buffer_store_format: {
6436 SDValue VData = Op.getOperand(2);
6437 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6438 if (IsD16)
6439 VData = handleD16VData(VData, DAG);
6440 auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
6441 SDValue Ops[] = {
6442 Chain,
6443 VData,
6444 Op.getOperand(3), // rsrc
6445 DAG.getConstant(0, DL, MVT::i32), // vindex
6446 Offsets.first, // voffset
6447 Op.getOperand(5), // soffset
6448 Offsets.second, // offset
6449 Op.getOperand(6), // cachepolicy
6450 DAG.getConstant(0, DL, MVT::i1), // idxen
6451 };
6452 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_raw_buffer_store ?
6453 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6454 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6455 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006456
6457 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6458 EVT VDataType = VData.getValueType().getScalarType();
6459 if (VDataType == MVT::i8 || VDataType == MVT::i16)
6460 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6461
Tim Renouf4f703f52018-08-21 11:07:10 +00006462 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6463 M->getMemoryVT(), M->getMemOperand());
6464 }
6465
6466 case Intrinsic::amdgcn_struct_buffer_store:
6467 case Intrinsic::amdgcn_struct_buffer_store_format: {
6468 SDValue VData = Op.getOperand(2);
6469 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
6470 if (IsD16)
6471 VData = handleD16VData(VData, DAG);
6472 auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
6473 SDValue Ops[] = {
6474 Chain,
6475 VData,
6476 Op.getOperand(3), // rsrc
6477 Op.getOperand(4), // vindex
6478 Offsets.first, // voffset
6479 Op.getOperand(6), // soffset
6480 Offsets.second, // offset
6481 Op.getOperand(7), // cachepolicy
6482 DAG.getConstant(1, DL, MVT::i1), // idxen
6483 };
6484 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
6485 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
6486 Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
6487 MemSDNode *M = cast<MemSDNode>(Op);
Ryan Taylor00e063a2019-03-19 16:07:00 +00006488
6489 // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
6490 EVT VDataType = VData.getValueType().getScalarType();
6491 if (VDataType == MVT::i8 || VDataType == MVT::i16)
6492 return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);
6493
Tim Renouf4f703f52018-08-21 11:07:10 +00006494 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
6495 M->getMemoryVT(), M->getMemOperand());
6496 }
6497
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006498 default: {
Nicolai Haehnle7a9c03f2018-06-21 13:36:57 +00006499 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
6500 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
6501 return lowerImage(Op, ImageDimIntr, DAG);
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006502
Matt Arsenault754dd3e2017-04-03 18:08:08 +00006503 return Op;
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006504 }
Nicolai Haehnle2f5a7382018-04-04 10:58:54 +00006505 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00006506}
6507
Tim Renouf4f703f52018-08-21 11:07:10 +00006508// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
6509// offset (the offset that is included in bounds checking and swizzling, to be
6510// split between the instruction's voffset and immoffset fields) and soffset
6511// (the offset that is excluded from bounds checking and swizzling, to go in
6512// the instruction's soffset field). This function takes the first kind of
6513// offset and figures out how to split it between voffset and immoffset.
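// For example, with the 4095 immediate limit used below, a constant combined
// offset of 5000 is split into 4096 for voffset (kept a multiple of 4096 so
// the copy/add feeding voffset can be CSEd with similar accesses) and 904 for
// the immoffset field.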
Tim Renouf35484c92018-08-21 11:06:05 +00006514std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
6515 SDValue Offset, SelectionDAG &DAG) const {
6516 SDLoc DL(Offset);
6517 const unsigned MaxImm = 4095;
6518 SDValue N0 = Offset;
6519 ConstantSDNode *C1 = nullptr;
Piotr Sobczak378131b2019-01-02 09:47:41 +00006520
6521 if ((C1 = dyn_cast<ConstantSDNode>(N0)))
Tim Renouf35484c92018-08-21 11:06:05 +00006522 N0 = SDValue();
Piotr Sobczak378131b2019-01-02 09:47:41 +00006523 else if (DAG.isBaseWithConstantOffset(N0)) {
6524 C1 = cast<ConstantSDNode>(N0.getOperand(1));
6525 N0 = N0.getOperand(0);
6526 }
Tim Renouf35484c92018-08-21 11:06:05 +00006527
6528 if (C1) {
6529 unsigned ImmOffset = C1->getZExtValue();
6530 // If the immediate value is too big for the immoffset field, put the value
Tim Renoufa37679d2018-10-03 10:29:43 +00006531 // and -4096 into the immoffset field so that the value that is copied/added
Tim Renouf35484c92018-08-21 11:06:05 +00006532 // for the voffset field is a multiple of 4096, and it stands more chance
6533 // of being CSEd with the copy/add for another similar load/store.
Tim Renoufa37679d2018-10-03 10:29:43 +00006534 // However, do not do that rounding down to a multiple of 4096 if that is a
6535 // negative number, as it appears to be illegal to have a negative offset
6536 // in the vgpr, even if adding the immediate offset makes it positive.
Tim Renouf35484c92018-08-21 11:06:05 +00006537 unsigned Overflow = ImmOffset & ~MaxImm;
6538 ImmOffset -= Overflow;
Tim Renoufa37679d2018-10-03 10:29:43 +00006539 if ((int32_t)Overflow < 0) {
6540 Overflow += ImmOffset;
6541 ImmOffset = 0;
6542 }
Tim Renouf35484c92018-08-21 11:06:05 +00006543 C1 = cast<ConstantSDNode>(DAG.getConstant(ImmOffset, DL, MVT::i32));
6544 if (Overflow) {
6545 auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
6546 if (!N0)
6547 N0 = OverflowVal;
6548 else {
6549 SDValue Ops[] = { N0, OverflowVal };
6550 N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
6551 }
6552 }
6553 }
6554 if (!N0)
6555 N0 = DAG.getConstant(0, DL, MVT::i32);
6556 if (!C1)
6557 C1 = cast<ConstantSDNode>(DAG.getConstant(0, DL, MVT::i32));
6558 return {N0, SDValue(C1, 0)};
6559}
6560
Tim Renouf4f703f52018-08-21 11:07:10 +00006561// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
6562// three offsets (voffset, soffset and instoffset) into the SDValue[3] array
6563// pointed to by Offsets.
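// Three strategies are tried in turn: a fully constant offset that
// AMDGPU::splitMUBUFOffset can divide between soffset and instoffset, a base
// plus non-negative constant where the base goes in voffset and only the
// constant is split, and finally the whole expression in voffset with zero
// soffset and instoffset.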
6564void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006565 SelectionDAG &DAG, SDValue *Offsets,
6566 unsigned Align) const {
Tim Renouf4f703f52018-08-21 11:07:10 +00006567 SDLoc DL(CombinedOffset);
6568 if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
6569 uint32_t Imm = C->getZExtValue();
6570 uint32_t SOffset, ImmOffset;
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006571 if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {
Tim Renouf4f703f52018-08-21 11:07:10 +00006572 Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
6573 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6574 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6575 return;
6576 }
6577 }
6578 if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
6579 SDValue N0 = CombinedOffset.getOperand(0);
6580 SDValue N1 = CombinedOffset.getOperand(1);
6581 uint32_t SOffset, ImmOffset;
6582 int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00006583 if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
6584 Subtarget, Align)) {
Tim Renouf4f703f52018-08-21 11:07:10 +00006585 Offsets[0] = N0;
6586 Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
6587 Offsets[2] = DAG.getConstant(ImmOffset, DL, MVT::i32);
6588 return;
6589 }
6590 }
6591 Offsets[0] = CombinedOffset;
6592 Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
6593 Offsets[2] = DAG.getConstant(0, DL, MVT::i32);
6594}
6595
Ryan Taylor00e063a2019-03-19 16:07:00 +00006596// Handle 8 bit and 16 bit buffer loads
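// These are lowered to the 32-bit BUFFER_LOAD_UBYTE/USHORT nodes; the i32
// result is truncated back to the requested 8/16-bit type and merged with
// the load's chain.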
6597SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
6598 EVT LoadVT, SDLoc DL,
6599 ArrayRef<SDValue> Ops,
6600 MemSDNode *M) const {
6601 EVT IntVT = LoadVT.changeTypeToInteger();
6602 unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
6603 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
6604
6605 SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
6606 SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
6607 Ops, IntVT,
6608 M->getMemOperand());
6609 SDValue BufferLoadTrunc = DAG.getNode(ISD::TRUNCATE, DL,
6610 LoadVT.getScalarType(), BufferLoad);
6611 return DAG.getMergeValues({BufferLoadTrunc, BufferLoad.getValue(1)}, DL);
6612}
6613
6614// Handle 8 bit and 16 bit buffer stores
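// The store data is any-extended to i32 (the high bits are ignored by the
// byte/short store) and emitted as BUFFER_STORE_BYTE/SHORT.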
6615SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
6616 EVT VDataType, SDLoc DL,
6617 SDValue Ops[],
6618 MemSDNode *M) const {
6619 SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
6620 Ops[1] = BufferStoreExt;
6621 unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
6622 AMDGPUISD::BUFFER_STORE_SHORT;
6623 ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
6624 return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
6625 M->getMemOperand());
6626}
6627
Matt Arsenault90083d32018-06-07 09:54:49 +00006628static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
6629 ISD::LoadExtType ExtType, SDValue Op,
6630 const SDLoc &SL, EVT VT) {
6631 if (VT.bitsLT(Op.getValueType()))
6632 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);
6633
6634 switch (ExtType) {
6635 case ISD::SEXTLOAD:
6636 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
6637 case ISD::ZEXTLOAD:
6638 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
6639 case ISD::EXTLOAD:
6640 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
6641 case ISD::NON_EXTLOAD:
6642 return Op;
6643 }
6644
6645 llvm_unreachable("invalid ext type");
6646}
6647
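// Widen a uniform, at-least-dword-aligned load of a sub-dword type from
// constant (or invariant global) memory into a 32-bit load, then truncate or
// extend the result back to the original type. This keeps such loads
// selectable as dword-granular scalar memory instructions.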
6648SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
6649 SelectionDAG &DAG = DCI.DAG;
6650 if (Ld->getAlignment() < 4 || Ld->isDivergent())
6651 return SDValue();
6652
6653 // FIXME: Constant loads should all be marked invariant.
6654 unsigned AS = Ld->getAddressSpace();
Matt Arsenault0da63502018-08-31 05:49:54 +00006655 if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
6656 AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Matt Arsenault90083d32018-06-07 09:54:49 +00006657 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
6658 return SDValue();
6659
6660 // Don't do this early, since it may interfere with adjacent load merging for
6661 // illegal types. We can avoid losing alignment information for exotic types
6662 // pre-legalize.
6663 EVT MemVT = Ld->getMemoryVT();
6664 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
6665 MemVT.getSizeInBits() >= 32)
6666 return SDValue();
6667
6668 SDLoc SL(Ld);
6669
6670 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
6671 "unexpected vector extload");
6672
6673 // TODO: Drop only high part of range.
6674 SDValue Ptr = Ld->getBasePtr();
6675 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
6676 MVT::i32, SL, Ld->getChain(), Ptr,
6677 Ld->getOffset(),
6678 Ld->getPointerInfo(), MVT::i32,
6679 Ld->getAlignment(),
6680 Ld->getMemOperand()->getFlags(),
6681 Ld->getAAInfo(),
6682 nullptr); // Drop ranges
6683
6684 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
6685 if (MemVT.isFloatingPoint()) {
6686 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
6687 "unexpected fp extload");
6688 TruncVT = MemVT.changeTypeToInteger();
6689 }
6690
6691 SDValue Cvt = NewLoad;
6692 if (Ld->getExtensionType() == ISD::SEXTLOAD) {
6693 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
6694 DAG.getValueType(TruncVT));
6695 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
6696 Ld->getExtensionType() == ISD::NON_EXTLOAD) {
6697 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
6698 } else {
6699 assert(Ld->getExtensionType() == ISD::EXTLOAD);
6700 }
6701
6702 EVT VT = Ld->getValueType(0);
6703 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
6704
6705 DCI.AddToWorklist(Cvt.getNode());
6706
6707 // We may need to handle exotic cases, such as i16->i64 extloads, so insert
6708 // the appropriate extension from the 32-bit load.
6709 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
6710 DCI.AddToWorklist(Cvt.getNode());
6711
6712 // Handle conversion back to floating point if necessary.
6713 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);
6714
6715 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
6716}
6717
Tom Stellard81d871d2013-11-13 23:36:50 +00006718SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
6719 SDLoc DL(Op);
6720 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00006721 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00006722 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00006723
Matt Arsenaulta1436412016-02-10 18:21:45 +00006724 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault65ca292a2017-09-07 05:37:34 +00006725 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
6726 return SDValue();
6727
Matt Arsenault6dfda962016-02-10 18:21:39 +00006728 // FIXME: Copied from PPC
6729 // First, load into 32 bits, then truncate to 1 bit.
6730
6731 SDValue Chain = Load->getChain();
6732 SDValue BasePtr = Load->getBasePtr();
6733 MachineMemOperand *MMO = Load->getMemOperand();
6734
Tom Stellard115a6152016-11-10 16:02:37 +00006735 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
6736
Matt Arsenault6dfda962016-02-10 18:21:39 +00006737 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00006738 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00006739
Tim Renouf361b5b22019-03-21 12:01:21 +00006740 if (!MemVT.isVector()) {
6741 SDValue Ops[] = {
6742 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
6743 NewLD.getValue(1)
6744 };
6745
6746 return DAG.getMergeValues(Ops, DL);
6747 }
6748
6749 SmallVector<SDValue, 3> Elts;
6750 for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
6751 SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
6752 DAG.getConstant(I, DL, MVT::i32));
6753
6754 Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
6755 }
6756
Matt Arsenault6dfda962016-02-10 18:21:39 +00006757 SDValue Ops[] = {
Tim Renouf361b5b22019-03-21 12:01:21 +00006758 DAG.getBuildVector(MemVT, DL, Elts),
Matt Arsenault6dfda962016-02-10 18:21:39 +00006759 NewLD.getValue(1)
6760 };
6761
6762 return DAG.getMergeValues(Ops, DL);
6763 }
Tom Stellard81d871d2013-11-13 23:36:50 +00006764
Matt Arsenaulta1436412016-02-10 18:21:45 +00006765 if (!MemVT.isVector())
6766 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00006767
Matt Arsenaulta1436412016-02-10 18:21:45 +00006768 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
6769 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00006770
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006771 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
Simon Pilgrim266f4392019-06-11 11:00:23 +00006772 *Load->getMemOperand())) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006773 SDValue Ops[2];
6774 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
6775 return DAG.getMergeValues(Ops, DL);
6776 }
Simon Pilgrim266f4392019-06-11 11:00:23 +00006777
6778 unsigned Alignment = Load->getAlignment();
6779 unsigned AS = Load->getAddressSpace();
Stanislav Mekhanoshina224f682019-05-01 16:11:11 +00006780 if (Subtarget->hasLDSMisalignedBug() &&
6781 AS == AMDGPUAS::FLAT_ADDRESS &&
6782 Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
6783 return SplitVectorLoad(Op, DAG);
6784 }
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006785
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006786 MachineFunction &MF = DAG.getMachineFunction();
6787 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
6788 // If there is a possibilty that flat instruction access scratch memory
6789 // then we need to use the same legalization rules we use for private.
Matt Arsenault0da63502018-08-31 05:49:54 +00006790 if (AS == AMDGPUAS::FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006791 AS = MFI->hasFlatScratchInit() ?
Matt Arsenault0da63502018-08-31 05:49:54 +00006792 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00006793
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006794 unsigned NumElements = MemVT.getVectorNumElements();
Matt Arsenault6c041a32018-03-29 19:59:28 +00006795
Matt Arsenault0da63502018-08-31 05:49:54 +00006796 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6797 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
Tim Renouf361b5b22019-03-21 12:01:21 +00006798 if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
6799 if (MemVT.isPow2VectorType())
6800 return SDValue();
6801 if (NumElements == 3)
6802 return WidenVectorLoad(Op, DAG);
6803 return SplitVectorLoad(Op, DAG);
6804 }
Matt Arsenaulta1436412016-02-10 18:21:45 +00006805 // Non-uniform loads will be selected to MUBUF instructions, so they
Alexander Timofeev18009562016-12-08 17:28:47 +00006806 // have the same legalization requirements as global and private
Matt Arsenaulta1436412016-02-10 18:21:45 +00006807 // loads.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006809 }
Matt Arsenault6c041a32018-03-29 19:59:28 +00006810
Matt Arsenault0da63502018-08-31 05:49:54 +00006811 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6812 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6813 AS == AMDGPUAS::GLOBAL_ADDRESS) {
Alexander Timofeev2e5eece2018-03-05 15:12:21 +00006814 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
Farhana Aleen89196642018-03-07 17:09:18 +00006815 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) &&
Tim Renouf361b5b22019-03-21 12:01:21 +00006816 Alignment >= 4 && NumElements < 32) {
6817 if (MemVT.isPow2VectorType())
6818 return SDValue();
6819 if (NumElements == 3)
6820 return WidenVectorLoad(Op, DAG);
6821 return SplitVectorLoad(Op, DAG);
6822 }
Alexander Timofeev18009562016-12-08 17:28:47 +00006823 // Non-uniform loads will be selected to MUBUF instructions, so they
6824 // have the same legalization requirements as global and private
6825 // loads.
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006827 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006828 if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
6829 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
6830 AS == AMDGPUAS::GLOBAL_ADDRESS ||
6831 AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006832 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00006833 return SplitVectorLoad(Op, DAG);
Tim Renouf361b5b22019-03-21 12:01:21 +00006834 // v3 loads not supported on SI.
6835 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
6836 return WidenVectorLoad(Op, DAG);
6837 // v3 and v4 loads are supported for private and global memory.
Matt Arsenaulta1436412016-02-10 18:21:45 +00006838 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006839 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006840 if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006841 // Depending on the setting of the private_element_size field in the
6842 // resource descriptor, we can only make private accesses up to a certain
6843 // size.
6844 switch (Subtarget->getMaxPrivateElementSize()) {
6845 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00006846 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006847 case 8:
6848 if (NumElements > 2)
6849 return SplitVectorLoad(Op, DAG);
6850 return SDValue();
6851 case 16:
6852 // Same as global/flat
6853 if (NumElements > 4)
6854 return SplitVectorLoad(Op, DAG);
Tim Renouf361b5b22019-03-21 12:01:21 +00006855 // v3 loads not supported on SI.
6856 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
6857 return WidenVectorLoad(Op, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00006858 return SDValue();
6859 default:
6860 llvm_unreachable("unsupported private_element_size");
6861 }
Matt Arsenault0da63502018-08-31 05:49:54 +00006862 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Farhana Aleena7cb3112018-03-09 17:41:39 +00006863 // Use ds_read_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00006864 if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
Farhana Aleena7cb3112018-03-09 17:41:39 +00006865 MemVT.getStoreSize() == 16)
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00006866 return SDValue();
6867
Farhana Aleena7cb3112018-03-09 17:41:39 +00006868 if (NumElements > 2)
6869 return SplitVectorLoad(Op, DAG);
Nicolai Haehnle48219372018-10-17 15:37:48 +00006870
6871    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
6872 // address is negative, then the instruction is incorrectly treated as
6873 // out-of-bounds even if base + offsets is in bounds. Split vectorized
6874 // loads here to avoid emitting ds_read2_b32. We may re-combine the
6875 // load later in the SILoadStoreOptimizer.
6876 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
6877 NumElements == 2 && MemVT.getStoreSize() == 8 &&
6878 Load->getAlignment() < 8) {
6879 return SplitVectorLoad(Op, DAG);
6880 }
Tom Stellarde9373602014-01-22 19:24:14 +00006881 }
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00006882 return SDValue();
Tom Stellard81d871d2013-11-13 23:36:50 +00006883}
6884
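// Lower a 64-bit select by bitcasting both values to v2i32, selecting the low
// and high halves separately, and reassembling the 64-bit result.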
Tom Stellard0ec134f2014-02-04 17:18:40 +00006885SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006886 EVT VT = Op.getValueType();
6887 assert(VT.getSizeInBits() == 64);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006888
6889 SDLoc DL(Op);
6890 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006891
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00006892 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
6893 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006894
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00006895 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
6896 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
6897
6898 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
6899 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006900
6901 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
6902
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00006903 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
6904 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006905
6906 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
6907
Ahmed Bougacha128f8732016-04-26 21:15:30 +00006908 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Matt Arsenault02dc7e12018-06-15 15:15:46 +00006909 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00006910}
6911
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006912// Catch division cases where we can use shortcuts with rcp and rsq
6913// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00006914SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
6915 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006916 SDLoc SL(Op);
6917 SDValue LHS = Op.getOperand(0);
6918 SDValue RHS = Op.getOperand(1);
6919 EVT VT = Op.getValueType();
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006920 const SDNodeFlags Flags = Op->getFlags();
Michael Berg7acc81b2018-05-04 18:48:20 +00006921 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006922
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00006923 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
6924 return SDValue();
6925
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006926 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Konstantin Zhuravlyovc4b18e72017-04-21 19:25:33 +00006927 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
Matt Arsenault979902b2016-08-02 22:25:04 +00006928 if (CLHS->isExactlyValue(1.0)) {
6929 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
6930        // the CI documentation have a worst case error of 1 ulp.
6931 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
6932 // use it as long as we aren't trying to use denormals.
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006933 //
6934 // v_rcp_f16 and v_rsq_f16 DO support denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006935
Matt Arsenault979902b2016-08-02 22:25:04 +00006936 // 1.0 / sqrt(x) -> rsq(x)
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00006937
Matt Arsenault979902b2016-08-02 22:25:04 +00006938 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
6939 // error seems really high at 2^29 ULP.
6940 if (RHS.getOpcode() == ISD::FSQRT)
6941 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
6942
6943 // 1.0 / x -> rcp(x)
6944 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
6945 }
6946
6947 // Same as for 1.0, but expand the sign out of the constant.
6948 if (CLHS->isExactlyValue(-1.0)) {
6949 // -1.0 / x -> rcp (fneg x)
6950 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
6951 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
6952 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006953 }
6954 }
6955
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006956 if (Unsafe) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006957 // Turn into multiply by the reciprocal.
6958 // x / y -> x * (1.0 / y)
6959 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Stanislav Mekhanoshin9d7b1c92017-07-06 20:34:21 +00006960 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00006961 }
6962
6963 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00006964}
6965
Tom Stellard8485fa02016-12-07 02:42:15 +00006966static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
6967 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
6968 if (GlueChain->getNumValues() <= 1) {
6969 return DAG.getNode(Opcode, SL, VT, A, B);
6970 }
6971
6972 assert(GlueChain->getNumValues() == 3);
6973
6974 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
6975 switch (Opcode) {
6976 default: llvm_unreachable("no chain equivalent for opcode");
6977 case ISD::FMUL:
6978 Opcode = AMDGPUISD::FMUL_W_CHAIN;
6979 break;
6980 }
6981
6982 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
6983 GlueChain.getValue(2));
6984}
6985
6986static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
6987 EVT VT, SDValue A, SDValue B, SDValue C,
6988 SDValue GlueChain) {
6989 if (GlueChain->getNumValues() <= 1) {
6990 return DAG.getNode(Opcode, SL, VT, A, B, C);
6991 }
6992
6993 assert(GlueChain->getNumValues() == 3);
6994
6995 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
6996 switch (Opcode) {
6997 default: llvm_unreachable("no chain equivalent for opcode");
6998 case ISD::FMA:
6999 Opcode = AMDGPUISD::FMA_W_CHAIN;
7000 break;
7001 }
7002
7003 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
7004 GlueChain.getValue(2));
7005}
7006
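// f16 division below is handled by widening to f32: form src0 * rcp(src1) in
// f32, round the result back to f16, and let div_fixup handle the special
// cases. The extra f32 precision presumably makes an explicit refinement
// sequence unnecessary here.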
Matt Arsenault4052a572016-12-22 03:05:41 +00007007SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaultcdff21b2016-12-22 03:05:44 +00007008 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
7009 return FastLowered;
7010
Matt Arsenault4052a572016-12-22 03:05:41 +00007011 SDLoc SL(Op);
7012 SDValue Src0 = Op.getOperand(0);
7013 SDValue Src1 = Op.getOperand(1);
7014
7015 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
7016 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
7017
7018 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
7019 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
7020
7021 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
7022 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
7023
7024 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
7025}
7026
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00007027// Faster 2.5 ULP division that does not support denormals.
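// Rough shape of the sequence below (descriptive sketch): when |rhs| is larger
// than 2^96 the denominator is pre-scaled by 2^-32 before the reciprocal,
// presumably to keep rcp's result away from the denormal range, and the final
// product is scaled by the same factor to compensate:
//   s = (|rhs| > 2^96) ? 2^-32 : 1.0
//   q = (lhs * rcp(rhs * s)) * s
// K0 = 0x6f800000 is the f32 bit pattern of 2^96; K1 = 0x2f800000 is 2^-32.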
7028SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
7029 SDLoc SL(Op);
7030 SDValue LHS = Op.getOperand(1);
7031 SDValue RHS = Op.getOperand(2);
7032
7033 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
7034
7035 const APFloat K0Val(BitsToFloat(0x6f800000));
7036 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
7037
7038 const APFloat K1Val(BitsToFloat(0x2f800000));
7039 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
7040
7041 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
7042
7043 EVT SetCCVT =
7044 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
7045
7046 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
7047
7048 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
7049
7050 // TODO: Should this propagate fast-math-flags?
7051 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
7052
7053 // rcp does not support denormals.
7054 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
7055
7056 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
7057
7058 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
7059}
7060
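// LowerFDIV32 below follows the usual div_scale / rcp / fma-based
// Newton-Raphson refinement shape, sketched here for orientation (d and n are
// the div_scale'd denominator and numerator; see the code for the
// authoritative sequence):
//   r0 = rcp(d)                  // approximate 1/d          (ApproxRcp)
//   e  = fma(-d, r0, 1.0)        // approximation error      (Fma0)
//   r1 = fma(e, r0, r0)          // refined reciprocal       (Fma1)
//   q0 = n * r1                  // first quotient estimate  (Mul)
// followed by further fma residual/correction steps (Fma2..Fma4) and the
// div_fmas / div_fixup nodes that produce the final result.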
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007061SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00007062 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00007063 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00007064
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007065 SDLoc SL(Op);
7066 SDValue LHS = Op.getOperand(0);
7067 SDValue RHS = Op.getOperand(1);
7068
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007069 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007070
Wei Dinged0f97f2016-06-09 19:17:15 +00007071 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007072
Tom Stellard8485fa02016-12-07 02:42:15 +00007073 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7074 RHS, RHS, LHS);
7075 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
7076 LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007077
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00007078 // Denominator is scaled to not be denormal, so using rcp is ok.
Tom Stellard8485fa02016-12-07 02:42:15 +00007079 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
7080 DenominatorScaled);
7081 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
7082 DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007083
Tom Stellard8485fa02016-12-07 02:42:15 +00007084 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
7085 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
7086 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007087
Tom Stellard8485fa02016-12-07 02:42:15 +00007088 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007089
Tom Stellard8485fa02016-12-07 02:42:15 +00007090 if (!Subtarget->hasFP32Denormals()) {
7091 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
7092 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
7093 SL, MVT::i32);
7094 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
7095 DAG.getEntryNode(),
7096 EnableDenormValue, BitField);
7097 SDValue Ops[3] = {
7098 NegDivScale0,
7099 EnableDenorm.getValue(0),
7100 EnableDenorm.getValue(1)
7101 };
Matt Arsenault37fefd62016-06-10 02:18:02 +00007102
Tom Stellard8485fa02016-12-07 02:42:15 +00007103 NegDivScale0 = DAG.getMergeValues(Ops, SL);
7104 }
7105
7106 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
7107 ApproxRcp, One, NegDivScale0);
7108
7109 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
7110 ApproxRcp, Fma0);
7111
7112 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
7113 Fma1, Fma1);
7114
7115 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
7116 NumeratorScaled, Mul);
7117
7118 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
7119
7120 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
7121 NumeratorScaled, Fma3);
7122
7123 if (!Subtarget->hasFP32Denormals()) {
7124 const SDValue DisableDenormValue =
7125 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
7126 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
7127 Fma4.getValue(1),
7128 DisableDenormValue,
7129 BitField,
7130 Fma4.getValue(2));
7131
7132 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
7133 DisableDenorm, DAG.getRoot());
7134 DAG.setRoot(OutputChain);
7135 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00007136
Wei Dinged0f97f2016-06-09 19:17:15 +00007137 SDValue Scale = NumeratorScaled.getValue(1);
Tom Stellard8485fa02016-12-07 02:42:15 +00007138 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
7139 Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00007140
Wei Dinged0f97f2016-06-09 19:17:15 +00007141 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007142}
7143
7144SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007145 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00007146 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007147
7148 SDLoc SL(Op);
7149 SDValue X = Op.getOperand(0);
7150 SDValue Y = Op.getOperand(1);
7151
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007152 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007153
7154 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
7155
7156 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
7157
7158 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
7159
7160 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
7161
7162 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
7163
7164 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
7165
7166 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
7167
7168 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
7169
7170 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
7171 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
7172
7173 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
7174 NegDivScale0, Mul, DivScale1);
7175
7176 SDValue Scale;
7177
Tom Stellard5bfbae52018-07-11 20:59:01 +00007178 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007179 // Workaround a hardware bug on SI where the condition output from div_scale
7180 // is not usable.
7181
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007182 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00007183
7184    // Figure out which scale to use for div_fmas.
7185 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
7186 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
7187 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
7188 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
7189
7190 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
7191 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
7192
7193 SDValue Scale0Hi
7194 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
7195 SDValue Scale1Hi
7196 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
7197
7198 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
7199 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
7200 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
7201 } else {
7202 Scale = DivScale1.getValue(1);
7203 }
7204
7205 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
7206 Fma4, Fma3, Mul, Scale);
7207
7208 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007209}
7210
7211SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
7212 EVT VT = Op.getValueType();
7213
7214 if (VT == MVT::f32)
7215 return LowerFDIV32(Op, DAG);
7216
7217 if (VT == MVT::f64)
7218 return LowerFDIV64(Op, DAG);
7219
Matt Arsenault4052a572016-12-22 03:05:41 +00007220 if (VT == MVT::f16)
7221 return LowerFDIV16(Op, DAG);
7222
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00007223 llvm_unreachable("Unexpected type for fdiv");
7224}
7225
Tom Stellard81d871d2013-11-13 23:36:50 +00007226SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7227 SDLoc DL(Op);
7228 StoreSDNode *Store = cast<StoreSDNode>(Op);
7229 EVT VT = Store->getMemoryVT();
7230
Matt Arsenault95245662016-02-11 05:32:46 +00007231 if (VT == MVT::i1) {
7232 return DAG.getTruncStore(Store->getChain(), DL,
7233 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
7234 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00007235 }
7236
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007237 assert(VT.isVector() &&
7238 Store->getValue().getValueType().getScalarType() == MVT::i32);
7239
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007240 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
Simon Pilgrim266f4392019-06-11 11:00:23 +00007241 *Store->getMemOperand())) {
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007242 return expandUnalignedStore(Store, DAG);
7243 }
Tom Stellard81d871d2013-11-13 23:36:50 +00007244
Simon Pilgrim266f4392019-06-11 11:00:23 +00007245 unsigned AS = Store->getAddressSpace();
Stanislav Mekhanoshina224f682019-05-01 16:11:11 +00007246 if (Subtarget->hasLDSMisalignedBug() &&
7247 AS == AMDGPUAS::FLAT_ADDRESS &&
7248 Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
7249 return SplitVectorStore(Op, DAG);
7250 }
7251
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00007252 MachineFunction &MF = DAG.getMachineFunction();
7253 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
7254  // If there is a possibility that flat instructions access scratch memory
7255 // then we need to use the same legalization rules we use for private.
Matt Arsenault0da63502018-08-31 05:49:54 +00007256 if (AS == AMDGPUAS::FLAT_ADDRESS)
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00007257 AS = MFI->hasFlatScratchInit() ?
Matt Arsenault0da63502018-08-31 05:49:54 +00007258 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00007259
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007260 unsigned NumElements = VT.getVectorNumElements();
Matt Arsenault0da63502018-08-31 05:49:54 +00007261 if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
7262 AS == AMDGPUAS::FLAT_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007263 if (NumElements > 4)
7264 return SplitVectorStore(Op, DAG);
Tim Renouf361b5b22019-03-21 12:01:21 +00007265 // v3 stores not supported on SI.
7266 if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
7267 return SplitVectorStore(Op, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007268 return SDValue();
Matt Arsenault0da63502018-08-31 05:49:54 +00007269 } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007270 switch (Subtarget->getMaxPrivateElementSize()) {
7271 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00007272 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007273 case 8:
7274 if (NumElements > 2)
7275 return SplitVectorStore(Op, DAG);
7276 return SDValue();
7277 case 16:
Tim Renouf361b5b22019-03-21 12:01:21 +00007278 if (NumElements > 4 || NumElements == 3)
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007279 return SplitVectorStore(Op, DAG);
7280 return SDValue();
7281 default:
7282 llvm_unreachable("unsupported private_element_size");
7283 }
Matt Arsenault0da63502018-08-31 05:49:54 +00007284 } else if (AS == AMDGPUAS::LOCAL_ADDRESS) {
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00007285 // Use ds_write_b128 if possible.
Marek Olsaka9a58fa2018-04-10 22:48:23 +00007286 if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
Tim Renouf361b5b22019-03-21 12:01:21 +00007287 VT.getStoreSize() == 16 && NumElements != 3)
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00007288 return SDValue();
7289
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00007290 if (NumElements > 2)
7291 return SplitVectorStore(Op, DAG);
Nicolai Haehnle48219372018-10-17 15:37:48 +00007292
7293    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
7294 // address is negative, then the instruction is incorrectly treated as
7295 // out-of-bounds even if base + offsets is in bounds. Split vectorized
7296 // stores here to avoid emitting ds_write2_b32. We may re-combine the
7297 // store later in the SILoadStoreOptimizer.
7298 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
7299 NumElements == 2 && VT.getStoreSize() == 8 &&
7300 Store->getAlignment() < 8) {
7301 return SplitVectorStore(Op, DAG);
7302 }
7303
Farhana Aleenc6c9dc82018-03-16 18:12:00 +00007304 return SDValue();
Yaxun Liu1a14bfa2017-03-27 14:04:01 +00007305 } else {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00007306 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00007307 }
Tom Stellard81d871d2013-11-13 23:36:50 +00007308}
7309
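// Note on the lowering below: the operand is multiplied by 1/(2*pi) before
// SIN_HW/COS_HW are emitted, which suggests the hardware takes its input in
// units of full revolutions rather than radians; on subtargets reporting
// hasTrigReducedRange() the scaled value is additionally wrapped into [0, 1)
// with FRACT first. Roughly: sin(x) -> sin_hw(fract(x * 1/(2*pi))).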
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007310SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007311 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007312 EVT VT = Op.getValueType();
7313 SDValue Arg = Op.getOperand(0);
David Stuttard20de3e92018-09-14 10:27:19 +00007314 SDValue TrigVal;
7315
Sanjay Patela2607012015-09-16 16:31:21 +00007316 // TODO: Should this propagate fast-math-flags?
David Stuttard20de3e92018-09-14 10:27:19 +00007317
7318 SDValue OneOver2Pi = DAG.getConstantFP(0.5 / M_PI, DL, VT);
7319
7320 if (Subtarget->hasTrigReducedRange()) {
7321 SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7322 TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal);
7323 } else {
7324 TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi);
7325 }
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007326
7327 switch (Op.getOpcode()) {
7328 case ISD::FCOS:
David Stuttard20de3e92018-09-14 10:27:19 +00007329 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007330 case ISD::FSIN:
David Stuttard20de3e92018-09-14 10:27:19 +00007331 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00007332 default:
7333 llvm_unreachable("Wrong trig opcode");
7334 }
7335}
7336
Tom Stellard354a43c2016-04-01 18:27:37 +00007337SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
7338 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
7339 assert(AtomicNode->isCompareAndSwap());
7340 unsigned AS = AtomicNode->getAddressSpace();
7341
7342 // No custom lowering required for local address space
Matt Arsenault0da63502018-08-31 05:49:54 +00007343 if (!isFlatGlobalAddrSpace(AS))
Tom Stellard354a43c2016-04-01 18:27:37 +00007344 return Op;
7345
7346 // Non-local address space requires custom lowering for atomic compare
7347 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
7348 SDLoc DL(Op);
7349 SDValue ChainIn = Op.getOperand(0);
7350 SDValue Addr = Op.getOperand(1);
7351 SDValue Old = Op.getOperand(2);
7352 SDValue New = Op.getOperand(3);
7353 EVT VT = Op.getValueType();
7354 MVT SimpleVT = VT.getSimpleVT();
7355 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
7356
Ahmed Bougacha128f8732016-04-26 21:15:30 +00007357 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00007358 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00007359
7360 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
7361 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00007362}
7363
Tom Stellard75aadc22012-12-11 21:25:42 +00007364//===----------------------------------------------------------------------===//
7365// Custom DAG optimizations
7366//===----------------------------------------------------------------------===//
7367
Matt Arsenault364a6742014-06-11 17:50:44 +00007368SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00007369 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00007370 EVT VT = N->getValueType(0);
7371 EVT ScalarVT = VT.getScalarType();
7372 if (ScalarVT != MVT::f32)
7373 return SDValue();
7374
7375 SelectionDAG &DAG = DCI.DAG;
7376 SDLoc DL(N);
7377
7378 SDValue Src = N->getOperand(0);
7379 EVT SrcVT = Src.getValueType();
7380
7381 // TODO: We could try to match extracting the higher bytes, which would be
7382 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
7383 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
7384 // about in practice.
Craig Topper80d3bb32018-03-06 19:44:52 +00007385 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
Matt Arsenault364a6742014-06-11 17:50:44 +00007386 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
7387 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
7388 DCI.AddToWorklist(Cvt.getNode());
7389 return Cvt;
7390 }
7391 }
7392
Matt Arsenault364a6742014-06-11 17:50:44 +00007393 return SDValue();
7394}
7395
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007396// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
7397
7398// This is a variant of
7399// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
7400//
7401// The normal DAG combiner will do this, but only if the add has one use, since
7402// otherwise it would increase the number of instructions.
7403//
7404// This prevents us from seeing a constant offset that can be folded into a
7405// memory instruction's addressing mode. If we know the resulting add offset of
7406// a pointer can be folded into an addressing offset, we can replace the pointer
7407// operand with the add of new constant offset. This eliminates one of the uses,
7408// and may allow the remaining use to also be simplified.
7409//
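// Illustrative example (hypothetical values): if a memory instruction's
// address is computed as (shl (add x, 16), 2), this combine rewrites it to
// (add (shl x, 2), 64); the constant 64 can then usually be folded into the
// instruction's immediate offset, provided it is legal for the addressing
// mode being checked below.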
7410SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
7411 unsigned AddrSpace,
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007412 EVT MemVT,
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007413 DAGCombinerInfo &DCI) const {
7414 SDValue N0 = N->getOperand(0);
7415 SDValue N1 = N->getOperand(1);
7416
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007417 // We only do this to handle cases where it's profitable when there are
7418 // multiple uses of the add, so defer to the standard combine.
Matt Arsenaultc8903122017-11-14 23:46:42 +00007419 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
7420 N0->hasOneUse())
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007421 return SDValue();
7422
7423 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
7424 if (!CN1)
7425 return SDValue();
7426
7427 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7428 if (!CAdd)
7429 return SDValue();
7430
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007431 // If the resulting offset is too large, we can't fold it into the addressing
7432 // mode offset.
7433 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007434 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());
7435
7436 AddrMode AM;
7437 AM.HasBaseReg = true;
7438 AM.BaseOffs = Offset.getSExtValue();
7439 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007440 return SDValue();
7441
7442 SelectionDAG &DAG = DCI.DAG;
7443 SDLoc SL(N);
7444 EVT VT = N->getValueType(0);
7445
7446 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007447 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007448
Matt Arsenaulte5e0c742017-11-13 05:33:35 +00007449 SDNodeFlags Flags;
7450 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
7451 (N0.getOpcode() == ISD::OR ||
7452 N0->getFlags().hasNoUnsignedWrap()));
7453
7454 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00007455}
7456
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007457SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
7458 DAGCombinerInfo &DCI) const {
7459 SDValue Ptr = N->getBasePtr();
7460 SelectionDAG &DAG = DCI.DAG;
7461 SDLoc SL(N);
7462
7463 // TODO: We could also do this for multiplies.
Matt Arsenaultfbe95332017-11-13 05:11:54 +00007464 if (Ptr.getOpcode() == ISD::SHL) {
7465 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
7466 N->getMemoryVT(), DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00007467 if (NewPtr) {
7468 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
7469
7470 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
7471 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
7472 }
7473 }
7474
7475 return SDValue();
7476}
7477
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007478static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
7479 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
7480 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
7481 (Opc == ISD::XOR && Val == 0);
7482}
7483
7484// Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor. This
7485// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
7486// integer combine opportunities since most 64-bit operations are decomposed
7487// this way. TODO: We won't want this for SALU especially if it is an inline
7488// immediate.
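// For example (illustrative): (and i64:x, 0x00000000ffffffff) splits into an
// AND of the low half with -1 and an AND of the high half with 0, both of
// which fold away trivially and expose further 32-bit combines.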
7489SDValue SITargetLowering::splitBinaryBitConstantOp(
7490 DAGCombinerInfo &DCI,
7491 const SDLoc &SL,
7492 unsigned Opc, SDValue LHS,
7493 const ConstantSDNode *CRHS) const {
7494 uint64_t Val = CRHS->getZExtValue();
7495 uint32_t ValLo = Lo_32(Val);
7496 uint32_t ValHi = Hi_32(Val);
7497 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7498
7499 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
7500 bitOpWithConstantIsReducible(Opc, ValHi)) ||
7501 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
7502 // If we need to materialize a 64-bit immediate, it will be split up later
7503 // anyway. Avoid creating the harder to understand 64-bit immediate
7504 // materialization.
7505 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
7506 }
7507
7508 return SDValue();
7509}
7510
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007511// Returns true if the argument is a boolean value which is not serialized into
7512// memory or an argument and does not require v_cndmask_b32 to be deserialized.
7513static bool isBoolSGPR(SDValue V) {
7514 if (V.getValueType() != MVT::i1)
7515 return false;
7516 switch (V.getOpcode()) {
7517 default: break;
7518 case ISD::SETCC:
7519 case ISD::AND:
7520 case ISD::OR:
7521 case ISD::XOR:
7522 case AMDGPUISD::FP_CLASS:
7523 return true;
7524 }
7525 return false;
7526}
7527
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007528// If a constant has all zeroes or all ones within each byte return it.
7529// Otherwise return 0.
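// e.g. 0x00ff00ff is returned unchanged, while 0x00f000ff yields 0 because
// byte 2 is only partially selected.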
7530static uint32_t getConstantPermuteMask(uint32_t C) {
7531 // 0xff for any zero byte in the mask
7532 uint32_t ZeroByteMask = 0;
7533 if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
7534 if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
7535 if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
7536 if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
7537 uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
7538 if ((NonZeroByteMask & C) != NonZeroByteMask)
7539 return 0; // Partial bytes selected.
7540 return C;
7541}
7542
7543// Check if a node selects whole bytes from its operand 0 starting at a byte
7544// boundary while masking the rest. Returns select mask as in the v_perm_b32
7545// or -1 if it did not succeed.
7546// Note byte select encoding:
7547// value 0-3 selects corresponding source byte;
7548// value 0xc selects zero;
7549// value 0xff selects 0xff.
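// Worked example: for (and x, 0x0000ffff) this returns 0x0c0c0100, i.e.
// bytes 0 and 1 come from the source and bytes 2 and 3 are forced to zero.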
7550static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
7551 assert(V.getValueSizeInBits() == 32);
7552
7553 if (V.getNumOperands() != 2)
7554 return ~0;
7555
7556 ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
7557 if (!N1)
7558 return ~0;
7559
7560 uint32_t C = N1->getZExtValue();
7561
7562 switch (V.getOpcode()) {
7563 default:
7564 break;
7565 case ISD::AND:
7566 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7567 return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
7568 }
7569 break;
7570
7571 case ISD::OR:
7572 if (uint32_t ConstMask = getConstantPermuteMask(C)) {
7573 return (0x03020100 & ~ConstMask) | ConstMask;
7574 }
7575 break;
7576
7577 case ISD::SHL:
7578 if (C % 8)
7579 return ~0;
7580
7581 return uint32_t((0x030201000c0c0c0cull << C) >> 32);
7582
7583 case ISD::SRL:
7584 if (C % 8)
7585 return ~0;
7586
7587 return uint32_t(0x0c0c0c0c03020100ull >> C);
7588 }
7589
7590 return ~0;
7591}
7592
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007593SDValue SITargetLowering::performAndCombine(SDNode *N,
7594 DAGCombinerInfo &DCI) const {
7595 if (DCI.isBeforeLegalize())
7596 return SDValue();
7597
7598 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007599 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007600 SDValue LHS = N->getOperand(0);
7601 SDValue RHS = N->getOperand(1);
7602
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007603
Stanislav Mekhanoshin53a21292017-05-23 19:54:48 +00007604 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7605 if (VT == MVT::i64 && CRHS) {
7606 if (SDValue Split
7607 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
7608 return Split;
7609 }
7610
7611 if (CRHS && VT == MVT::i32) {
7612 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
7613 // nb = number of trailing zeroes in mask
7614 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
7615    // given that we are selecting 8 or 16 bit fields starting at a byte boundary.
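    // For instance (illustrative): (and (srl x, 8), 0xff00) becomes
    //   (shl (bfe x, 16, 8), 8)
    // i.e. an 8-bit field extracted at bit offset 16 and shifted back into place.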
7616 uint64_t Mask = CRHS->getZExtValue();
7617 unsigned Bits = countPopulation(Mask);
7618 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
7619 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
7620 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
7621 unsigned Shift = CShift->getZExtValue();
7622 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
7623 unsigned Offset = NB + Shift;
7624 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
7625 SDLoc SL(N);
7626 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
7627 LHS->getOperand(0),
7628 DAG.getConstant(Offset, SL, MVT::i32),
7629 DAG.getConstant(Bits, SL, MVT::i32));
7630 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7631 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
7632 DAG.getValueType(NarrowVT));
7633 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
7634 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
7635 return Shl;
7636 }
7637 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007638 }
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007639
7640 // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7641 if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
7642 isa<ConstantSDNode>(LHS.getOperand(2))) {
7643 uint32_t Sel = getConstantPermuteMask(Mask);
7644 if (!Sel)
7645 return SDValue();
7646
7647 // Select 0xc for all zero bytes
7648 Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
7649 SDLoc DL(N);
7650 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7651 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7652 }
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007653 }
7654
7655 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
7656 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
7657 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007658 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7659 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
7660
7661 SDValue X = LHS.getOperand(0);
7662 SDValue Y = RHS.getOperand(0);
7663 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
7664 return SDValue();
7665
7666 if (LCC == ISD::SETO) {
7667 if (X != LHS.getOperand(1))
7668 return SDValue();
7669
7670 if (RCC == ISD::SETUNE) {
7671 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
7672 if (!C1 || !C1->isInfinity() || C1->isNegative())
7673 return SDValue();
7674
7675 const uint32_t Mask = SIInstrFlags::N_NORMAL |
7676 SIInstrFlags::N_SUBNORMAL |
7677 SIInstrFlags::N_ZERO |
7678 SIInstrFlags::P_ZERO |
7679 SIInstrFlags::P_SUBNORMAL |
7680 SIInstrFlags::P_NORMAL;
7681
7682 static_assert(((~(SIInstrFlags::S_NAN |
7683 SIInstrFlags::Q_NAN |
7684 SIInstrFlags::N_INFINITY |
7685 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
7686 "mask not equal");
7687
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00007688 SDLoc DL(N);
7689 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7690 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007691 }
7692 }
7693 }
7694
Matt Arsenault3dcf4ce2018-08-10 18:58:56 +00007695 if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
7696 std::swap(LHS, RHS);
7697
7698 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7699 RHS.hasOneUse()) {
7700 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7701 // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
7702 // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
7703 const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7704 if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
7705 (RHS.getOperand(0) == LHS.getOperand(0) &&
7706 LHS.getOperand(0) == LHS.getOperand(1))) {
7707 const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
7708 unsigned NewMask = LCC == ISD::SETO ?
7709 Mask->getZExtValue() & ~OrdMask :
7710 Mask->getZExtValue() & OrdMask;
7711
7712 SDLoc DL(N);
7713 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
7714 DAG.getConstant(NewMask, DL, MVT::i32));
7715 }
7716 }
7717
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00007718 if (VT == MVT::i32 &&
7719 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
7720 // and x, (sext cc from i1) => select cc, x, 0
7721 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
7722 std::swap(LHS, RHS);
7723 if (isBoolSGPR(RHS.getOperand(0)))
7724 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
7725 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
7726 }
7727
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007728 // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7729 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7730 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7731 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7732 uint32_t LHSMask = getPermuteMask(DAG, LHS);
7733 uint32_t RHSMask = getPermuteMask(DAG, RHS);
7734 if (LHSMask != ~0u && RHSMask != ~0u) {
7735 // Canonicalize the expression in an attempt to have fewer unique masks
7736 // and therefore fewer registers used to hold the masks.
7737 if (LHSMask > RHSMask) {
7738 std::swap(LHSMask, RHSMask);
7739 std::swap(LHS, RHS);
7740 }
7741
7742 // Select 0xc for each lane used from source operand. Zero has 0xc mask
7743 // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
7744 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7745 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7746
7747      // Check if we need to combine values from two sources within a byte.
7748 if (!(LHSUsedLanes & RHSUsedLanes) &&
7749 // If we select high and lower word keep it for SDWA.
7750 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7751 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7752 // Each byte in each mask is either selector mask 0-3, or has higher
7753 // bits set in either of masks, which can be 0xff for 0xff or 0x0c for
7754 // zero. If 0x0c is in either mask it shall always be 0x0c. Otherwise
7755 // mask which is not 0xff wins. By anding both masks we have a correct
7756 // result except that 0x0c shall be corrected to give 0x0c only.
7757 uint32_t Mask = LHSMask & RHSMask;
7758 for (unsigned I = 0; I < 32; I += 8) {
7759 uint32_t ByteSel = 0xff << I;
7760 if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
7761 Mask &= (0x0c << I) & 0xffffffff;
7762 }
7763
7764 // Add 4 to each active LHS lane. It will not affect any existing 0xff
7765 // or 0x0c.
7766 uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
7767 SDLoc DL(N);
7768
7769 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7770 LHS.getOperand(0), RHS.getOperand(0),
7771 DAG.getConstant(Sel, DL, MVT::i32));
7772 }
7773 }
7774 }
7775
Matt Arsenaultd0101a22015-01-06 23:00:46 +00007776 return SDValue();
7777}
7778
Matt Arsenaultf2290332015-01-06 23:00:39 +00007779SDValue SITargetLowering::performOrCombine(SDNode *N,
7780 DAGCombinerInfo &DCI) const {
7781 SelectionDAG &DAG = DCI.DAG;
7782 SDValue LHS = N->getOperand(0);
7783 SDValue RHS = N->getOperand(1);
7784
Matt Arsenault3b082382016-04-12 18:24:38 +00007785 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007786 if (VT == MVT::i1) {
7787 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
7788 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
7789 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
7790 SDValue Src = LHS.getOperand(0);
7791 if (Src != RHS.getOperand(0))
7792 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00007793
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007794 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
7795 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
7796 if (!CLHS || !CRHS)
7797 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00007798
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007799 // Only 10 bits are used.
7800 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00007801
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007802 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
7803 SDLoc DL(N);
7804 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
7805 Src, DAG.getConstant(NewMask, DL, MVT::i32));
7806 }
Matt Arsenault3b082382016-04-12 18:24:38 +00007807
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007808 return SDValue();
7809 }
7810
Stanislav Mekhanoshin8fd3c4e2018-06-12 23:50:37 +00007811 // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
7812 if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
7813 LHS.getOpcode() == AMDGPUISD::PERM &&
7814 isa<ConstantSDNode>(LHS.getOperand(2))) {
7815 uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
7816 if (!Sel)
7817 return SDValue();
7818
7819 Sel |= LHS.getConstantOperandVal(2);
7820 SDLoc DL(N);
7821 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
7822 LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
7823 }
7824
7825 // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
7826 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
7827 if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
7828 N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
7829 uint32_t LHSMask = getPermuteMask(DAG, LHS);
7830 uint32_t RHSMask = getPermuteMask(DAG, RHS);
7831 if (LHSMask != ~0u && RHSMask != ~0u) {
7832 // Canonicalize the expression in an attempt to have fewer unique masks
7833 // and therefore fewer registers used to hold the masks.
7834 if (LHSMask > RHSMask) {
7835 std::swap(LHSMask, RHSMask);
7836 std::swap(LHS, RHS);
7837 }
7838
7839 // Select 0xc for each lane used from source operand. Zero has 0xc mask
7840 // set, 0xff have 0xff in the mask, actual lanes are in the 0-3 range.
7841 uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7842 uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
7843
7844      // Check if we need to combine values from two sources within a byte.
7845 if (!(LHSUsedLanes & RHSUsedLanes) &&
7846 // If we select high and lower word keep it for SDWA.
7847 // TODO: teach SDWA to work with v_perm_b32 and remove the check.
7848 !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
7849 // Kill zero bytes selected by other mask. Zero value is 0xc.
7850 LHSMask &= ~RHSUsedLanes;
7851 RHSMask &= ~LHSUsedLanes;
7852 // Add 4 to each active LHS lane
7853 LHSMask |= LHSUsedLanes & 0x04040404;
7854 // Combine masks
7855 uint32_t Sel = LHSMask | RHSMask;
7856 SDLoc DL(N);
7857
7858 return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
7859 LHS.getOperand(0), RHS.getOperand(0),
7860 DAG.getConstant(Sel, DL, MVT::i32));
7861 }
7862 }
7863 }
7864
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007865 if (VT != MVT::i64)
7866 return SDValue();
7867
7868 // TODO: This could be a generic combine with a predicate for extracting the
7869 // high half of an integer being free.
7870
7871 // (or i64:x, (zero_extend i32:y)) ->
7872 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
7873 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
7874 RHS.getOpcode() != ISD::ZERO_EXTEND)
7875 std::swap(LHS, RHS);
7876
7877 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
7878 SDValue ExtSrc = RHS.getOperand(0);
7879 EVT SrcVT = ExtSrc.getValueType();
7880 if (SrcVT == MVT::i32) {
7881 SDLoc SL(N);
7882 SDValue LowLHS, HiBits;
7883 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
7884 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
7885
7886 DCI.AddToWorklist(LowOr.getNode());
7887 DCI.AddToWorklist(HiBits.getNode());
7888
7889 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
7890 LowOr, HiBits);
7891 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00007892 }
7893 }
7894
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007895 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
7896 if (CRHS) {
7897 if (SDValue Split
7898 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
7899 return Split;
7900 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00007901
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007902 return SDValue();
7903}
Matt Arsenaultf2290332015-01-06 23:00:39 +00007904
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007905SDValue SITargetLowering::performXorCombine(SDNode *N,
7906 DAGCombinerInfo &DCI) const {
7907 EVT VT = N->getValueType(0);
7908 if (VT != MVT::i64)
7909 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00007910
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00007911 SDValue LHS = N->getOperand(0);
7912 SDValue RHS = N->getOperand(1);
7913
7914 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
7915 if (CRHS) {
7916 if (SDValue Split
7917 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
7918 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00007919 }
7920
7921 return SDValue();
7922}
7923
Matt Arsenault5cf42712017-04-06 20:58:30 +00007924// Instructions that will be lowered with a final instruction that zeros the
7925// high result bits.
7926// XXX - probably only need to list legal operations.
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007927static bool fp16SrcZerosHighBits(unsigned Opc) {
7928 switch (Opc) {
Matt Arsenault5cf42712017-04-06 20:58:30 +00007929 case ISD::FADD:
7930 case ISD::FSUB:
7931 case ISD::FMUL:
7932 case ISD::FDIV:
7933 case ISD::FREM:
7934 case ISD::FMA:
7935 case ISD::FMAD:
7936 case ISD::FCANONICALIZE:
7937 case ISD::FP_ROUND:
7938 case ISD::UINT_TO_FP:
7939 case ISD::SINT_TO_FP:
7940 case ISD::FABS:
7941 // Fabs is lowered to a bit operation, but it's an and which will clear the
7942 // high bits anyway.
7943 case ISD::FSQRT:
7944 case ISD::FSIN:
7945 case ISD::FCOS:
7946 case ISD::FPOWI:
7947 case ISD::FPOW:
7948 case ISD::FLOG:
7949 case ISD::FLOG2:
7950 case ISD::FLOG10:
7951 case ISD::FEXP:
7952 case ISD::FEXP2:
7953 case ISD::FCEIL:
7954 case ISD::FTRUNC:
7955 case ISD::FRINT:
7956 case ISD::FNEARBYINT:
7957 case ISD::FROUND:
7958 case ISD::FFLOOR:
7959 case ISD::FMINNUM:
7960 case ISD::FMAXNUM:
7961 case AMDGPUISD::FRACT:
7962 case AMDGPUISD::CLAMP:
7963 case AMDGPUISD::COS_HW:
7964 case AMDGPUISD::SIN_HW:
7965 case AMDGPUISD::FMIN3:
7966 case AMDGPUISD::FMAX3:
7967 case AMDGPUISD::FMED3:
7968 case AMDGPUISD::FMAD_FTZ:
7969 case AMDGPUISD::RCP:
7970 case AMDGPUISD::RSQ:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00007971 case AMDGPUISD::RCP_IFLAG:
Matt Arsenault5cf42712017-04-06 20:58:30 +00007972 case AMDGPUISD::LDEXP:
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007973 return true;
Matt Arsenault5cf42712017-04-06 20:58:30 +00007974 default:
7975 // fcopysign, select and others may be lowered to 32-bit bit operations
7976 // which don't zero the high bits.
7977 return false;
Matt Arsenault8edfaee2017-03-31 19:53:03 +00007978 }
7979}
7980
7981SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
7982 DAGCombinerInfo &DCI) const {
7983 if (!Subtarget->has16BitInsts() ||
7984 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
7985 return SDValue();
7986
7987 EVT VT = N->getValueType(0);
7988 if (VT != MVT::i32)
7989 return SDValue();
7990
7991 SDValue Src = N->getOperand(0);
7992 if (Src.getValueType() != MVT::i16)
7993 return SDValue();
7994
7995 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
7996 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
7997 if (Src.getOpcode() == ISD::BITCAST) {
7998 SDValue BCSrc = Src.getOperand(0);
7999 if (BCSrc.getValueType() == MVT::f16 &&
8000 fp16SrcZerosHighBits(BCSrc.getOpcode()))
8001 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
8002 }
8003
8004 return SDValue();
8005}
8006
Ryan Taylor00e063a2019-03-19 16:07:00 +00008007SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N,
8008 DAGCombinerInfo &DCI)
8009 const {
8010 SDValue Src = N->getOperand(0);
8011 auto *VTSign = cast<VTSDNode>(N->getOperand(1));
8012
8013 if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE &&
8014 VTSign->getVT() == MVT::i8) ||
8015 (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT &&
8016 VTSign->getVT() == MVT::i16)) &&
8017 Src.hasOneUse()) {
8018 auto *M = cast<MemSDNode>(Src);
8019 SDValue Ops[] = {
8020 Src.getOperand(0), // Chain
8021 Src.getOperand(1), // rsrc
8022 Src.getOperand(2), // vindex
8023 Src.getOperand(3), // voffset
8024 Src.getOperand(4), // soffset
8025 Src.getOperand(5), // offset
8026 Src.getOperand(6),
8027 Src.getOperand(7)
8028 };
8029 // replace with BUFFER_LOAD_BYTE/SHORT
8030 SDVTList ResList = DCI.DAG.getVTList(MVT::i32,
8031 Src.getOperand(0).getValueType());
8032 unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ?
8033 AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT;
8034 SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N),
8035 ResList,
8036 Ops, M->getMemoryVT(),
8037 M->getMemOperand());
8038 return DCI.DAG.getMergeValues({BufferLoadSignExt,
8039 BufferLoadSignExt.getValue(1)}, SDLoc(N));
8040 }
8041 return SDValue();
8042}
8043
Matt Arsenaultf2290332015-01-06 23:00:39 +00008044SDValue SITargetLowering::performClassCombine(SDNode *N,
8045 DAGCombinerInfo &DCI) const {
8046 SelectionDAG &DAG = DCI.DAG;
8047 SDValue Mask = N->getOperand(1);
8048
8049 // fp_class x, 0 -> false
8050 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
8051 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00008052 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00008053 }
8054
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00008055 if (N->getOperand(0).isUndef())
8056 return DAG.getUNDEF(MVT::i1);
8057
Matt Arsenaultf2290332015-01-06 23:00:39 +00008058 return SDValue();
8059}
8060
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00008061SDValue SITargetLowering::performRcpCombine(SDNode *N,
8062 DAGCombinerInfo &DCI) const {
8063 EVT VT = N->getValueType(0);
8064 SDValue N0 = N->getOperand(0);
8065
8066 if (N0.isUndef())
8067 return N0;
8068
8069 if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
8070 N0.getOpcode() == ISD::SINT_TO_FP)) {
8071 return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
8072 N->getFlags());
8073 }
8074
8075 return AMDGPUTargetLowering::performRcpCombine(N, DCI);
8076}
8077
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008078bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
8079 unsigned MaxDepth) const {
8080 unsigned Opcode = Op.getOpcode();
8081 if (Opcode == ISD::FCANONICALIZE)
8082 return true;
8083
8084 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8085 auto F = CFP->getValueAPF();
8086 if (F.isNaN() && F.isSignaling())
8087 return false;
8088 return !F.isDenormal() || denormalsEnabledForType(Op.getValueType());
8089 }
8090
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008091 // If source is a result of another standard FP operation it is already in
8092 // canonical form.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008093 if (MaxDepth == 0)
8094 return false;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008095
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008096 switch (Opcode) {
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008097 // These will flush denorms if required.
8098 case ISD::FADD:
8099 case ISD::FSUB:
8100 case ISD::FMUL:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008101 case ISD::FCEIL:
8102 case ISD::FFLOOR:
8103 case ISD::FMA:
8104 case ISD::FMAD:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008105 case ISD::FSQRT:
8106 case ISD::FDIV:
8107 case ISD::FREM:
Matt Arsenaultce6d61f2018-08-06 21:51:52 +00008108 case ISD::FP_ROUND:
8109 case ISD::FP_EXTEND:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008110 case AMDGPUISD::FMUL_LEGACY:
8111 case AMDGPUISD::FMAD_FTZ:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00008112 case AMDGPUISD::RCP:
8113 case AMDGPUISD::RSQ:
8114 case AMDGPUISD::RSQ_CLAMP:
8115 case AMDGPUISD::RCP_LEGACY:
8116 case AMDGPUISD::RSQ_LEGACY:
8117 case AMDGPUISD::RCP_IFLAG:
8118 case AMDGPUISD::TRIG_PREOP:
8119 case AMDGPUISD::DIV_SCALE:
8120 case AMDGPUISD::DIV_FMAS:
8121 case AMDGPUISD::DIV_FIXUP:
8122 case AMDGPUISD::FRACT:
8123 case AMDGPUISD::LDEXP:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008124 case AMDGPUISD::CVT_PKRTZ_F16_F32:
Matt Arsenault940e6072018-08-10 19:20:17 +00008125 case AMDGPUISD::CVT_F32_UBYTE0:
8126 case AMDGPUISD::CVT_F32_UBYTE1:
8127 case AMDGPUISD::CVT_F32_UBYTE2:
8128 case AMDGPUISD::CVT_F32_UBYTE3:
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008129 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008130
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008131 // It can/will be lowered or combined as a bit operation.
8132 // Need to check their input recursively to handle.
8133 case ISD::FNEG:
8134 case ISD::FABS:
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008135 case ISD::FCOPYSIGN:
8136 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008137
8138 case ISD::FSIN:
8139 case ISD::FCOS:
8140 case ISD::FSINCOS:
8141 return Op.getValueType().getScalarType() != MVT::f16;
8142
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008143 case ISD::FMINNUM:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00008144 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008145 case ISD::FMINNUM_IEEE:
8146 case ISD::FMAXNUM_IEEE:
Matt Arsenaultd49ab0b2018-08-06 21:58:11 +00008147 case AMDGPUISD::CLAMP:
8148 case AMDGPUISD::FMED3:
8149 case AMDGPUISD::FMAX3:
8150 case AMDGPUISD::FMIN3: {
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008151    // FIXME: Shouldn't treat the generic operations differently based on these.
Matt Arsenault687ec752018-10-22 16:27:27 +00008152    // However, we aren't really required to flush the result from
8153    // minnum/maxnum.
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008154
Matt Arsenault687ec752018-10-22 16:27:27 +00008155 // snans will be quieted, so we only need to worry about denormals.
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008156 if (Subtarget->supportsMinMaxDenormModes() ||
Matt Arsenault687ec752018-10-22 16:27:27 +00008157 denormalsEnabledForType(Op.getValueType()))
8158 return true;
8159
8160 // Flushing may be required.
8161 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms. For such
8162 // targets need to check their input recursively.
8163
8164 // FIXME: Does this apply with clamp? It's implemented with max.
8165 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
8166 if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1))
8167 return false;
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008168 }
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008169
Matt Arsenault687ec752018-10-22 16:27:27 +00008170 return true;
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008171 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008172 case ISD::SELECT: {
8173 return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) &&
8174 isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008175 }
Matt Arsenaulte94ee832018-08-06 22:45:51 +00008176 case ISD::BUILD_VECTOR: {
8177 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
8178 SDValue SrcOp = Op.getOperand(i);
8179 if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1))
8180 return false;
8181 }
8182
8183 return true;
8184 }
8185 case ISD::EXTRACT_VECTOR_ELT:
8186 case ISD::EXTRACT_SUBVECTOR: {
8187 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1);
8188 }
8189 case ISD::INSERT_VECTOR_ELT: {
8190 return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) &&
8191 isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1);
8192 }
8193 case ISD::UNDEF:
8194 // Could be anything.
8195 return false;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008196
Matt Arsenault687ec752018-10-22 16:27:27 +00008197 case ISD::BITCAST: {
8198 // Hack round the mess we make when legalizing extract_vector_elt
8199 SDValue Src = Op.getOperand(0);
8200 if (Src.getValueType() == MVT::i16 &&
8201 Src.getOpcode() == ISD::TRUNCATE) {
8202 SDValue TruncSrc = Src.getOperand(0);
8203 if (TruncSrc.getValueType() == MVT::i32 &&
8204 TruncSrc.getOpcode() == ISD::BITCAST &&
8205 TruncSrc.getOperand(0).getValueType() == MVT::v2f16) {
8206 return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1);
8207 }
8208 }
8209
8210 return false;
8211 }
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008212 case ISD::INTRINSIC_WO_CHAIN: {
8213 unsigned IntrinsicID
8214 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
8215 // TODO: Handle more intrinsics
8216 switch (IntrinsicID) {
8217 case Intrinsic::amdgcn_cvt_pkrtz:
Matt Arsenault940e6072018-08-10 19:20:17 +00008218 case Intrinsic::amdgcn_cubeid:
8219 case Intrinsic::amdgcn_frexp_mant:
8220 case Intrinsic::amdgcn_fdot2:
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008221 return true;
8222 default:
8223 break;
8224 }
Matt Arsenault5bb9d792018-08-10 17:57:12 +00008225
8226 LLVM_FALLTHROUGH;
Matt Arsenault08f3fe42018-08-06 23:01:31 +00008227 }
Matt Arsenaultf8768bf2018-08-06 21:38:27 +00008228 default:
8229 return denormalsEnabledForType(Op.getValueType()) &&
8230 DAG.isKnownNeverSNaN(Op);
8231 }
8232
8233 llvm_unreachable("invalid operation");
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008234}
8235
Matt Arsenault9cd90712016-04-14 01:42:16 +00008236// Constant fold canonicalize.
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008237SDValue SITargetLowering::getCanonicalConstantFP(
8238 SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const {
8239 // Flush denormals to 0 if not enabled.
8240 if (C.isDenormal() && !denormalsEnabledForType(VT))
8241 return DAG.getConstantFP(0.0, SL, VT);
8242
8243 if (C.isNaN()) {
8244 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
8245 if (C.isSignaling()) {
8246 // Quiet a signaling NaN.
8247 // FIXME: Is this supposed to preserve payload bits?
8248 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8249 }
8250
8251 // Make sure it is the canonical NaN bitpattern.
8252 //
8253 // TODO: Can we use -1 as the canonical NaN value since it's an inline
8254 // immediate?
8255 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
8256 return DAG.getConstantFP(CanonicalQNaN, SL, VT);
8257 }
8258
8259 // Already canonical.
8260 return DAG.getConstantFP(C, SL, VT);
8261}
8262
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008263static bool vectorEltWillFoldAway(SDValue Op) {
8264 return Op.isUndef() || isa<ConstantFPSDNode>(Op);
8265}
8266
Matt Arsenault9cd90712016-04-14 01:42:16 +00008267SDValue SITargetLowering::performFCanonicalizeCombine(
8268 SDNode *N,
8269 DAGCombinerInfo &DCI) const {
Matt Arsenault9cd90712016-04-14 01:42:16 +00008270 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault4aec86d2018-07-31 13:34:31 +00008271 SDValue N0 = N->getOperand(0);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008272 EVT VT = N->getValueType(0);
Stanislav Mekhanoshin5680b0c2017-07-12 21:20:28 +00008273
Matt Arsenault4aec86d2018-07-31 13:34:31 +00008274 // fcanonicalize undef -> qnan
8275 if (N0.isUndef()) {
Matt Arsenault4aec86d2018-07-31 13:34:31 +00008276 APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
8277 return DAG.getConstantFP(QNaN, SDLoc(N), VT);
8278 }
8279
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008280 if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) {
Matt Arsenault9cd90712016-04-14 01:42:16 +00008281 EVT VT = N->getValueType(0);
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008282 return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF());
Matt Arsenault9cd90712016-04-14 01:42:16 +00008283 }
8284
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008285 // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x),
8286 // (fcanonicalize k)
8287 //
8288 // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0
8289
8290 // TODO: This could be better with wider vectors that will be split to v2f16,
8291 // and to consider uses since there aren't that many packed operations.
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008292 if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 &&
8293 isTypeLegal(MVT::v2f16)) {
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008294 SDLoc SL(N);
8295 SDValue NewElts[2];
8296 SDValue Lo = N0.getOperand(0);
8297 SDValue Hi = N0.getOperand(1);
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008298 EVT EltVT = Lo.getValueType();
8299
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008300 if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) {
8301 for (unsigned I = 0; I != 2; ++I) {
8302 SDValue Op = N0.getOperand(I);
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008303 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
8304 NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT,
8305 CFP->getValueAPF());
8306 } else if (Op.isUndef()) {
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008307 // Handled below based on what the other operand is.
8308 NewElts[I] = Op;
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008309 } else {
8310 NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op);
8311 }
8312 }
8313
Matt Arsenaultb5acec12018-08-12 08:42:54 +00008314 // If one half is undef, and one is constant, prefer a splat vector rather
 8315 // than the normal qNaN. If it's a register, prefer 0.0 since that's
 8316 // cheaper to use and may be free with a packed operation.
 8317 if (NewElts[0].isUndef()) {
 8319 NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ?
 8320 NewElts[1] : DAG.getConstantFP(0.0f, SL, EltVT);
 8321 }
8322
8323 if (NewElts[1].isUndef()) {
8324 NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ?
8325 NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT);
8326 }
8327
Matt Arsenaulta29e7622018-08-06 22:30:44 +00008328 return DAG.getBuildVector(VT, SL, NewElts);
8329 }
8330 }
8331
Matt Arsenault687ec752018-10-22 16:27:27 +00008332 unsigned SrcOpc = N0.getOpcode();
8333
8334 // If it's free to do so, push canonicalizes further up the source, which may
8335 // find a canonical source.
8336 //
 8337 // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for
8338 // sNaNs.
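  // e.g. fcanonicalize (fminnum x, k) -> fminnum (fcanonicalize x), k', where k'
  // is k folded through getCanonicalConstantFP, so the outer canonicalize is dropped.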
8339 if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) {
8340 auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
8341 if (CRHS && N0.hasOneUse()) {
8342 SDLoc SL(N);
8343 SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT,
8344 N0.getOperand(0));
8345 SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF());
8346 DCI.AddToWorklist(Canon0.getNode());
8347
8348 return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1);
8349 }
8350 }
8351
Matt Arsenaultf2a167f2018-08-06 22:10:26 +00008352 return isCanonicalized(DAG, N0) ? N0 : SDValue();
Matt Arsenault9cd90712016-04-14 01:42:16 +00008353}
8354
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008355static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
8356 switch (Opc) {
8357 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008358 case ISD::FMAXNUM_IEEE:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008359 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008360 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008361 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008362 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008363 return AMDGPUISD::UMAX3;
8364 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008365 case ISD::FMINNUM_IEEE:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008366 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008367 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008368 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00008369 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008370 return AMDGPUISD::UMIN3;
8371 default:
8372 llvm_unreachable("Not a min/max opcode");
8373 }
8374}
8375
Matt Arsenault10268f92017-02-27 22:40:39 +00008376SDValue SITargetLowering::performIntMed3ImmCombine(
8377 SelectionDAG &DAG, const SDLoc &SL,
8378 SDValue Op0, SDValue Op1, bool Signed) const {
Matt Arsenaultf639c322016-01-28 20:53:42 +00008379 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
8380 if (!K1)
8381 return SDValue();
8382
8383 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
8384 if (!K0)
8385 return SDValue();
8386
Matt Arsenaultf639c322016-01-28 20:53:42 +00008387 if (Signed) {
8388 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
8389 return SDValue();
8390 } else {
8391 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
8392 return SDValue();
8393 }
8394
8395 EVT VT = K0->getValueType(0);
Matt Arsenault10268f92017-02-27 22:40:39 +00008396 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
8397 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
8398 return DAG.getNode(Med3Opc, SL, VT,
8399 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
8400 }
Tom Stellard115a6152016-11-10 16:02:37 +00008401
Matt Arsenault10268f92017-02-27 22:40:39 +00008402 // If there isn't a 16-bit med3 operation, convert to 32-bit.
Tom Stellard115a6152016-11-10 16:02:37 +00008403 MVT NVT = MVT::i32;
8404 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8405
Matt Arsenault10268f92017-02-27 22:40:39 +00008406 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
8407 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
8408 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
Tom Stellard115a6152016-11-10 16:02:37 +00008409
Matt Arsenault10268f92017-02-27 22:40:39 +00008410 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
8411 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
Matt Arsenaultf639c322016-01-28 20:53:42 +00008412}
8413
Matt Arsenault6b114d22017-08-30 01:20:17 +00008414static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
8415 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
8416 return C;
8417
8418 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
8419 if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
8420 return C;
8421 }
8422
8423 return nullptr;
8424}
8425
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008426SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
8427 const SDLoc &SL,
8428 SDValue Op0,
8429 SDValue Op1) const {
Matt Arsenault6b114d22017-08-30 01:20:17 +00008430 ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
Matt Arsenaultf639c322016-01-28 20:53:42 +00008431 if (!K1)
8432 return SDValue();
8433
Matt Arsenault6b114d22017-08-30 01:20:17 +00008434 ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
Matt Arsenaultf639c322016-01-28 20:53:42 +00008435 if (!K0)
8436 return SDValue();
8437
8438 // Ordered >= (although NaN inputs should have folded away by now).
8439 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
8440 if (Cmp == APFloat::cmpGreaterThan)
8441 return SDValue();
8442
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008443 const MachineFunction &MF = DAG.getMachineFunction();
8444 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8445
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008446 // TODO: Check IEEE bit enabled?
Matt Arsenault6b114d22017-08-30 01:20:17 +00008447 EVT VT = Op0.getValueType();
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008448 if (Info->getMode().DX10Clamp) {
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008449 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
8450 // hardware fmed3 behavior converting to a min.
8451 // FIXME: Should this be allowing -0.0?
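    // e.g. fminnum (fmaxnum x, 0.0), 1.0 is folded here to (clamp x).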
8452 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
8453 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
8454 }
8455
Matt Arsenault6b114d22017-08-30 01:20:17 +00008456 // med3 for f16 is only available on gfx9+, and not available for v2f16.
8457 if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
8458 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
8459 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
8460 // then give the other result, which is different from med3 with a NaN
8461 // input.
8462 SDValue Var = Op0.getOperand(0);
Matt Arsenaultc3dc8e62018-08-03 18:27:52 +00008463 if (!DAG.isKnownNeverSNaN(Var))
Matt Arsenault6b114d22017-08-30 01:20:17 +00008464 return SDValue();
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008465
Matt Arsenaultebf46142018-09-18 02:34:54 +00008466 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
8467
8468 if ((!K0->hasOneUse() ||
8469 TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) &&
8470 (!K1->hasOneUse() ||
8471 TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) {
8472 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
8473 Var, SDValue(K0, 0), SDValue(K1, 0));
8474 }
Matt Arsenault6b114d22017-08-30 01:20:17 +00008475 }
Matt Arsenaultf639c322016-01-28 20:53:42 +00008476
Matt Arsenault6b114d22017-08-30 01:20:17 +00008477 return SDValue();
Matt Arsenaultf639c322016-01-28 20:53:42 +00008478}
8479
8480SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
8481 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008482 SelectionDAG &DAG = DCI.DAG;
8483
Matt Arsenault79a45db2017-02-22 23:53:37 +00008484 EVT VT = N->getValueType(0);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008485 unsigned Opc = N->getOpcode();
8486 SDValue Op0 = N->getOperand(0);
8487 SDValue Op1 = N->getOperand(1);
8488
 8489 // Only do this if the inner op has one use since this will just increase
 8490 // register pressure for no benefit.
8491
Matt Arsenault79a45db2017-02-22 23:53:37 +00008492 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
Neil Henninge85f6bd2019-03-19 15:50:24 +00008493 !VT.isVector() &&
8494 (VT == MVT::i32 || VT == MVT::f32 ||
8495 ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
Matt Arsenault5b39b342016-01-28 20:53:48 +00008496 // max(max(a, b), c) -> max3(a, b, c)
8497 // min(min(a, b), c) -> min3(a, b, c)
8498 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
8499 SDLoc DL(N);
8500 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8501 DL,
8502 N->getValueType(0),
8503 Op0.getOperand(0),
8504 Op0.getOperand(1),
8505 Op1);
8506 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008507
Matt Arsenault5b39b342016-01-28 20:53:48 +00008508 // Try commuted.
8509 // max(a, max(b, c)) -> max3(a, b, c)
8510 // min(a, min(b, c)) -> min3(a, b, c)
8511 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
8512 SDLoc DL(N);
8513 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
8514 DL,
8515 N->getValueType(0),
8516 Op0,
8517 Op1.getOperand(0),
8518 Op1.getOperand(1));
8519 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008520 }
8521
Matt Arsenaultf639c322016-01-28 20:53:42 +00008522 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
8523 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
8524 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
8525 return Med3;
8526 }
8527
8528 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
8529 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
8530 return Med3;
8531 }
8532
8533 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00008534 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
Matt Arsenault687ec752018-10-22 16:27:27 +00008535 (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) ||
Matt Arsenault5b39b342016-01-28 20:53:48 +00008536 (Opc == AMDGPUISD::FMIN_LEGACY &&
8537 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenault79a45db2017-02-22 23:53:37 +00008538 (VT == MVT::f32 || VT == MVT::f64 ||
Matt Arsenault6b114d22017-08-30 01:20:17 +00008539 (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
8540 (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008541 Op0.hasOneUse()) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00008542 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
8543 return Res;
8544 }
8545
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00008546 return SDValue();
8547}
8548
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008549static bool isClampZeroToOne(SDValue A, SDValue B) {
8550 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
8551 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
8552 // FIXME: Should this be allowing -0.0?
8553 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
8554 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
8555 }
8556 }
8557
8558 return false;
8559}
8560
8561// FIXME: Should only worry about snans for version with chain.
8562SDValue SITargetLowering::performFMed3Combine(SDNode *N,
8563 DAGCombinerInfo &DCI) const {
8564 EVT VT = N->getValueType(0);
8565 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
8566 // NaNs. With a NaN input, the order of the operands may change the result.
8567
8568 SelectionDAG &DAG = DCI.DAG;
8569 SDLoc SL(N);
8570
8571 SDValue Src0 = N->getOperand(0);
8572 SDValue Src1 = N->getOperand(1);
8573 SDValue Src2 = N->getOperand(2);
8574
8575 if (isClampZeroToOne(Src0, Src1)) {
8576 // const_a, const_b, x -> clamp is safe in all cases including signaling
8577 // nans.
8578 // FIXME: Should this be allowing -0.0?
8579 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
8580 }
8581
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008582 const MachineFunction &MF = DAG.getMachineFunction();
8583 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
8584
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008585 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
 8586 // handling the case where dx10-clamp is disabled?
Matt Arsenault055e4dc2019-03-29 19:14:54 +00008587 if (Info->getMode().DX10Clamp) {
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00008588 // If NaNs are clamped to 0, we are free to reorder the inputs.
8589
8590 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8591 std::swap(Src0, Src1);
8592
8593 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
8594 std::swap(Src1, Src2);
8595
8596 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
8597 std::swap(Src0, Src1);
8598
8599 if (isClampZeroToOne(Src1, Src2))
8600 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
8601 }
8602
8603 return SDValue();
8604}
8605
Matt Arsenault1f17c662017-02-22 00:27:34 +00008606SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
8607 DAGCombinerInfo &DCI) const {
8608 SDValue Src0 = N->getOperand(0);
8609 SDValue Src1 = N->getOperand(1);
8610 if (Src0.isUndef() && Src1.isUndef())
8611 return DCI.DAG.getUNDEF(N->getValueType(0));
8612 return SDValue();
8613}
8614
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008615SDValue SITargetLowering::performExtractVectorEltCombine(
8616 SDNode *N, DAGCombinerInfo &DCI) const {
8617 SDValue Vec = N->getOperand(0);
Matt Arsenault8cbb4882017-09-20 21:01:24 +00008618 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008619
8620 EVT VecVT = Vec.getValueType();
8621 EVT EltVT = VecVT.getVectorElementType();
8622
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00008623 if ((Vec.getOpcode() == ISD::FNEG ||
8624 Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008625 SDLoc SL(N);
8626 EVT EltVT = N->getValueType(0);
8627 SDValue Idx = N->getOperand(1);
8628 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8629 Vec.getOperand(0), Idx);
Matt Arsenaultfcc5ba42018-04-26 19:21:32 +00008630 return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008631 }
8632
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008633 // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
8634 // =>
8635 // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
8636 // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
8637 // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
Farhana Aleene24f3ff2018-05-09 21:18:34 +00008638 if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008639 SDLoc SL(N);
8640 EVT EltVT = N->getValueType(0);
8641 SDValue Idx = N->getOperand(1);
8642 unsigned Opc = Vec.getOpcode();
8643
8644 switch(Opc) {
8645 default:
Stanislav Mekhanoshinbcb34ac2018-11-13 21:18:21 +00008646 break;
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008647 // TODO: Support other binary operations.
8648 case ISD::FADD:
Matt Arsenaulta8160732018-08-15 21:34:06 +00008649 case ISD::FSUB:
8650 case ISD::FMUL:
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008651 case ISD::ADD:
Farhana Aleene24f3ff2018-05-09 21:18:34 +00008652 case ISD::UMIN:
8653 case ISD::UMAX:
8654 case ISD::SMIN:
8655 case ISD::SMAX:
8656 case ISD::FMAXNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00008657 case ISD::FMINNUM:
8658 case ISD::FMAXNUM_IEEE:
8659 case ISD::FMINNUM_IEEE: {
Matt Arsenaulta8160732018-08-15 21:34:06 +00008660 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8661 Vec.getOperand(0), Idx);
8662 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8663 Vec.getOperand(1), Idx);
8664
8665 DCI.AddToWorklist(Elt0.getNode());
8666 DCI.AddToWorklist(Elt1.getNode());
8667 return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags());
8668 }
Farhana Aleene2dfe8a2018-05-01 21:41:12 +00008669 }
8670 }
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008671
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008672 unsigned VecSize = VecVT.getSizeInBits();
8673 unsigned EltSize = EltVT.getSizeInBits();
8674
Stanislav Mekhanoshinbcb34ac2018-11-13 21:18:21 +00008675 // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
 8676 // This eliminates the non-constant index and subsequent movrel or scratch access.
 8677 // Sub-dword vectors of 2 dwords or less have a better implementation.
8678 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
8679 // instructions.
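  // e.g. extract_vector_elt v4i32:vec, i32:idx becomes a chain of
  // select (setcc idx, K), vec[K], ... over all constant indices K.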
8680 if (VecSize <= 256 && (VecSize > 64 || EltSize >= 32) &&
8681 !isa<ConstantSDNode>(N->getOperand(1))) {
8682 SDLoc SL(N);
8683 SDValue Idx = N->getOperand(1);
8684 EVT IdxVT = Idx.getValueType();
8685 SDValue V;
8686 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8687 SDValue IC = DAG.getConstant(I, SL, IdxVT);
8688 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8689 if (I == 0)
8690 V = Elt;
8691 else
8692 V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
8693 }
8694 return V;
8695 }
8696
8697 if (!DCI.isBeforeLegalize())
8698 return SDValue();
8699
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008700 // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
8701 // elements. This exposes more load reduction opportunities by replacing
8702 // multiple small extract_vector_elements with a single 32-bit extract.
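  // e.g. (i16 (extract_vector_elt v4i16:x, 1)), with x loaded from memory, becomes
  // (trunc (srl (i32 (extract_vector_elt (v2i32 (bitcast x)), 0)), 16)).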
8703 auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
Matt Arsenaultbf07a502018-08-31 15:39:52 +00008704 if (isa<MemSDNode>(Vec) &&
8705 EltSize <= 16 &&
Matt Arsenault63bc0e32018-06-15 15:31:36 +00008706 EltVT.isByteSized() &&
8707 VecSize > 32 &&
8708 VecSize % 32 == 0 &&
8709 Idx) {
8710 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
8711
8712 unsigned BitIndex = Idx->getZExtValue() * EltSize;
8713 unsigned EltIdx = BitIndex / 32;
8714 unsigned LeftoverBitIdx = BitIndex % 32;
8715 SDLoc SL(N);
8716
8717 SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
8718 DCI.AddToWorklist(Cast.getNode());
8719
8720 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
8721 DAG.getConstant(EltIdx, SL, MVT::i32));
8722 DCI.AddToWorklist(Elt.getNode());
8723 SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
8724 DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
8725 DCI.AddToWorklist(Srl.getNode());
8726
8727 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl);
8728 DCI.AddToWorklist(Trunc.getNode());
8729 return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
8730 }
8731
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00008732 return SDValue();
8733}
8734
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00008735SDValue
8736SITargetLowering::performInsertVectorEltCombine(SDNode *N,
8737 DAGCombinerInfo &DCI) const {
8738 SDValue Vec = N->getOperand(0);
8739 SDValue Idx = N->getOperand(2);
8740 EVT VecVT = Vec.getValueType();
8741 EVT EltVT = VecVT.getVectorElementType();
8742 unsigned VecSize = VecVT.getSizeInBits();
8743 unsigned EltSize = EltVT.getSizeInBits();
8744
8745 // INSERT_VECTOR_ELT (<n x e>, var-idx)
8746 // => BUILD_VECTOR n x select (e, const-idx)
 8747 // This eliminates the non-constant index and subsequent movrel or scratch access.
 8748 // Sub-dword vectors of 2 dwords or less have a better implementation.
8749 // Vectors of size bigger than 8 dwords would yield too many v_cndmask_b32
8750 // instructions.
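  // e.g. insert_vector_elt v4i32:vec, i32:val, i32:idx becomes
  // build_vector (select (setcc idx, 0), val, vec[0]), ...,
  //              (select (setcc idx, 3), val, vec[3]).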
8751 if (isa<ConstantSDNode>(Idx) ||
8752 VecSize > 256 || (VecSize <= 64 && EltSize < 32))
8753 return SDValue();
8754
8755 SelectionDAG &DAG = DCI.DAG;
8756 SDLoc SL(N);
8757 SDValue Ins = N->getOperand(1);
8758 EVT IdxVT = Idx.getValueType();
8759
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00008760 SmallVector<SDValue, 16> Ops;
8761 for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
8762 SDValue IC = DAG.getConstant(I, SL, IdxVT);
8763 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC);
8764 SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ);
8765 Ops.push_back(V);
8766 }
8767
8768 return DAG.getBuildVector(VecVT, SL, Ops);
8769}
8770
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008771unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
8772 const SDNode *N0,
8773 const SDNode *N1) const {
8774 EVT VT = N0->getValueType(0);
8775
Matt Arsenault770ec862016-12-22 03:55:35 +00008776 // Only do this if we are not trying to support denormals. v_mad_f32 does not
8777 // support denormals ever.
Stanislav Mekhanoshin28a19362019-05-04 04:20:37 +00008778 if (((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
8779 (VT == MVT::f16 && !Subtarget->hasFP16Denormals() &&
8780 getSubtarget()->hasMadF16())) &&
8781 isOperationLegal(ISD::FMAD, VT))
Matt Arsenault770ec862016-12-22 03:55:35 +00008782 return ISD::FMAD;
8783
8784 const TargetOptions &Options = DAG.getTarget().Options;
Amara Emersond28f0cd42017-05-01 15:17:51 +00008785 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
Michael Berg7acc81b2018-05-04 18:48:20 +00008786 (N0->getFlags().hasAllowContract() &&
8787 N1->getFlags().hasAllowContract())) &&
Matt Arsenault770ec862016-12-22 03:55:35 +00008788 isFMAFasterThanFMulAndFAdd(VT)) {
8789 return ISD::FMA;
8790 }
8791
8792 return 0;
8793}
8794
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00008795// For a reassociatable opcode perform:
8796// op x, (op y, z) -> op (op x, z), y, if x and z are uniform
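// e.g. add s0, (add v0, s1) -> add (add s0, s1), v0, so the uniform half can
// typically be selected to a scalar instruction.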
8797SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
8798 SelectionDAG &DAG) const {
8799 EVT VT = N->getValueType(0);
8800 if (VT != MVT::i32 && VT != MVT::i64)
8801 return SDValue();
8802
8803 unsigned Opc = N->getOpcode();
8804 SDValue Op0 = N->getOperand(0);
8805 SDValue Op1 = N->getOperand(1);
8806
8807 if (!(Op0->isDivergent() ^ Op1->isDivergent()))
8808 return SDValue();
8809
8810 if (Op0->isDivergent())
8811 std::swap(Op0, Op1);
8812
8813 if (Op1.getOpcode() != Opc || !Op1.hasOneUse())
8814 return SDValue();
8815
8816 SDValue Op2 = Op1.getOperand(1);
8817 Op1 = Op1.getOperand(0);
8818 if (!(Op1->isDivergent() ^ Op2->isDivergent()))
8819 return SDValue();
8820
8821 if (Op1->isDivergent())
8822 std::swap(Op1, Op2);
8823
8824 // If either operand is constant this will conflict with
8825 // DAGCombiner::ReassociateOps().
Stanislav Mekhanoshinda1628e2019-02-26 20:56:25 +00008826 if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
8827 DAG.isConstantIntBuildVectorOrConstantInt(Op1))
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00008828 return SDValue();
8829
8830 SDLoc SL(N);
8831 SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1);
8832 return DAG.getNode(Opc, SL, VT, Add1, Op2);
8833}
8834
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008835static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
8836 EVT VT,
8837 SDValue N0, SDValue N1, SDValue N2,
8838 bool Signed) {
8839 unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
8840 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
8841 SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
8842 return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
8843}
8844
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008845SDValue SITargetLowering::performAddCombine(SDNode *N,
8846 DAGCombinerInfo &DCI) const {
8847 SelectionDAG &DAG = DCI.DAG;
8848 EVT VT = N->getValueType(0);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008849 SDLoc SL(N);
8850 SDValue LHS = N->getOperand(0);
8851 SDValue RHS = N->getOperand(1);
8852
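  // add (mul x, y), z -> mad_u64_u32 x, y, z (or mad_i64_i32 for the signed
  // case) when x and y are known to fit in 32 bits and the target has
  // v_mad_u64_u32 / v_mad_i64_i32.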
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008853 if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
8854 && Subtarget->hasMad64_32() &&
8855 !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
8856 VT.getScalarSizeInBits() <= 64) {
8857 if (LHS.getOpcode() != ISD::MUL)
8858 std::swap(LHS, RHS);
8859
8860 SDValue MulLHS = LHS.getOperand(0);
8861 SDValue MulRHS = LHS.getOperand(1);
8862 SDValue AddRHS = RHS;
8863
8864 // TODO: Maybe restrict if SGPR inputs.
8865 if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
8866 numBitsUnsigned(MulRHS, DAG) <= 32) {
8867 MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
8868 MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
8869 AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
8870 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
8871 }
8872
8873 if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
8874 MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
8875 MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
8876 AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
8877 return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
8878 }
8879
8880 return SDValue();
8881 }
8882
Stanislav Mekhanoshin871821f2019-02-14 22:11:25 +00008883 if (SDValue V = reassociateScalarOps(N, DAG)) {
8884 return V;
8885 }
8886
Farhana Aleen07e61232018-05-02 18:16:39 +00008887 if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
Matt Arsenault4f6318f2017-11-06 17:04:37 +00008888 return SDValue();
8889
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008890 // add x, zext (setcc) => addcarry x, 0, setcc
8891 // add x, sext (setcc) => subcarry x, 0, setcc
8892 unsigned Opc = LHS.getOpcode();
8893 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008894 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008895 std::swap(RHS, LHS);
8896
8897 Opc = RHS.getOpcode();
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008898 switch (Opc) {
8899 default: break;
8900 case ISD::ZERO_EXTEND:
8901 case ISD::SIGN_EXTEND:
8902 case ISD::ANY_EXTEND: {
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008903 auto Cond = RHS.getOperand(0);
Stanislav Mekhanoshin6851ddf2017-06-27 18:25:26 +00008904 if (!isBoolSGPR(Cond))
Stanislav Mekhanoshin3ed38c62017-06-21 23:46:22 +00008905 break;
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008906 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
8907 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
8908 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
8909 return DAG.getNode(Opc, SL, VTList, Args);
8910 }
8911 case ISD::ADDCARRY: {
8912 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
8913 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
8914 if (!C || C->getZExtValue() != 0) break;
8915 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
8916 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
8917 }
8918 }
8919 return SDValue();
8920}
8921
8922SDValue SITargetLowering::performSubCombine(SDNode *N,
8923 DAGCombinerInfo &DCI) const {
8924 SelectionDAG &DAG = DCI.DAG;
8925 EVT VT = N->getValueType(0);
8926
8927 if (VT != MVT::i32)
8928 return SDValue();
8929
8930 SDLoc SL(N);
8931 SDValue LHS = N->getOperand(0);
8932 SDValue RHS = N->getOperand(1);
8933
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008934 if (LHS.getOpcode() == ISD::SUBCARRY) {
8935 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
8936 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
Stanislav Mekhanoshin42e229e2019-02-21 02:58:00 +00008937 if (!C || !C->isNullValue())
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00008938 return SDValue();
8939 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
8940 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
8941 }
8942 return SDValue();
8943}
8944
8945SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
8946 DAGCombinerInfo &DCI) const {
8947
8948 if (N->getValueType(0) != MVT::i32)
8949 return SDValue();
8950
8951 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
8952 if (!C || C->getZExtValue() != 0)
8953 return SDValue();
8954
8955 SelectionDAG &DAG = DCI.DAG;
8956 SDValue LHS = N->getOperand(0);
8957
8958 // addcarry (add x, y), 0, cc => addcarry x, y, cc
8959 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
8960 unsigned LHSOpc = LHS.getOpcode();
8961 unsigned Opc = N->getOpcode();
8962 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
8963 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
8964 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
8965 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00008966 }
8967 return SDValue();
8968}
8969
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008970SDValue SITargetLowering::performFAddCombine(SDNode *N,
8971 DAGCombinerInfo &DCI) const {
8972 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
8973 return SDValue();
8974
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008975 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault770ec862016-12-22 03:55:35 +00008976 EVT VT = N->getValueType(0);
Matt Arsenault770ec862016-12-22 03:55:35 +00008977
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008978 SDLoc SL(N);
8979 SDValue LHS = N->getOperand(0);
8980 SDValue RHS = N->getOperand(1);
8981
8982 // These should really be instruction patterns, but writing patterns with
 8983 // source modifiers is a pain.
8984
8985 // fadd (fadd (a, a), b) -> mad 2.0, a, b
8986 if (LHS.getOpcode() == ISD::FADD) {
8987 SDValue A = LHS.getOperand(0);
8988 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00008989 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00008990 if (FusedOp != 0) {
8991 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00008992 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00008993 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00008994 }
8995 }
8996
8997 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
8998 if (RHS.getOpcode() == ISD::FADD) {
8999 SDValue A = RHS.getOperand(0);
9000 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009001 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009002 if (FusedOp != 0) {
9003 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009004 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
Matt Arsenault770ec862016-12-22 03:55:35 +00009005 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009006 }
9007 }
9008
9009 return SDValue();
9010}
9011
9012SDValue SITargetLowering::performFSubCombine(SDNode *N,
9013 DAGCombinerInfo &DCI) const {
9014 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
9015 return SDValue();
9016
9017 SelectionDAG &DAG = DCI.DAG;
9018 SDLoc SL(N);
9019 EVT VT = N->getValueType(0);
9020 assert(!VT.isVector());
9021
9022 // Try to get the fneg to fold into the source modifier. This undoes generic
9023 // DAG combines and folds them into the mad.
9024 //
9025 // Only do this if we are not trying to support denormals. v_mad_f32 does
9026 // not support denormals ever.
Matt Arsenault770ec862016-12-22 03:55:35 +00009027 SDValue LHS = N->getOperand(0);
9028 SDValue RHS = N->getOperand(1);
9029 if (LHS.getOpcode() == ISD::FADD) {
9030 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
9031 SDValue A = LHS.getOperand(0);
9032 if (A == LHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009033 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009034 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009035 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
9036 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
9037
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009038 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009039 }
9040 }
Matt Arsenault770ec862016-12-22 03:55:35 +00009041 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009042
Matt Arsenault770ec862016-12-22 03:55:35 +00009043 if (RHS.getOpcode() == ISD::FADD) {
9044 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009045
Matt Arsenault770ec862016-12-22 03:55:35 +00009046 SDValue A = RHS.getOperand(0);
9047 if (A == RHS.getOperand(1)) {
Matt Arsenault46e6b7a2016-12-22 04:03:35 +00009048 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
Matt Arsenault770ec862016-12-22 03:55:35 +00009049 if (FusedOp != 0) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009050 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
Matt Arsenaulte7d8ed32016-12-22 04:03:40 +00009051 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009052 }
9053 }
9054 }
9055
9056 return SDValue();
9057}
9058
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009059SDValue SITargetLowering::performFMACombine(SDNode *N,
9060 DAGCombinerInfo &DCI) const {
9061 SelectionDAG &DAG = DCI.DAG;
9062 EVT VT = N->getValueType(0);
9063 SDLoc SL(N);
9064
Stanislav Mekhanoshin0e858b02019-02-09 00:34:21 +00009065 if (!Subtarget->hasDot2Insts() || VT != MVT::f32)
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009066 return SDValue();
9067
9068 // FMA((F32)S0.x, (F32)S1. x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
9069 // FDOT2((V2F16)S0, (V2F16)S1, (F32)z))
9070 SDValue Op1 = N->getOperand(0);
9071 SDValue Op2 = N->getOperand(1);
9072 SDValue FMA = N->getOperand(2);
9073
9074 if (FMA.getOpcode() != ISD::FMA ||
9075 Op1.getOpcode() != ISD::FP_EXTEND ||
9076 Op2.getOpcode() != ISD::FP_EXTEND)
9077 return SDValue();
9078
 9079 // fdot2_f32_f16 always flushes fp32 denormal operands and outputs to zero,
 9080 // regardless of the denorm mode setting. Therefore, unsafe-fp-math/fp-contract
 9081 // is sufficient to allow generating fdot2.
9082 const TargetOptions &Options = DAG.getTarget().Options;
9083 if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
9084 (N->getFlags().hasAllowContract() &&
9085 FMA->getFlags().hasAllowContract())) {
9086 Op1 = Op1.getOperand(0);
9087 Op2 = Op2.getOperand(0);
9088 if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9089 Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9090 return SDValue();
9091
9092 SDValue Vec1 = Op1.getOperand(0);
9093 SDValue Idx1 = Op1.getOperand(1);
9094 SDValue Vec2 = Op2.getOperand(0);
9095
9096 SDValue FMAOp1 = FMA.getOperand(0);
9097 SDValue FMAOp2 = FMA.getOperand(1);
9098 SDValue FMAAcc = FMA.getOperand(2);
9099
9100 if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
9101 FMAOp2.getOpcode() != ISD::FP_EXTEND)
9102 return SDValue();
9103
9104 FMAOp1 = FMAOp1.getOperand(0);
9105 FMAOp2 = FMAOp2.getOperand(0);
9106 if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9107 FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9108 return SDValue();
9109
9110 SDValue Vec3 = FMAOp1.getOperand(0);
9111 SDValue Vec4 = FMAOp2.getOperand(0);
9112 SDValue Idx2 = FMAOp1.getOperand(1);
9113
9114 if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
9115 // Idx1 and Idx2 cannot be the same.
9116 Idx1 == Idx2)
9117 return SDValue();
9118
9119 if (Vec1 == Vec2 || Vec3 == Vec4)
9120 return SDValue();
9121
9122 if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
9123 return SDValue();
9124
9125 if ((Vec1 == Vec3 && Vec2 == Vec4) ||
Konstantin Zhuravlyovbb30ef72018-08-01 01:31:30 +00009126 (Vec1 == Vec4 && Vec2 == Vec3)) {
9127 return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
9128 DAG.getTargetConstant(0, SL, MVT::i1));
9129 }
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009130 }
9131 return SDValue();
9132}
9133
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009134SDValue SITargetLowering::performSetCCCombine(SDNode *N,
9135 DAGCombinerInfo &DCI) const {
9136 SelectionDAG &DAG = DCI.DAG;
9137 SDLoc SL(N);
9138
9139 SDValue LHS = N->getOperand(0);
9140 SDValue RHS = N->getOperand(1);
9141 EVT VT = LHS.getValueType();
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00009142 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
9143
9144 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
9145 if (!CRHS) {
9146 CRHS = dyn_cast<ConstantSDNode>(LHS);
9147 if (CRHS) {
9148 std::swap(LHS, RHS);
9149 CC = getSetCCSwappedOperands(CC);
9150 }
9151 }
9152
Stanislav Mekhanoshin3b117942018-06-16 03:46:59 +00009153 if (CRHS) {
9154 if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
9155 isBoolSGPR(LHS.getOperand(0))) {
9156 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
9157 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
9158 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
9159 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
9160 if ((CRHS->isAllOnesValue() &&
9161 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
9162 (CRHS->isNullValue() &&
9163 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
9164 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9165 DAG.getConstant(-1, SL, MVT::i1));
9166 if ((CRHS->isAllOnesValue() &&
9167 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
9168 (CRHS->isNullValue() &&
9169 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
9170 return LHS.getOperand(0);
9171 }
9172
9173 uint64_t CRHSVal = CRHS->getZExtValue();
9174 if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
9175 LHS.getOpcode() == ISD::SELECT &&
9176 isa<ConstantSDNode>(LHS.getOperand(1)) &&
9177 isa<ConstantSDNode>(LHS.getOperand(2)) &&
9178 LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
9179 isBoolSGPR(LHS.getOperand(0))) {
9180 // Given CT != FT:
9181 // setcc (select cc, CT, CF), CF, eq => xor cc, -1
9182 // setcc (select cc, CT, CF), CF, ne => cc
9183 // setcc (select cc, CT, CF), CT, ne => xor cc, -1
9184 // setcc (select cc, CT, CF), CT, eq => cc
9185 uint64_t CT = LHS.getConstantOperandVal(1);
9186 uint64_t CF = LHS.getConstantOperandVal(2);
9187
9188 if ((CF == CRHSVal && CC == ISD::SETEQ) ||
9189 (CT == CRHSVal && CC == ISD::SETNE))
9190 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
9191 DAG.getConstant(-1, SL, MVT::i1));
9192 if ((CF == CRHSVal && CC == ISD::SETNE) ||
9193 (CT == CRHSVal && CC == ISD::SETEQ))
9194 return LHS.getOperand(0);
9195 }
Stanislav Mekhanoshinc9bd53a2017-06-27 18:53:03 +00009196 }
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009197
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00009198 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
9199 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009200 return SDValue();
9201
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009202 // Match isinf/isfinite pattern
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009203 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009204 // (fcmp one (fabs x), inf) -> (fp_class x,
 9205 // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero))
9206 if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) {
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009207 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
9208 if (!CRHS)
9209 return SDValue();
9210
9211 const APFloat &APF = CRHS->getValueAPF();
9212 if (APF.isInfinity() && !APF.isNegative()) {
Matt Arsenault8ad00d32018-08-10 18:58:41 +00009213 const unsigned IsInfMask = SIInstrFlags::P_INFINITY |
9214 SIInstrFlags::N_INFINITY;
9215 const unsigned IsFiniteMask = SIInstrFlags::N_ZERO |
9216 SIInstrFlags::P_ZERO |
9217 SIInstrFlags::N_NORMAL |
9218 SIInstrFlags::P_NORMAL |
9219 SIInstrFlags::N_SUBNORMAL |
9220 SIInstrFlags::P_SUBNORMAL;
9221 unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009222 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
9223 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009224 }
9225 }
9226
9227 return SDValue();
9228}
9229
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009230SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
9231 DAGCombinerInfo &DCI) const {
9232 SelectionDAG &DAG = DCI.DAG;
9233 SDLoc SL(N);
9234 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
9235
9236 SDValue Src = N->getOperand(0);
9237 SDValue Srl = N->getOperand(0);
9238 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
9239 Srl = Srl.getOperand(0);
9240
9241 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
9242 if (Srl.getOpcode() == ISD::SRL) {
9243 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
9244 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
9245 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
9246
9247 if (const ConstantSDNode *C =
9248 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
9249 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
9250 EVT(MVT::i32));
9251
9252 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
9253 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
9254 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
9255 MVT::f32, Srl);
9256 }
9257 }
9258 }
9259
9260 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
9261
Craig Topperd0af7e82017-04-28 05:31:46 +00009262 KnownBits Known;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009263 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
9264 !DCI.isBeforeLegalizeOps());
9265 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Stanislav Mekhanoshined0d6c62019-01-09 02:24:22 +00009266 if (TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009267 DCI.CommitTargetLoweringOpt(TLO);
9268 }
9269
9270 return SDValue();
9271}
9272
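// Constant fold AMDGPUISD::CLAMP of a constant: fold to 0.0 if the source is
// below 0.0 (or is a NaN and DX10 clamping is enabled), to 1.0 if it is above
// 1.0, and otherwise return the constant unchanged.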
Tom Stellard1b95fed2018-05-24 05:28:34 +00009273SDValue SITargetLowering::performClampCombine(SDNode *N,
9274 DAGCombinerInfo &DCI) const {
9275 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
9276 if (!CSrc)
9277 return SDValue();
9278
Matt Arsenault055e4dc2019-03-29 19:14:54 +00009279 const MachineFunction &MF = DCI.DAG.getMachineFunction();
Tom Stellard1b95fed2018-05-24 05:28:34 +00009280 const APFloat &F = CSrc->getValueAPF();
9281 APFloat Zero = APFloat::getZero(F.getSemantics());
9282 APFloat::cmpResult Cmp0 = F.compare(Zero);
9283 if (Cmp0 == APFloat::cmpLessThan ||
Matt Arsenault055e4dc2019-03-29 19:14:54 +00009284 (Cmp0 == APFloat::cmpUnordered &&
9285 MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) {
Tom Stellard1b95fed2018-05-24 05:28:34 +00009286 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
9287 }
9288
9289 APFloat One(F.getSemantics(), "1.0");
9290 APFloat::cmpResult Cmp1 = F.compare(One);
9291 if (Cmp1 == APFloat::cmpGreaterThan)
9292 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
9293
9294 return SDValue(CSrc, 0);
9295}
9296
9297
Tom Stellard75aadc22012-12-11 21:25:42 +00009298SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
9299 DAGCombinerInfo &DCI) const {
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00009300 if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
9301 return SDValue();
Tom Stellard75aadc22012-12-11 21:25:42 +00009302 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00009303 default:
9304 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Stanislav Mekhanoshine3eb42c2017-06-21 22:05:06 +00009305 case ISD::ADD:
9306 return performAddCombine(N, DCI);
Stanislav Mekhanoshina8b26932017-06-21 22:30:01 +00009307 case ISD::SUB:
9308 return performSubCombine(N, DCI);
9309 case ISD::ADDCARRY:
9310 case ISD::SUBCARRY:
9311 return performAddCarrySubCarryCombine(N, DCI);
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009312 case ISD::FADD:
9313 return performFAddCombine(N, DCI);
9314 case ISD::FSUB:
9315 return performFSubCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00009316 case ISD::SETCC:
9317 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00009318 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00009319 case ISD::FMINNUM:
Matt Arsenault687ec752018-10-22 16:27:27 +00009320 case ISD::FMAXNUM_IEEE:
9321 case ISD::FMINNUM_IEEE:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00009322 case ISD::SMAX:
9323 case ISD::SMIN:
9324 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00009325 case ISD::UMIN:
9326 case AMDGPUISD::FMIN_LEGACY:
Stanislav Mekhanoshin443a7f92018-11-27 15:13:37 +00009327 case AMDGPUISD::FMAX_LEGACY:
9328 return performMinMaxCombine(N, DCI);
Farhana Aleenc370d7b2018-07-16 18:19:59 +00009329 case ISD::FMA:
9330 return performFMACombine(N, DCI);
Matt Arsenault90083d32018-06-07 09:54:49 +00009331 case ISD::LOAD: {
 9332 if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
 9333 return Widened;
9334 LLVM_FALLTHROUGH;
9335 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009336 case ISD::STORE:
9337 case ISD::ATOMIC_LOAD:
9338 case ISD::ATOMIC_STORE:
9339 case ISD::ATOMIC_CMP_SWAP:
9340 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
9341 case ISD::ATOMIC_SWAP:
9342 case ISD::ATOMIC_LOAD_ADD:
9343 case ISD::ATOMIC_LOAD_SUB:
9344 case ISD::ATOMIC_LOAD_AND:
9345 case ISD::ATOMIC_LOAD_OR:
9346 case ISD::ATOMIC_LOAD_XOR:
9347 case ISD::ATOMIC_LOAD_NAND:
9348 case ISD::ATOMIC_LOAD_MIN:
9349 case ISD::ATOMIC_LOAD_MAX:
9350 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00009351 case ISD::ATOMIC_LOAD_UMAX:
Matt Arsenaulta5840c32019-01-22 18:36:06 +00009352 case ISD::ATOMIC_LOAD_FADD:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00009353 case AMDGPUISD::ATOMIC_INC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00009354 case AMDGPUISD::ATOMIC_DEC:
Daniil Fukalovd5fca552018-01-17 14:05:05 +00009355 case AMDGPUISD::ATOMIC_LOAD_FMIN:
Matt Arsenaulta5840c32019-01-22 18:36:06 +00009356 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009357 if (DCI.isBeforeLegalize())
9358 break;
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009359 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00009360 case ISD::AND:
9361 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00009362 case ISD::OR:
9363 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00009364 case ISD::XOR:
9365 return performXorCombine(N, DCI);
Matt Arsenault8edfaee2017-03-31 19:53:03 +00009366 case ISD::ZERO_EXTEND:
9367 return performZeroExtendCombine(N, DCI);
Ryan Taylor00e063a2019-03-19 16:07:00 +00009368 case ISD::SIGN_EXTEND_INREG:
9369 return performSignExtendInRegCombine(N , DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00009370 case AMDGPUISD::FP_CLASS:
9371 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00009372 case ISD::FCANONICALIZE:
9373 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009374 case AMDGPUISD::RCP:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00009375 return performRcpCombine(N, DCI);
9376 case AMDGPUISD::FRACT:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009377 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00009378 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009379 case AMDGPUISD::RSQ_LEGACY:
Stanislav Mekhanoshin1a1687f2018-06-27 15:33:33 +00009380 case AMDGPUISD::RCP_IFLAG:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00009381 case AMDGPUISD::RSQ_CLAMP:
9382 case AMDGPUISD::LDEXP: {
9383 SDValue Src = N->getOperand(0);
9384 if (Src.isUndef())
9385 return Src;
9386 break;
9387 }
Matt Arsenaultd8b73d52016-12-22 03:44:42 +00009388 case ISD::SINT_TO_FP:
9389 case ISD::UINT_TO_FP:
9390 return performUCharToFloatCombine(N, DCI);
9391 case AMDGPUISD::CVT_F32_UBYTE0:
9392 case AMDGPUISD::CVT_F32_UBYTE1:
9393 case AMDGPUISD::CVT_F32_UBYTE2:
9394 case AMDGPUISD::CVT_F32_UBYTE3:
9395 return performCvtF32UByteNCombine(N, DCI);
Matt Arsenault2fdf2a12017-02-21 23:35:48 +00009396 case AMDGPUISD::FMED3:
9397 return performFMed3Combine(N, DCI);
Matt Arsenault1f17c662017-02-22 00:27:34 +00009398 case AMDGPUISD::CVT_PKRTZ_F16_F32:
9399 return performCvtPkRTZCombine(N, DCI);
Tom Stellard1b95fed2018-05-24 05:28:34 +00009400 case AMDGPUISD::CLAMP:
9401 return performClampCombine(N, DCI);
Matt Arsenaulteb522e62017-02-27 22:15:25 +00009402 case ISD::SCALAR_TO_VECTOR: {
9403 SelectionDAG &DAG = DCI.DAG;
9404 EVT VT = N->getValueType(0);
9405
9406 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
9407 if (VT == MVT::v2i16 || VT == MVT::v2f16) {
9408 SDLoc SL(N);
9409 SDValue Src = N->getOperand(0);
9410 EVT EltVT = Src.getValueType();
9411 if (EltVT == MVT::f16)
9412 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
9413
9414 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
9415 return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
9416 }
9417
9418 break;
9419 }
Matt Arsenaultbf5482e2017-05-11 17:26:25 +00009420 case ISD::EXTRACT_VECTOR_ELT:
9421 return performExtractVectorEltCombine(N, DCI);
Stanislav Mekhanoshin054f8102018-11-19 17:39:20 +00009422 case ISD::INSERT_VECTOR_ELT:
9423 return performInsertVectorEltCombine(N, DCI);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00009424 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00009425 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00009426}
Christian Konigd910b7d2013-02-26 17:52:16 +00009427
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009428/// Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00009429static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009430 switch (Idx) {
9431 default: return 0;
9432 case AMDGPU::sub0: return 0;
9433 case AMDGPU::sub1: return 1;
9434 case AMDGPU::sub2: return 2;
9435 case AMDGPU::sub3: return 3;
David Stuttardf77079f2019-01-14 11:55:24 +00009436 case AMDGPU::sub4: return 4; // Possible with TFE/LWE
Christian Konig8e06e2a2013-04-10 08:39:08 +00009437 }
9438}
9439
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009440/// Adjust the writemask of MIMG instructions
Matt Arsenault68f05052017-12-04 22:18:27 +00009441SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
9442 SelectionDAG &DAG) const {
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009443 unsigned Opcode = Node->getMachineOpcode();
9444
9445 // Subtract 1 because the vdata output is not a MachineSDNode operand.
9446 int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
9447 if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
9448 return Node; // not implemented for D16
9449
David Stuttardf77079f2019-01-14 11:55:24 +00009450 SDNode *Users[5] = { nullptr };
Tom Stellard54774e52013-10-23 02:53:47 +00009451 unsigned Lane = 0;
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009452 unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009453 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00009454 unsigned NewDmask = 0;
David Stuttardf77079f2019-01-14 11:55:24 +00009455 unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1;
9456 unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1;
 9457 bool UsesTFC = Node->getConstantOperandVal(TFEIdx) ||
 9458 Node->getConstantOperandVal(LWEIdx);
9459 unsigned TFCLane = 0;
Matt Arsenault856777d2017-12-08 20:00:57 +00009460 bool HasChain = Node->getNumValues() > 1;
9461
9462 if (OldDmask == 0) {
9463 // These are folded out, but on the chance it happens don't assert.
9464 return Node;
9465 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009466
David Stuttardf77079f2019-01-14 11:55:24 +00009467 unsigned OldBitsSet = countPopulation(OldDmask);
9468 // Work out which is the TFE/LWE lane if that is enabled.
9469 if (UsesTFC) {
9470 TFCLane = OldBitsSet;
9471 }
9472
Christian Konig8e06e2a2013-04-10 08:39:08 +00009473 // Try to figure out the used register components
9474 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
9475 I != E; ++I) {
9476
Matt Arsenault93e65ea2017-02-22 21:16:41 +00009477 // Don't look at users of the chain.
9478 if (I.getUse().getResNo() != 0)
9479 continue;
9480
Christian Konig8e06e2a2013-04-10 08:39:08 +00009481 // Abort if we can't understand the usage
9482 if (!I->isMachineOpcode() ||
9483 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
Matt Arsenault68f05052017-12-04 22:18:27 +00009484 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009485
Francis Visoiu Mistrih9d7bb0c2017-11-28 17:15:09 +00009486 // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
Tom Stellard54774e52013-10-23 02:53:47 +00009487 // Note that subregs are packed, i.e. Lane==0 is the first bit set
9488 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
9489 // set, etc.
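    // For example (illustrative): with OldDmask = 0b1010 only Y and W are
    // written, so Lane==0 corresponds to component Y and Lane==1 to component W.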
Christian Konig8b1ed282013-04-10 08:39:16 +00009490 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00009491
David Stuttardf77079f2019-01-14 11:55:24 +00009492 // Check if the use is for the TFE/LWE generated result at VGPRn+1.
9493 if (UsesTFC && Lane == TFCLane) {
9494 Users[Lane] = *I;
9495 } else {
9496 // Set which texture component corresponds to the lane.
9497 unsigned Comp;
9498 for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
9499 Comp = countTrailingZeros(Dmask);
9500 Dmask &= ~(1 << Comp);
9501 }
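      // At this point Comp is the texture component (bit position in OldDmask)
      // that the packed lane 'Lane' corresponds to.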
9502
9503 // Abort if we have more than one user per component.
9504 if (Users[Lane])
9505 return Node;
9506
9507 Users[Lane] = *I;
9508 NewDmask |= 1 << Comp;
Tom Stellard54774e52013-10-23 02:53:47 +00009509 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009510 }
9511
David Stuttardf77079f2019-01-14 11:55:24 +00009512 // Don't allow 0 dmask, as hardware assumes one channel enabled.
9513 bool NoChannels = !NewDmask;
9514 if (NoChannels) {
David Stuttardfc2a7472019-03-20 09:29:55 +00009515 if (!UsesTFC) {
9516 // No uses of the result and not using TFC. Then do nothing.
9517 return Node;
9518 }
David Stuttardf77079f2019-01-14 11:55:24 +00009519 // If the original dmask has one channel - then there is nothing to do
9520 if (OldBitsSet == 1)
9521 return Node;
9522 // Use an arbitrary dmask - required for the instruction to work
9523 NewDmask = 1;
9524 }
Tom Stellard54774e52013-10-23 02:53:47 +00009525 // Abort if there's no change
9526 if (NewDmask == OldDmask)
Matt Arsenault68f05052017-12-04 22:18:27 +00009527 return Node;
9528
9529 unsigned BitsSet = countPopulation(NewDmask);
9530
David Stuttardf77079f2019-01-14 11:55:24 +00009531 // Check for TFE or LWE - increase the number of channels by one to account
9532 // for the extra return value.
9533 // This will need adjustment for D16 if that is also handled in
9534 // adjustWritemask (this function), but at present D16 is excluded.
9535 unsigned NewChannels = BitsSet + UsesTFC;
9536
9537 int NewOpcode =
9538 AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels);
Matt Arsenault68f05052017-12-04 22:18:27 +00009539 assert(NewOpcode != -1 &&
9540 NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
9541 "failed to find equivalent MIMG op");
Christian Konig8e06e2a2013-04-10 08:39:08 +00009542
9543 // Adjust the writemask in the node
Matt Arsenault68f05052017-12-04 22:18:27 +00009544 SmallVector<SDValue, 12> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009545 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009546 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00009547 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Christian Konig8e06e2a2013-04-10 08:39:08 +00009548
Matt Arsenault68f05052017-12-04 22:18:27 +00009549 MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();
9550
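  // A result with 3 or 5 channels is widened below to the next supported
  // vector width (4 or 8 elements).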
David Stuttardf77079f2019-01-14 11:55:24 +00009551 MVT ResultVT = NewChannels == 1 ?
9552 SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 :
9553 NewChannels == 5 ? 8 : NewChannels);
Matt Arsenault856777d2017-12-08 20:00:57 +00009554 SDVTList NewVTList = HasChain ?
9555 DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);
9556
Matt Arsenault68f05052017-12-04 22:18:27 +00009557
9558 MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
9559 NewVTList, Ops);
Matt Arsenaultecad0d532017-12-08 20:00:45 +00009560
Matt Arsenault856777d2017-12-08 20:00:57 +00009561 if (HasChain) {
9562 // Update chain.
Chandler Carruth66654b72018-08-14 23:30:32 +00009563 DAG.setNodeMemRefs(NewNode, Node->memoperands());
Matt Arsenault856777d2017-12-08 20:00:57 +00009564 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
9565 }
Matt Arsenault68f05052017-12-04 22:18:27 +00009566
David Stuttardf77079f2019-01-14 11:55:24 +00009567 if (NewChannels == 1) {
Matt Arsenault68f05052017-12-04 22:18:27 +00009568 assert(Node->hasNUsesOfValue(1, 0));
9569 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
9570 SDLoc(Node), Users[Lane]->getValueType(0),
9571 SDValue(NewNode, 0));
Christian Konig8b1ed282013-04-10 08:39:16 +00009572 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
Matt Arsenault68f05052017-12-04 22:18:27 +00009573 return nullptr;
Christian Konig8b1ed282013-04-10 08:39:16 +00009574 }
9575
Christian Konig8e06e2a2013-04-10 08:39:08 +00009576 // Update the users of the node with the new indices
David Stuttardf77079f2019-01-14 11:55:24 +00009577 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00009578 SDNode *User = Users[i];
David Stuttardf77079f2019-01-14 11:55:24 +00009579 if (!User) {
9580 // Handle the special case of NoChannels. We set NewDmask to 1 above, but
9581 // Users[0] is still nullptr because channel 0 doesn't really have a use.
9582 if (i || !NoChannels)
9583 continue;
9584 } else {
9585 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
9586 DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);
9587 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009588
9589 switch (Idx) {
9590 default: break;
9591 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
9592 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
9593 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
David Stuttardf77079f2019-01-14 11:55:24 +00009594 case AMDGPU::sub3: Idx = AMDGPU::sub4; break;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009595 }
9596 }
Matt Arsenault68f05052017-12-04 22:18:27 +00009597
9598 DAG.RemoveDeadNode(Node);
9599 return nullptr;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009600}
9601
Tom Stellardc98ee202015-07-16 19:40:07 +00009602static bool isFrameIndexOp(SDValue Op) {
9603 if (Op.getOpcode() == ISD::AssertZext)
9604 Op = Op.getOperand(0);
9605
9606 return isa<FrameIndexSDNode>(Op);
9607}
9608
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009609/// Legalize target independent instructions (e.g. INSERT_SUBREG)
Tom Stellard3457a842014-10-09 19:06:00 +00009610/// with frame index operands.
9611/// LLVM assumes that inputs to these instructions are registers.
Matt Arsenault0d0d6c22017-04-12 21:58:23 +00009612SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
9613 SelectionDAG &DAG) const {
9614 if (Node->getOpcode() == ISD::CopyToReg) {
9615 RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
9616 SDValue SrcVal = Node->getOperand(2);
9617
9618 // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
9619 // to try understanding copies to physical registers.
9620 if (SrcVal.getValueType() == MVT::i1 &&
9621 TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
9622 SDLoc SL(Node);
9623 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9624 SDValue VReg = DAG.getRegister(
9625 MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
9626
9627 SDNode *Glued = Node->getGluedNode();
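      // The glue value of a glued node is always its last result; if there is
      // no glue this builds an empty SDValue, and no glue operand is added.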
9628 SDValue ToVReg
9629 = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
9630 SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
9631 SDValue ToResultReg
9632 = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
9633 VReg, ToVReg.getValue(1));
9634 DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
9635 DAG.RemoveDeadNode(Node);
9636 return ToResultReg.getNode();
9637 }
9638 }
Tom Stellard8dd392e2014-10-09 18:09:15 +00009639
9640 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +00009641 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00009642 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00009643 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00009644 continue;
9645 }
9646
Tom Stellard3457a842014-10-09 19:06:00 +00009647 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00009648 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00009649 Node->getOperand(i).getValueType(),
9650 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00009651 }
9652
Mark Searles4e3d6162017-10-16 23:38:53 +00009653 return DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00009654}
9655
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009656/// Fold the instructions after selecting them.
Matt Arsenault68f05052017-12-04 22:18:27 +00009657/// Returns null if users were already updated.
Christian Konig8e06e2a2013-04-10 08:39:08 +00009658SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
9659 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009660 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00009661 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00009662
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00009663 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
Nicolai Haehnlef2674312018-06-21 13:36:01 +00009664 !TII->isGather4(Opcode)) {
Matt Arsenault68f05052017-12-04 22:18:27 +00009665 return adjustWritemask(Node, DAG);
9666 }
Christian Konig8e06e2a2013-04-10 08:39:08 +00009667
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00009668 if (Opcode == AMDGPU::INSERT_SUBREG ||
9669 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00009670 legalizeTargetIndependentNode(Node, DAG);
9671 return Node;
9672 }
Matt Arsenault206f8262017-08-01 20:49:41 +00009673
9674 switch (Opcode) {
9675 case AMDGPU::V_DIV_SCALE_F32:
9676 case AMDGPU::V_DIV_SCALE_F64: {
9677 // Satisfy the operand register constraint when one of the inputs is
9678 // undefined. Ordinarily each undef value will have its own implicit_def of
9679 // a vreg, so force these to use a single register.
9680 SDValue Src0 = Node->getOperand(0);
9681 SDValue Src1 = Node->getOperand(1);
9682 SDValue Src2 = Node->getOperand(2);
9683
9684 if ((Src0.isMachineOpcode() &&
9685 Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
9686 (Src0 == Src1 || Src0 == Src2))
9687 break;
9688
9689 MVT VT = Src0.getValueType().getSimpleVT();
Alexander Timofeevba447ba2019-05-26 20:33:26 +00009690 const TargetRegisterClass *RC =
9691 getRegClassFor(VT, Src0.getNode()->isDivergent());
Matt Arsenault206f8262017-08-01 20:49:41 +00009692
9693 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
9694 SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);
9695
9696 SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
9697 UndefReg, Src0, SDValue());
9698
9699 // src0 must be the same register as src1 or src2, even if the value is
9700 // undefined, so make sure we don't violate this constraint.
9701 if (Src0.isMachineOpcode() &&
9702 Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
9703 if (Src1.isMachineOpcode() &&
9704 Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9705 Src0 = Src1;
9706 else if (Src2.isMachineOpcode() &&
9707 Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
9708 Src0 = Src2;
9709 else {
9710 assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
9711 Src0 = UndefReg;
9712 Src1 = UndefReg;
9713 }
9714 } else
9715 break;
9716
9717 SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
9718 for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
9719 Ops.push_back(Node->getOperand(I));
9720
9721 Ops.push_back(ImpDef.getValue(1));
9722 return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
9723 }
9724 default:
9725 break;
9726 }
9727
Tom Stellard654d6692015-01-08 15:08:17 +00009728 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00009729}
Christian Konig8b1ed282013-04-10 08:39:16 +00009730
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009731/// Fix up instructions after instruction selection: legalize VOP3 operands for
Christian Konig8b1ed282013-04-10 08:39:16 +00009732/// the constant bus and rewrite atomics with unused results to their no-return forms
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009733void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00009734 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009735 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009736
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009737 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009738
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009739 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009740 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009741 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00009742 return;
9743 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00009744
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009745 // Replace unused atomics with the no return version.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009746 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009747 if (NoRetAtomicOp != -1) {
9748 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009749 MI.setDesc(TII->get(NoRetAtomicOp));
9750 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00009751 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009752 }
9753
Tom Stellard354a43c2016-04-01 18:27:37 +00009754 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
9755 // instruction, because the return type of these instructions is a vec2 of
9756 // the memory type, so it can be tied to the input operand.
9757 // This means these instructions always have a use, so we need to add a
9758 // special case to check if the atomic has only one extract_subreg use,
9759 // which itself has no uses.
9760 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00009761 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00009762 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
9763 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009764 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00009765
9766 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009767 MI.setDesc(TII->get(NoRetAtomicOp));
9768 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00009769
9770 // If we only remove the def operand from the atomic instruction, the
9771 // extract_subreg will be left with a use of a vreg without a def.
9772 // So we need to insert an implicit_def to avoid machine verifier
9773 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00009774 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00009775 TII->get(AMDGPU::IMPLICIT_DEF), Def);
9776 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00009777 return;
9778 }
Christian Konig8b1ed282013-04-10 08:39:16 +00009779}
Tom Stellard0518ff82013-06-03 17:39:58 +00009780
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009781static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
9782 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009783 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00009784 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
9785}
9786
9787MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009788 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00009789 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00009790 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00009791
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009792 // Build the half of the subregister with the constants before building the
9793 // full 128-bit register. If we are building multiple resource descriptors,
9794 // this will allow CSEing of the 2-component register.
9795 const SDValue Ops0[] = {
9796 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
9797 buildSMovImm32(DAG, DL, 0),
9798 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
9799 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
9800 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
9801 };
Matt Arsenault485defe2014-11-05 19:01:17 +00009802
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009803 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
9804 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00009805
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009806 // Combine the constants and the pointer.
9807 const SDValue Ops1[] = {
9808 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
9809 Ptr,
9810 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
9811 SubRegHi,
9812 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
9813 };
Matt Arsenault485defe2014-11-05 19:01:17 +00009814
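  // The resulting descriptor is effectively
  // { Ptr[31:0], Ptr[63:32], 0, TII->getDefaultRsrcDataFormat() >> 32 }.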
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00009815 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00009816}
9817
Adrian Prantl5f8f34e42018-05-01 15:54:18 +00009818/// Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00009819/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
9820/// of the resource descriptor) to create an offset, which is added to
9821/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00009822MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
9823 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009824 uint64_t RsrcDword2And3) const {
9825 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
9826 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
9827 if (RsrcDword1) {
9828 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009829 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
9830 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009831 }
9832
9833 SDValue DataLo = buildSMovImm32(DAG, DL,
9834 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
9835 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
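  // The descriptor dwords are therefore
  // { PtrLo, PtrHi (optionally OR'd with RsrcDword1),
  //   RsrcDword2And3[31:0], RsrcDword2And3[63:32] }.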
9836
9837 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009838 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009839 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009840 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009841 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009842 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009843 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009844 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009845 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00009846 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00009847 };
9848
9849 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
9850}
9851
Tom Stellardd7e6f132015-04-08 01:09:26 +00009852//===----------------------------------------------------------------------===//
9853// SI Inline Assembly Support
9854//===----------------------------------------------------------------------===//
9855
9856std::pair<unsigned, const TargetRegisterClass *>
9857SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00009858 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00009859 MVT VT) const {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009860 const TargetRegisterClass *RC = nullptr;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009861 if (Constraint.size() == 1) {
9862 switch (Constraint[0]) {
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009863 default:
9864 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009865 case 's':
9866 case 'r':
9867 switch (VT.getSizeInBits()) {
9868 default:
9869 return std::make_pair(0U, nullptr);
9870 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00009871 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009872 RC = &AMDGPU::SReg_32_XM0RegClass;
9873 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009874 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009875 RC = &AMDGPU::SGPR_64RegClass;
9876 break;
Tim Renouf361b5b22019-03-21 12:01:21 +00009877 case 96:
9878 RC = &AMDGPU::SReg_96RegClass;
9879 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009880 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009881 RC = &AMDGPU::SReg_128RegClass;
9882 break;
Tim Renouf033f99a2019-03-22 10:11:21 +00009883 case 160:
9884 RC = &AMDGPU::SReg_160RegClass;
9885 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009886 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009887 RC = &AMDGPU::SReg_256RegClass;
9888 break;
Matt Arsenaulte0bf7d02017-02-21 19:12:08 +00009889 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009890 RC = &AMDGPU::SReg_512RegClass;
9891 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009892 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009893 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009894 case 'v':
9895 switch (VT.getSizeInBits()) {
9896 default:
9897 return std::make_pair(0U, nullptr);
9898 case 32:
Matt Arsenault9e910142016-12-20 19:06:12 +00009899 case 16:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009900 RC = &AMDGPU::VGPR_32RegClass;
9901 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009902 case 64:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009903 RC = &AMDGPU::VReg_64RegClass;
9904 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009905 case 96:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009906 RC = &AMDGPU::VReg_96RegClass;
9907 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009908 case 128:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009909 RC = &AMDGPU::VReg_128RegClass;
9910 break;
Tim Renouf033f99a2019-03-22 10:11:21 +00009911 case 160:
9912 RC = &AMDGPU::VReg_160RegClass;
9913 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009914 case 256:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009915 RC = &AMDGPU::VReg_256RegClass;
9916 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009917 case 512:
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009918 RC = &AMDGPU::VReg_512RegClass;
9919 break;
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009920 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009921 break;
Tom Stellardd7e6f132015-04-08 01:09:26 +00009922 }
Daniil Fukalovc9a098b2018-06-08 16:29:04 +00009923 // We actually support i128, i16 and f16 as inline parameters
9924 // even if they are not reported as legal
9925 if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
9926 VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
9927 return std::make_pair(0U, RC);
Tom Stellardd7e6f132015-04-08 01:09:26 +00009928 }
9929
9930 if (Constraint.size() > 1) {
Tom Stellardd7e6f132015-04-08 01:09:26 +00009931 if (Constraint[1] == 'v') {
9932 RC = &AMDGPU::VGPR_32RegClass;
9933 } else if (Constraint[1] == 's') {
9934 RC = &AMDGPU::SGPR_32RegClass;
9935 }
9936
9937 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +00009938 uint32_t Idx;
9939 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
9940 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +00009941 return std::make_pair(RC->getRegister(Idx), RC);
9942 }
9943 }
9944 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9945}
Tom Stellardb3c3bda2015-12-10 02:12:53 +00009946
9947SITargetLowering::ConstraintType
9948SITargetLowering::getConstraintType(StringRef Constraint) const {
9949 if (Constraint.size() == 1) {
9950 switch (Constraint[0]) {
9951 default: break;
9952 case 's':
9953 case 'v':
9954 return C_RegisterClass;
9955 }
9956 }
9957 return TargetLowering::getConstraintType(Constraint);
9958}
Matt Arsenault1cc47f82017-07-18 16:44:56 +00009959
9960// Figure out which registers should be reserved for stack access. Only after
9961// the function is legalized do we know all of the non-spill stack objects or if
9962// calls are present.
9963void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
9964 MachineRegisterInfo &MRI = MF.getRegInfo();
9965 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
Tom Stellardc5a154d2018-06-28 23:47:12 +00009966 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
Matt Arsenault1cc47f82017-07-18 16:44:56 +00009967
9968 if (Info->isEntryFunction()) {
9969 // Callable functions have fixed registers used for stack access.
9970 reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
9971 }
9972
Matt Arsenaultb812b7a2019-06-05 22:20:47 +00009973 assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
9974 Info->getStackPtrOffsetReg()));
9975 if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG)
9976 MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
Matt Arsenault1cc47f82017-07-18 16:44:56 +00009977
Matt Arsenaultbc6d07c2019-03-14 22:54:43 +00009978 // We need to worry about replacing the default register with itself in case
9979 // of MIR testcases missing the MFI.
9980 if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG)
9981 MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
9982
9983 if (Info->getFrameOffsetReg() != AMDGPU::FP_REG)
9984 MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
9985
9986 if (Info->getScratchWaveOffsetReg() != AMDGPU::SCRATCH_WAVE_OFFSET_REG) {
9987 MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
9988 Info->getScratchWaveOffsetReg());
9989 }
Matt Arsenault1cc47f82017-07-18 16:44:56 +00009990
Stanislav Mekhanoshind4b500c2018-05-31 05:36:04 +00009991 Info->limitOccupancy(MF);
9992
Matt Arsenault1cc47f82017-07-18 16:44:56 +00009993 TargetLoweringBase::finalizeLowering(MF);
9994}
Matt Arsenault45b98182017-11-15 00:45:43 +00009995
9996void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
9997 KnownBits &Known,
9998 const APInt &DemandedElts,
9999 const SelectionDAG &DAG,
10000 unsigned Depth) const {
10001 TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
10002 DAG, Depth);
10003
Matt Arsenault5c714cb2019-05-23 19:38:14 +000010004 // Set the high bits to zero based on the maximum allowed scratch size per
10005 // wave. We can't use vaddr in MUBUF instructions if we don't know the address
Matt Arsenault45b98182017-11-15 00:45:43 +000010006 // calculation won't overflow, so assume the sign bit is never set.
Matt Arsenault5c714cb2019-05-23 19:38:14 +000010007 Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
Matt Arsenault45b98182017-11-15 00:45:43 +000010008}
Tom Stellard264c1712018-06-13 15:06:37 +000010009
Stanislav Mekhanoshin93f15c92019-05-03 21:17:29 +000010010unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
10011 const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
10012 const unsigned CacheLineAlign = 6; // log2(64)
10013
10014 // Pre-GFX10 targets do not benefit from loop alignment
10015 if (!ML || DisableLoopAlignment ||
10016 (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) ||
10017 getSubtarget()->hasInstFwdPrefetchBug())
10018 return PrefAlign;
10019
10020 // On GFX10 the I$ consists of 4 x 64-byte cache lines.
10021 // By default the prefetcher keeps one cache line behind and reads two ahead.
10022 // We can modify it with S_INST_PREFETCH for larger loops to keep two lines
10023 // behind and one ahead.
10024 // Therefore we can benefit from aligning loop headers if the loop fits in 192 bytes.
10025 // If the loop fits in 64 bytes it always spans no more than two cache lines and
10026 // does not need alignment.
10027 // Else, if the loop is at most 128 bytes, we do not need to modify the prefetch;
10028 // if it is at most 192 bytes, we need two lines behind.
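  // In summary (sizes in bytes):
  //   <= 64  : keep the default alignment, no prefetch change
  //   <= 128 : align to a 64-byte cache line, keep the default prefetch
  //   <= 192 : align to a cache line and switch to two lines behind / one ahead
  //   >  192 : keep the default alignment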
10029
10030 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
10031 const MachineBasicBlock *Header = ML->getHeader();
10032 if (Header->getAlignment() != PrefAlign)
10033 return Header->getAlignment(); // Already processed.
10034
10035 unsigned LoopSize = 0;
10036 for (const MachineBasicBlock *MBB : ML->blocks()) {
10037 // If an inner loop block is aligned, assume on average half of the alignment
10038 // size is added as nops.
10039 if (MBB != Header)
10040 LoopSize += (1 << MBB->getAlignment()) / 2;
10041
10042 for (const MachineInstr &MI : *MBB) {
10043 LoopSize += TII->getInstSizeInBytes(MI);
10044 if (LoopSize > 192)
10045 return PrefAlign;
10046 }
10047 }
10048
10049 if (LoopSize <= 64)
10050 return PrefAlign;
10051
10052 if (LoopSize <= 128)
10053 return CacheLineAlign;
10054
10055 // If any of the parent loops is surrounded by prefetch instructions, do not
10056 // insert new ones for the inner loop; that would reset the parent's settings.
10057 for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) {
10058 if (MachineBasicBlock *Exit = P->getExitBlock()) {
10059 auto I = Exit->getFirstNonDebugInstr();
10060 if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH)
10061 return CacheLineAlign;
10062 }
10063 }
10064
10065 MachineBasicBlock *Pre = ML->getLoopPreheader();
10066 MachineBasicBlock *Exit = ML->getExitBlock();
10067
10068 if (Pre && Exit) {
10069 BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(),
10070 TII->get(AMDGPU::S_INST_PREFETCH))
10071 .addImm(1); // prefetch 2 lines behind PC
10072
10073 BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(),
10074 TII->get(AMDGPU::S_INST_PREFETCH))
10075 .addImm(2); // prefetch 1 line behind PC
10076 }
10077
10078 return CacheLineAlign;
10079}
10080
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010081LLVM_ATTRIBUTE_UNUSED
10082static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
10083 assert(N->getOpcode() == ISD::CopyFromReg);
10084 do {
10085 // Follow the chain until we find an INLINEASM node.
10086 N = N->getOperand(0).getNode();
Craig Topper784929d2019-02-08 20:48:56 +000010087 if (N->getOpcode() == ISD::INLINEASM ||
10088 N->getOpcode() == ISD::INLINEASM_BR)
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010089 return true;
10090 } while (N->getOpcode() == ISD::CopyFromReg);
10091 return false;
10092}
10093
Tom Stellard264c1712018-06-13 15:06:37 +000010094bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
Nicolai Haehnle35617ed2018-08-30 14:21:36 +000010095 FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const
Tom Stellard264c1712018-06-13 15:06:37 +000010096{
10097 switch (N->getOpcode()) {
Tom Stellard264c1712018-06-13 15:06:37 +000010098 case ISD::CopyFromReg:
10099 {
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010100 const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
10101 const MachineFunction * MF = FLI->MF;
10102 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
10103 const MachineRegisterInfo &MRI = MF->getRegInfo();
10104 const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
10105 unsigned Reg = R->getReg();
10106 if (TRI.isPhysicalRegister(Reg))
10107 return !TRI.isSGPRReg(MRI, Reg);
Tom Stellard264c1712018-06-13 15:06:37 +000010108
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010109 if (MRI.isLiveIn(Reg)) {
10110 // workitem.id.x, workitem.id.y and workitem.id.z are live-in VGPRs;
10111 // any VGPR formal argument is likewise considered divergent
10112 if (!TRI.isSGPRReg(MRI, Reg))
10113 return true;
10114 // Formal arguments of non-entry functions
10115 // are conservatively considered divergent
10116 else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
10117 return true;
10118 return false;
Tom Stellard264c1712018-06-13 15:06:37 +000010119 }
Nicolai Haehnlea9cc92c2018-11-30 22:55:29 +000010120 const Value *V = FLI->getValueFromVirtualReg(Reg);
10121 if (V)
10122 return KDA->isDivergent(V);
10123 assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
10124 return !TRI.isSGPRReg(MRI, Reg);
Tom Stellard264c1712018-06-13 15:06:37 +000010125 }
10126 break;
10127 case ISD::LOAD: {
Matt Arsenault813613c2018-09-04 18:58:19 +000010128 const LoadSDNode *L = cast<LoadSDNode>(N);
10129 unsigned AS = L->getAddressSpace();
10130 // A flat load may access private memory.
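    // Private (scratch) accesses use per-lane addressing, so their results are
    // conservatively treated as divergent; a flat address may alias private
    // memory and gets the same treatment.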
10131 return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
Tom Stellard264c1712018-06-13 15:06:37 +000010132 } break;
10133 case ISD::CALLSEQ_END:
10134 return true;
10135 break;
10136 case ISD::INTRINSIC_WO_CHAIN:
10140 return AMDGPU::isIntrinsicSourceOfDivergence(
10141 cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
10142 case ISD::INTRINSIC_W_CHAIN:
10143 return AMDGPU::isIntrinsicSourceOfDivergence(
10144 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
10145 // In some cases intrinsics that are a source of divergence have been
10146 // lowered to AMDGPUISD so we also need to check those too.
10147 case AMDGPUISD::INTERP_MOV:
10148 case AMDGPUISD::INTERP_P1:
10149 case AMDGPUISD::INTERP_P2:
10150 return true;
10151 }
10152 return false;
10153}
Matt Arsenaultf8768bf2018-08-06 21:38:27 +000010154
10155bool SITargetLowering::denormalsEnabledForType(EVT VT) const {
10156 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
10157 case MVT::f32:
10158 return Subtarget->hasFP32Denormals();
10159 case MVT::f64:
10160 return Subtarget->hasFP64Denormals();
10161 case MVT::f16:
10162 return Subtarget->hasFP16Denormals();
10163 default:
10164 return false;
10165 }
10166}
Matt Arsenault687ec752018-10-22 16:27:27 +000010167
10168bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
10169 const SelectionDAG &DAG,
10170 bool SNaN,
10171 unsigned Depth) const {
10172 if (Op.getOpcode() == AMDGPUISD::CLAMP) {
Matt Arsenault055e4dc2019-03-29 19:14:54 +000010173 const MachineFunction &MF = DAG.getMachineFunction();
10174 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
10175
10176 if (Info->getMode().DX10Clamp)
Matt Arsenault687ec752018-10-22 16:27:27 +000010177 return true; // Clamped to 0.
10178 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
10179 }
10180
10181 return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
10182 SNaN, Depth);
10183}
Matt Arsenaulta5840c32019-01-22 18:36:06 +000010184
10185TargetLowering::AtomicExpansionKind
10186SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
10187 switch (RMW->getOperation()) {
10188 case AtomicRMWInst::FAdd: {
10189 Type *Ty = RMW->getType();
10190
10191 // We don't have a way to support 16-bit atomics now, so just leave them
10192 // as-is.
10193 if (Ty->isHalfTy())
10194 return AtomicExpansionKind::None;
10195
10196 if (!Ty->isFloatTy())
10197 return AtomicExpansionKind::CmpXChg;
10198
10199 // TODO: We do have these for flat; older targets also had them for buffers.
10200 unsigned AS = RMW->getPointerAddressSpace();
10201 return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
10202 AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
10203 }
10204 default:
10205 break;
10206 }
10207
10208 return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
10209}