//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#include <cmath>
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"

using namespace llvm;

SITargetLowering::SITargetLowering(TargetMachine &TM,
                                   const AMDGPUSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  computeRegisterProperties(STI.getRegisterInfo());

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::ADD, MVT::i32, Legal);
  setOperationAction(ISD::ADDC, MVT::i32, Legal);
  setOperationAction(ISD::ADDE, MVT::i32, Legal);
  setOperationAction(ISD::SUBC, MVT::i32, Legal);
  setOperationAction(ISD::SUBE, MVT::i32, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);

  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);

  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);

  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v16i16, Expand);
  }

  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);

  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  // These should use UDIVREM, so set them to expand.
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
  setOperationAction(ISD::SELECT, MVT::i1, Promote);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);

  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements, and for the 64-bit element vectors (v2i64, v2f64).
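  //
  // Illustrative consequence (a sketch of the effect, not extra
  // functionality): an op such as ISD::ADD on MVT::v8i32 hits the default
  // case below and is marked Expand, so the legalizer unrolls it into eight
  // scalar i32 adds rather than treating it as a single vector operation.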
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
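  //
  // For example (an illustration of the intended effect): a v2i64
  // BUILD_VECTOR is handled as the equivalent v4i32 node, conceptually with
  // bitcasts around it, instead of requiring 64-bit vector ALU support.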
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::UINT_TO_FP);

  // All memory operations. Some folding on the pointer operand is done to
  // help match the constant offsets in the addressing modes.
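  //
  // For instance (a sketch of the intent): for a DS access whose address is
  // (add ptr, 16), the combines try to keep the constant visible so that
  // instruction selection can fold the 16 into the instruction's immediate
  // offset field rather than emitting a separate address add.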
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  // Flat instructions do not have offsets, and only have the register
  // address.
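  //
  // A minimal sketch of what this accepts (illustrative, derived from the
  // return expression below): AM = {BaseGV = nullptr, BaseOffs = 0,
  // HasBaseReg = true, Scale = 0 or 1} is legal, while any non-zero
  // immediate offset such as BaseOffs = 4 forces a separate address
  // computation.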
  return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
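  //
  // Worked example (illustrative numbers, following the isUInt<12> check
  // below): an access at base + 4095 fits the 12-bit unsigned immediate and
  // is legal here, while base + 4096 is not and must materialize the offset
  // in a register.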
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r,
    // and 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r.
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS: {
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // Assume that we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }

  case AMDGPUAS::PRIVATE_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS: {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }
  case AMDGPUAS::FLAT_ADDRESS:
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  if (!VT.isSimple() || VT == MVT::Other)
    return false;

  // TODO: CI+ supports unaligned memory accesses, but this requires driver
  // support.

  // XXX - The only mention I see of this in the ISA manual is for LDS direct
  // reads, where the byte address "must be dword aligned". Is it also true
  // for the normal loads and stores?
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;
    return AlignedBy4;
  }

  // Values smaller than a dword must be aligned.
  // FIXME: This should be allowed on CI+.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.
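  //
  // For example (a sketch, assuming a global destination): a 64-byte memcpy
  // with a 4-byte aligned destination hits the first case below, steering
  // the memcpy lowering toward v4i32 (dword x4) accesses, four of them for
  // 64 bytes, instead of sixteen i32 accesses.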

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
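  //
  // For reference, IR like the following (a hypothetical example) is also
  // treated as uniform via the metadata check below, assuming an earlier
  // pass attached the annotation:
  //   %v = load i32, i32 addrspace(1)* %p, !amdgpu.uniform !0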
  if (isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || isa<Constant>(Ptr) ||
      isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
  return TII->isInlineConstant(Imm);
}

SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         SDLoc SL, SDValue Chain,
                                         unsigned Offset, bool Signed) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  unsigned InputPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                            DAG.getConstant(Offset, SL, PtrVT));
  SDValue PtrOffset = DAG.getUNDEF(PtrVT);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  ISD::LoadExtType ExtTy = Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
  if (MemVT.isFloatingPoint())
    ExtTy = ISD::EXTLOAD;

  return DAG.getLoad(ISD::UNINDEXED, ExtTy,
                     VT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemVT,
                     false, // isVolatile
                     true,  // isNonTemporal
                     true,  // isInvariant
                     Align); // Alignment
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (Subtarget->isAmdHsaOS() && Info->getShaderType() != ShaderType::COMPUTE) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(*Fn, "non-compute shaders with HSA");
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return SDValue();
  }

  // FIXME: We currently assume all calling conventions are kernels.

  SmallVector<ISD::InputArg, 16> Splits;
  BitVector Skipped(Ins.size());

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First check if it's a PS input addr.
    if (Info->getShaderType() == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal()) {

      assert((PSInputNum <= 15) && "Too many PS inputs!");

      if (!Arg.Used) {
        // We can safely skip PS inputs.
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->PSInputAddr |= 1 << PSInputNum++;
    }

    // Second, split vertices into their elements.
    if (Info->getShaderType() != ShaderType::COMPUTE && Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }

    } else if (Info->getShaderType() != ShaderType::COMPUTE) {
      Splits.push_back(Arg);
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  if (Info->getShaderType() == ShaderType::PIXEL &&
      (Info->PSInputAddr & 0x7F) == 0) {
    Info->PSInputAddr |= 1;
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
  }

  if (Info->getShaderType() == ShaderType::COMPUTE) {
    getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
                            Splits);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  AnalyzeFormalArguments(CCInfo, Splits);

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = Splits[i].VT;
      const unsigned Offset = Subtarget->getExplicitKernelArgOffset() +
                              VA.getLocMemOffset();
      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
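      //
      // So, as a sketch (assuming the usual 36-byte header and no argument
      // padding): the first i32 kernel argument is loaded from kernarg
      // offset 36, and a second i32 argument from offset 40.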
      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
                                   Offset, Ins[i].Flags.isSExt());
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
          dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->ABIArgOffset = Offset + MemVT.getStoreSize();
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer.
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SReg_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {

      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  } else
    llvm_unreachable("work group id x is always enabled");

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg
      = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
| 799 | |
| 800 | bool HasStackObjects = MF.getFrameInfo()->hasStackObjects(); |
| 801 | |
| 802 | if (ST.isAmdHsaOS()) { |
| 803 | // TODO: Assume we will spill without optimizations. |
| 804 | if (HasStackObjects) { |
| 805 | // If we have stack objects, we unquestionably need the private buffer |
| 806 | // resource. For the HSA ABI, this will be the first 4 user SGPR |
| 807 | // inputs. We can reserve those and use them directly. |
| 808 | |
| 809 | unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue( |
| 810 | MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER); |
| 811 | Info->setScratchRSrcReg(PrivateSegmentBufferReg); |
| 812 | |
| 813 | unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue( |
| 814 | MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); |
| 815 | Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); |
| 816 | } else { |
| 817 | unsigned ReservedBufferReg |
| 818 | = TRI->reservedPrivateSegmentBufferReg(MF); |
| 819 | unsigned ReservedOffsetReg |
| 820 | = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); |
| 821 | |
| 822 | // We tentatively reserve the last registers (skipping the last two |
| 823 | // which may contain VCC). After register allocation, we'll replace |
| 824 | // these with the ones immediately after those which were really |
| 825 | // allocated. In the prologue copies will be inserted from the argument |
| 826 | // to these reserved registers. |
| 827 | Info->setScratchRSrcReg(ReservedBufferReg); |
| 828 | Info->setScratchWaveOffsetReg(ReservedOffsetReg); |
| 829 | } |
| 830 | } else { |
| 831 | unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF); |
| 832 | |
| 833 | // Without HSA, relocations are used for the scratch pointer and the |
| 834 | // buffer resource setup is always inserted in the prologue. Scratch wave |
| 835 | // offset is still in an input SGPR. |
| 836 | Info->setScratchRSrcReg(ReservedBufferReg); |
| 837 | |
| 838 | if (HasStackObjects) { |
| 839 | unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue( |
| 840 | MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); |
| 841 | Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg); |
| 842 | } else { |
| 843 | unsigned ReservedOffsetReg |
| 844 | = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); |
| 845 | Info->setScratchWaveOffsetReg(ReservedOffsetReg); |
| 846 | } |
| 847 | } |
| 848 | |
| 849 | if (Info->hasWorkItemIDX()) { |
| 850 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X); |
| 851 | MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
| 852 | CCInfo.AllocateReg(Reg); |
| 853 | } else |
| 854 | llvm_unreachable("workitem id x should always be enabled"); |
| 855 | |
| 856 | if (Info->hasWorkItemIDY()) { |
| 857 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y); |
| 858 | MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
| 859 | CCInfo.AllocateReg(Reg); |
| 860 | } |
| 861 | |
| 862 | if (Info->hasWorkItemIDZ()) { |
| 863 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z); |
| 864 | MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
| 865 | CCInfo.AllocateReg(Reg); |
| 866 | } |
Matt Arsenault | 0e3d389 | 2015-11-30 21:15:53 +0000 | [diff] [blame] | 867 | |
Matt Arsenault | cf13d18 | 2015-07-10 22:51:36 +0000 | [diff] [blame] | 868 | if (Chains.empty()) |
| 869 | return Chain; |
| 870 | |
| 871 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
Christian Konig | 2c8f6d5 | 2013-03-07 09:03:52 +0000 | [diff] [blame] | 872 | } |
| 873 | |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 874 | MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter( |
| 875 | MachineInstr * MI, MachineBasicBlock * BB) const { |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 876 | |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 877 | switch (MI->getOpcode()) { |
| 878 | default: |
| 879 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); |
Matt Arsenault | 20711b7 | 2015-02-20 22:10:45 +0000 | [diff] [blame] | 880 | case AMDGPU::BRANCH: |
| 881 | return BB; |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 882 | } |
| 883 | return BB; |
| 884 | } |
| 885 | |
Matt Arsenault | 423bf3f | 2015-01-29 19:34:32 +0000 | [diff] [blame] | 886 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
| 887 | // This currently forces unfolding various combinations of fsub into fma with |
| 888 | // free fneg'd operands. As long as we have fast FMA (controlled by |
| 889 | // isFMAFasterThanFMulAndFAdd), we should perform these. |
| 890 | |
| 891 | // Even when fma is quarter rate, as for f64 where add / sub are at best
| 892 | // half rate, most of these combines appear to be cycle neutral but save on
| 893 | // instruction count / code size.
| 894 | return true; |
| 895 | } |
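// Illustrative examples of the folds this enables (a sketch, not code from
// this file): with aggressive fusion the combiner may rewrite
//
//   (fsub (fmul a, b), c)  -->  (fma a, b, (fneg c))
//   (fsub c, (fmul a, b))  -->  (fma (fneg a), b, c)
//
// which is profitable here because fneg is free via source modifiers.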
| 896 | |
Mehdi Amini | 44ede33 | 2015-07-09 02:09:04 +0000 | [diff] [blame] | 897 | EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, |
| 898 | EVT VT) const { |
Tom Stellard | 8374720 | 2013-07-18 21:43:53 +0000 | [diff] [blame] | 899 | if (!VT.isVector()) { |
| 900 | return MVT::i1; |
| 901 | } |
Matt Arsenault | 8596f71 | 2014-11-28 22:51:38 +0000 | [diff] [blame] | 902 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 903 | } |
| 904 | |
Mehdi Amini | eaabc51 | 2015-07-09 15:12:23 +0000 | [diff] [blame] | 905 | MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const { |
Christian Konig | 082a14a | 2013-03-18 11:34:05 +0000 | [diff] [blame] | 906 | return MVT::i32; |
| 907 | } |
| 908 | |
Matt Arsenault | 423bf3f | 2015-01-29 19:34:32 +0000 | [diff] [blame] | 909 | // Answering this is somewhat tricky and depends on the specific device, since
| 910 | // different devices can have different rates for fma and all f64 operations.
| 911 | // |
| 912 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other |
| 913 | // regardless of which device (although the number of cycles differs between |
| 914 | // devices), so it is always profitable for f64. |
| 915 | // |
| 916 | // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is
| 917 | // profitable only on full-rate devices. Normally we should prefer selecting
| 918 | // v_mad_f32, which we can always use even without fused FP ops: it returns
| 919 | // the same result as the separate operations and is always full
| 920 | // rate. Therefore, we lie and report that fma is not faster for f32. However,
| 921 | // v_mad_f32 does not support denormals, so we do report fma as faster if we
| 922 | // have a fast fma device and denormals are required.
| 923 | // |
Niels Ole Salscheider | d3a039f | 2013-08-10 10:38:54 +0000 | [diff] [blame] | 924 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { |
| 925 | VT = VT.getScalarType(); |
| 926 | |
| 927 | if (!VT.isSimple()) |
| 928 | return false; |
| 929 | |
| 930 | switch (VT.getSimpleVT().SimpleTy) { |
| 931 | case MVT::f32: |
Matt Arsenault | 423bf3f | 2015-01-29 19:34:32 +0000 | [diff] [blame] | 932 | // FMA is as fast as fmul + fadd on some subtargets. However, we always have
| 933 | // full rate f32 mad available, which returns the same result as the separate
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 934 | // operations, so we should prefer it over fma. We can't use mad if we want to
| 935 | // support denormals, so only report fma as faster in that case.
| 936 | return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); |
Niels Ole Salscheider | d3a039f | 2013-08-10 10:38:54 +0000 | [diff] [blame] | 937 | case MVT::f64: |
| 938 | return true; |
| 939 | default: |
| 940 | break; |
| 941 | } |
| 942 | |
| 943 | return false; |
| 944 | } |
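// For example (illustrative IR, not from a regression test): on a subtarget
// where hasFastFMAF32() and hasFP32Denormals() both hold, the combiner may
// contract
//
//   %m = fmul float %a, %b
//   %r = fadd float %m, %c
//
// into a single fma node because the hook above returns true for f32; on a
// quarter-rate-fma part it returns false and v_mad_f32 is preferred.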
| 945 | |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 946 | //===----------------------------------------------------------------------===// |
| 947 | // Custom DAG Lowering Operations |
| 948 | //===----------------------------------------------------------------------===// |
| 949 | |
| 950 | SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
| 951 | switch (Op.getOpcode()) { |
| 952 | default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 953 | case ISD::FrameIndex: return LowerFrameIndex(Op, DAG); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 954 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
Tom Stellard | 35bb18c | 2013-08-26 15:06:04 +0000 | [diff] [blame] | 955 | case ISD::LOAD: { |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 956 | SDValue Result = LowerLOAD(Op, DAG); |
| 957 | assert((!Result.getNode() || |
| 958 | Result.getNode()->getNumValues() == 2) && |
| 959 | "Load should return a value and a chain"); |
| 960 | return Result; |
Tom Stellard | 35bb18c | 2013-08-26 15:06:04 +0000 | [diff] [blame] | 961 | } |
Tom Stellard | af77543 | 2013-10-23 00:44:32 +0000 | [diff] [blame] | 962 | |
Matt Arsenault | ad14ce8 | 2014-07-19 18:44:39 +0000 | [diff] [blame] | 963 | case ISD::FSIN: |
| 964 | case ISD::FCOS: |
| 965 | return LowerTrig(Op, DAG); |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 966 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 967 | case ISD::FDIV: return LowerFDIV(Op, DAG); |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 968 | case ISD::STORE: return LowerSTORE(Op, DAG); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 969 | case ISD::GlobalAddress: { |
| 970 | MachineFunction &MF = DAG.getMachineFunction(); |
| 971 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 972 | return LowerGlobalAddress(MFI, Op, DAG); |
Tom Stellard | 94593ee | 2013-06-03 17:40:18 +0000 | [diff] [blame] | 973 | } |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 974 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
| 975 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 976 | } |
| 977 | return SDValue(); |
| 978 | } |
| 979 | |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 980 | /// \brief Helper function for LowerBRCOND |
| 981 | static SDNode *findUser(SDValue Value, unsigned Opcode) { |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 982 | |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 983 | SDNode *Parent = Value.getNode(); |
| 984 | for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); |
| 985 | I != E; ++I) { |
| 986 | |
| 987 | if (I.getUse().get() != Value) |
| 988 | continue; |
| 989 | |
| 990 | if (I->getOpcode() == Opcode) |
| 991 | return *I; |
| 992 | } |
Craig Topper | 062a2ba | 2014-04-25 05:30:21 +0000 | [diff] [blame] | 993 | return nullptr; |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 994 | } |
| 995 | |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 996 | SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const { |
| 997 | |
Tom Stellard | c98ee20 | 2015-07-16 19:40:07 +0000 | [diff] [blame] | 998 | SDLoc SL(Op); |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 999 | FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op); |
| 1000 | unsigned FrameIndex = FINode->getIndex(); |
| 1001 | |
Tom Stellard | c98ee20 | 2015-07-16 19:40:07 +0000 | [diff] [blame] | 1002 | // A FrameIndex node represents a 32-bit offset into scratch memory. If |
| 1003 | // the high bit of a frame index offset were to be set, this would mean |
| 1004 | // that it represented an offset of ~2GB * 64 = ~128GB from the start of the |
| 1005 | // scratch buffer, with 64 being the number of threads per wave. |
| 1006 | // |
| 1007 | // If we know the machine uses less than 128GB of scratch, then we can |
| 1008 | // mark the high bit of the FrameIndex node as known zero,
| 1009 | // which is important, because it means in most situations we can |
| 1010 | // prove that values derived from FrameIndex nodes are non-negative. |
| 1011 | // This enables us to take advantage of more addressing modes when |
| 1012 | // accessing scratch buffers, since for scratch reads/writes, the register |
| 1013 | // offset must always be positive. |
| 1014 | |
| 1015 | SDValue TFI = DAG.getTargetFrameIndex(FrameIndex, MVT::i32); |
| 1016 | if (Subtarget->enableHugeScratchBuffer()) |
| 1017 | return TFI; |
| 1018 | |
| 1019 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, TFI, |
| 1020 | DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 31))); |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1021 | } |
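// Sanity check of the arithmetic in the comment above. This is purely an
// illustrative sketch (the names are local to this check and used nowhere
// else): a 31-bit per-lane offset times 64 lanes per wave covers 2^37
// bytes, i.e. ~128GB.
namespace {
constexpr uint64_t MaxPerLaneScratchOffset = 1ull << 31; // high bit clear
constexpr uint64_t LanesPerWave = 64;
static_assert(MaxPerLaneScratchOffset * LanesPerWave == (1ull << 37),
              "~128GB of scratch is addressable with a 31-bit offset");
}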
| 1022 | |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1023 | /// This transforms the control flow intrinsics to take the branch destination
| 1024 | /// as their last parameter; it also switches the branch target with BR if the need arises.
| 1025 | SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, |
| 1026 | SelectionDAG &DAG) const { |
| 1027 | |
Andrew Trick | ef9de2a | 2013-05-25 02:42:55 +0000 | [diff] [blame] | 1028 | SDLoc DL(BRCOND); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1029 | |
| 1030 | SDNode *Intr = BRCOND.getOperand(1).getNode(); |
| 1031 | SDValue Target = BRCOND.getOperand(2); |
Craig Topper | 062a2ba | 2014-04-25 05:30:21 +0000 | [diff] [blame] | 1032 | SDNode *BR = nullptr; |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1033 | |
| 1034 | if (Intr->getOpcode() == ISD::SETCC) { |
| 1035 | // As long as we negate the condition, everything is fine
| 1036 | SDNode *SetCC = Intr; |
| 1037 | assert(SetCC->getConstantOperandVal(1) == 1); |
NAKAMURA Takumi | 458a827 | 2013-01-07 11:14:44 +0000 | [diff] [blame] | 1038 | assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == |
| 1039 | ISD::SETNE); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1040 | Intr = SetCC->getOperand(0).getNode(); |
| 1041 | |
| 1042 | } else { |
| 1043 | // Get the target from BR if we don't negate the condition |
| 1044 | BR = findUser(BRCOND, ISD::BR); |
| 1045 | Target = BR->getOperand(1); |
| 1046 | } |
| 1047 | |
| 1048 | assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
| 1049 | |
| 1050 | // Build the result types of the new intrinsic call, skipping the first value (the old condition result)
Benjamin Kramer | 6cd780f | 2015-02-17 15:29:18 +0000 | [diff] [blame] | 1051 | ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1052 | |
| 1053 | // Collect the operands of the new intrinsic call
| 1054 | SmallVector<SDValue, 4> Ops; |
| 1055 | Ops.push_back(BRCOND.getOperand(0)); |
Benjamin Kramer | 6cd780f | 2015-02-17 15:29:18 +0000 | [diff] [blame] | 1056 | Ops.append(Intr->op_begin() + 1, Intr->op_end()); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1057 | Ops.push_back(Target); |
| 1058 | |
| 1059 | // Build the new intrinsic call
| 1060 | SDNode *Result = DAG.getNode( |
| 1061 | Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, |
Craig Topper | 48d114b | 2014-04-26 18:35:24 +0000 | [diff] [blame] | 1062 | DAG.getVTList(Res), Ops).getNode(); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1063 | |
| 1064 | if (BR) { |
| 1065 | // Give the branch instruction our target |
| 1066 | SDValue Ops[] = { |
| 1067 | BR->getOperand(0), |
| 1068 | BRCOND.getOperand(2) |
| 1069 | }; |
Chandler Carruth | 356665a | 2014-08-01 22:09:43 +0000 | [diff] [blame] | 1070 | SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); |
| 1071 | DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); |
| 1072 | BR = NewBR.getNode(); |
Tom Stellard | f879435 | 2012-12-19 22:10:31 +0000 | [diff] [blame] | 1073 | } |
| 1074 | |
| 1075 | SDValue Chain = SDValue(Result, Result->getNumValues() - 1); |
| 1076 | |
| 1077 | // Copy the intrinsic results to registers |
| 1078 | for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { |
| 1079 | SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); |
| 1080 | if (!CopyToReg) |
| 1081 | continue; |
| 1082 | |
| 1083 | Chain = DAG.getCopyToReg( |
| 1084 | Chain, DL, |
| 1085 | CopyToReg->getOperand(1), |
| 1086 | SDValue(Result, i - 1), |
| 1087 | SDValue()); |
| 1088 | |
| 1089 | DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); |
| 1090 | } |
| 1091 | |
| 1092 | // Remove the old intrinsic from the chain |
| 1093 | DAG.ReplaceAllUsesOfValueWith( |
| 1094 | SDValue(Intr, Intr->getNumValues() - 1), |
| 1095 | Intr->getOperand(0)); |
| 1096 | |
| 1097 | return Chain; |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 1098 | } |
| 1099 | |
Tom Stellard | 067c815 | 2014-07-21 14:01:14 +0000 | [diff] [blame] | 1100 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, |
| 1101 | SDValue Op, |
| 1102 | SelectionDAG &DAG) const { |
| 1103 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); |
| 1104 | |
| 1105 | if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) |
| 1106 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); |
| 1107 | |
| 1108 | SDLoc DL(GSD); |
| 1109 | const GlobalValue *GV = GSD->getGlobal(); |
Mehdi Amini | 44ede33 | 2015-07-09 02:09:04 +0000 | [diff] [blame] | 1110 | MVT PtrVT = getPointerTy(DAG.getDataLayout(), GSD->getAddressSpace()); |
Tom Stellard | 067c815 | 2014-07-21 14:01:14 +0000 | [diff] [blame] | 1111 | |
Tom Stellard | 067c815 | 2014-07-21 14:01:14 +0000 | [diff] [blame] | 1112 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32); |
Tom Stellard | c93fc11 | 2015-12-10 02:13:01 +0000 | [diff] [blame] | 1113 | return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, DL, PtrVT, GA); |
Tom Stellard | 067c815 | 2014-07-21 14:01:14 +0000 | [diff] [blame] | 1114 | } |
| 1115 | |
Tom Stellard | fc92e77 | 2015-05-12 14:18:14 +0000 | [diff] [blame] | 1116 | SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL, |
| 1117 | SDValue V) const { |
| 1118 | // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, |
| 1119 | // so we will end up with redundant moves to m0. |
| 1120 | // |
| 1121 | // We can't use S_MOV_B32, because there is no way to specify m0 as the |
| 1122 | // destination register. |
| 1123 | // |
| 1124 | // We have to use them both. Machine cse will combine all the S_MOV_B32 |
| 1125 | // instructions and the register coalescer eliminate the extra copies. |
| 1126 | SDNode *M0 = DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, V.getValueType(), V); |
| 1127 | return DAG.getCopyToReg(Chain, DL, DAG.getRegister(AMDGPU::M0, MVT::i32), |
| 1128 | SDValue(M0, 0), SDValue()); // A null SDValue creates a glue result.
| 1131 | } |
| 1132 | |
Matt Arsenault | ff6da2f | 2015-11-30 21:15:45 +0000 | [diff] [blame] | 1133 | SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, |
| 1134 | SDValue Op, |
| 1135 | MVT VT, |
| 1136 | unsigned Offset) const { |
| 1137 | SDLoc SL(Op); |
| 1138 | SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL, |
| 1139 | DAG.getEntryNode(), Offset, false); |
| 1140 | // The local size values will have the hi 16-bits as zero. |
| 1141 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, |
| 1142 | DAG.getValueType(VT)); |
| 1143 | } |
| 1144 | |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1145 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, |
| 1146 | SelectionDAG &DAG) const { |
| 1147 | MachineFunction &MF = DAG.getMachineFunction(); |
Tom Stellard | dcb9f09 | 2015-07-09 21:20:37 +0000 | [diff] [blame] | 1148 | auto MFI = MF.getInfo<SIMachineFunctionInfo>(); |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1149 | const SIRegisterInfo *TRI = |
Eric Christopher | 7792e32 | 2015-01-30 23:24:40 +0000 | [diff] [blame] | 1150 | static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo()); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1151 | |
| 1152 | EVT VT = Op.getValueType(); |
| 1153 | SDLoc DL(Op); |
| 1154 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 1155 | |
Sanjay Patel | a260701 | 2015-09-16 16:31:21 +0000 | [diff] [blame] | 1156 | // TODO: Should this propagate fast-math-flags? |
| 1157 | |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1158 | switch (IntrinsicID) { |
Tom Stellard | 48f29f2 | 2015-11-26 00:43:29 +0000 | [diff] [blame] | 1159 | case Intrinsic::amdgcn_dispatch_ptr: |
Matt Arsenault | 800fecf | 2016-01-11 21:18:33 +0000 | [diff] [blame^] | 1160 | if (!Subtarget->isAmdHsaOS()) { |
| 1161 | DiagnosticInfoUnsupported BadIntrin(*MF.getFunction(), |
| 1162 | "hsa intrinsic without hsa target"); |
| 1163 | DAG.getContext()->diagnose(BadIntrin); |
| 1164 | return DAG.getUNDEF(VT); |
| 1165 | } |
| 1166 | |
Tom Stellard | 48f29f2 | 2015-11-26 00:43:29 +0000 | [diff] [blame] | 1167 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, |
| 1168 | TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_PTR), VT); |
| 1169 | |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1170 | case Intrinsic::r600_read_ngroups_x: |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1171 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 1172 | SI::KernelInputOffsets::NGROUPS_X, false); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1173 | case Intrinsic::r600_read_ngroups_y: |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1174 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 1175 | SI::KernelInputOffsets::NGROUPS_Y, false); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1176 | case Intrinsic::r600_read_ngroups_z: |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1177 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 1178 | SI::KernelInputOffsets::NGROUPS_Z, false); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1179 | case Intrinsic::r600_read_global_size_x: |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1180 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 1181 | SI::KernelInputOffsets::GLOBAL_SIZE_X, false); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1182 | case Intrinsic::r600_read_global_size_y: |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1183 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 1184 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1185 | case Intrinsic::r600_read_global_size_z: |
Tom Stellard | ec2e43c | 2014-09-22 15:35:29 +0000 | [diff] [blame] | 1186 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
| 1187 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1188 | case Intrinsic::r600_read_local_size_x: |
Matt Arsenault | ff6da2f | 2015-11-30 21:15:45 +0000 | [diff] [blame] | 1189 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
| 1190 | SI::KernelInputOffsets::LOCAL_SIZE_X); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1191 | case Intrinsic::r600_read_local_size_y: |
Matt Arsenault | ff6da2f | 2015-11-30 21:15:45 +0000 | [diff] [blame] | 1192 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
| 1193 | SI::KernelInputOffsets::LOCAL_SIZE_Y); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1194 | case Intrinsic::r600_read_local_size_z: |
Matt Arsenault | ff6da2f | 2015-11-30 21:15:45 +0000 | [diff] [blame] | 1195 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
| 1196 | SI::KernelInputOffsets::LOCAL_SIZE_Z); |
Jan Vesely | e5121f3 | 2014-10-14 20:05:26 +0000 | [diff] [blame] | 1197 | case Intrinsic::AMDGPU_read_workdim: |
Matt Arsenault | ff6da2f | 2015-11-30 21:15:45 +0000 | [diff] [blame] | 1198 | // Really only 2 bits. |
| 1199 | return lowerImplicitZextParam(DAG, Op, MVT::i8, |
| 1200 | getImplicitParameterOffset(MFI, GRID_DIM)); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1201 | case Intrinsic::r600_read_tgid_x: |
| 1202 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, |
Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1203 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1204 | case Intrinsic::r600_read_tgid_y: |
| 1205 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, |
Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1206 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1207 | case Intrinsic::r600_read_tgid_z: |
| 1208 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, |
Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1209 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1210 | case Intrinsic::r600_read_tidig_x: |
Tom Stellard | 45c0b3a | 2015-01-07 20:59:25 +0000 | [diff] [blame] | 1211 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1212 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1213 | case Intrinsic::r600_read_tidig_y: |
Tom Stellard | 45c0b3a | 2015-01-07 20:59:25 +0000 | [diff] [blame] | 1214 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1215 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1216 | case Intrinsic::r600_read_tidig_z: |
Tom Stellard | 45c0b3a | 2015-01-07 20:59:25 +0000 | [diff] [blame] | 1217 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
Matt Arsenault | ac234b6 | 2015-11-30 21:15:57 +0000 | [diff] [blame] | 1218 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1219 | case AMDGPUIntrinsic::SI_load_const: { |
| 1220 | SDValue Ops[] = { |
| 1221 | Op.getOperand(1), |
| 1222 | Op.getOperand(2) |
| 1223 | }; |
| 1224 | |
| 1225 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
| 1226 | MachinePointerInfo(), |
| 1227 | MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, |
| 1228 | VT.getStoreSize(), 4); |
| 1229 | return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, |
| 1230 | Op->getVTList(), Ops, VT, MMO); |
| 1231 | } |
| 1232 | case AMDGPUIntrinsic::SI_sample: |
| 1233 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG); |
| 1234 | case AMDGPUIntrinsic::SI_sampleb: |
| 1235 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG); |
| 1236 | case AMDGPUIntrinsic::SI_sampled: |
| 1237 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG); |
| 1238 | case AMDGPUIntrinsic::SI_samplel: |
| 1239 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG); |
| 1240 | case AMDGPUIntrinsic::SI_vs_load_input: |
| 1241 | return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, |
| 1242 | Op.getOperand(1), |
| 1243 | Op.getOperand(2), |
| 1244 | Op.getOperand(3)); |
Marek Olsak | 43650e4 | 2015-03-24 13:40:08 +0000 | [diff] [blame] | 1245 | |
| 1246 | case AMDGPUIntrinsic::AMDGPU_fract: |
| 1247 | case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name. |
| 1248 | return DAG.getNode(ISD::FSUB, DL, VT, Op.getOperand(1), |
| 1249 | DAG.getNode(ISD::FFLOOR, DL, VT, Op.getOperand(1))); |
Tom Stellard | 2a9d947 | 2015-05-12 15:00:46 +0000 | [diff] [blame] | 1250 | case AMDGPUIntrinsic::SI_fs_constant: { |
| 1251 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); |
| 1252 | SDValue Glue = M0.getValue(1); |
| 1253 | return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, |
| 1254 | DAG.getConstant(2, DL, MVT::i32), // P0 |
| 1255 | Op.getOperand(1), Op.getOperand(2), Glue); |
| 1256 | } |
Marek Olsak | 6f6d318 | 2015-10-29 15:29:09 +0000 | [diff] [blame] | 1257 | case AMDGPUIntrinsic::SI_packf16: |
| 1258 | if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef()) |
| 1259 | return DAG.getUNDEF(MVT::i32); |
| 1260 | return Op; |
Tom Stellard | 2a9d947 | 2015-05-12 15:00:46 +0000 | [diff] [blame] | 1261 | case AMDGPUIntrinsic::SI_fs_interp: { |
| 1262 | SDValue IJ = Op.getOperand(4); |
| 1263 | SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, |
| 1264 | DAG.getConstant(0, DL, MVT::i32)); |
| 1265 | SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, |
| 1266 | DAG.getConstant(1, DL, MVT::i32)); |
| 1267 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); |
| 1268 | SDValue Glue = M0.getValue(1); |
| 1269 | SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL, |
| 1270 | DAG.getVTList(MVT::f32, MVT::Glue), |
| 1271 | I, Op.getOperand(1), Op.getOperand(2), Glue); |
| 1272 | Glue = SDValue(P1.getNode(), 1); |
| 1273 | return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J, |
| 1274 | Op.getOperand(1), Op.getOperand(2), Glue); |
| 1275 | } |
Tom Stellard | ad7d03d | 2015-12-15 17:02:49 +0000 | [diff] [blame] | 1276 | case Intrinsic::amdgcn_interp_p1: { |
| 1277 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); |
| 1278 | SDValue Glue = M0.getValue(1); |
| 1279 | return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), |
| 1280 | Op.getOperand(2), Op.getOperand(3), Glue); |
| 1281 | } |
| 1282 | case Intrinsic::amdgcn_interp_p2: { |
| 1283 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); |
| 1284 | SDValue Glue = SDValue(M0.getNode(), 1); |
| 1285 | return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), |
| 1286 | Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), |
| 1287 | Glue); |
| 1288 | } |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1289 | default: |
| 1290 | return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
| 1291 | } |
| 1292 | } |
| 1293 | |
| 1294 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, |
| 1295 | SelectionDAG &DAG) const { |
| 1296 | MachineFunction &MF = DAG.getMachineFunction(); |
Tom Stellard | fc92e77 | 2015-05-12 14:18:14 +0000 | [diff] [blame] | 1297 | SDLoc DL(Op); |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1298 | SDValue Chain = Op.getOperand(0); |
| 1299 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
| 1300 | |
| 1301 | switch (IntrinsicID) { |
Tom Stellard | fc92e77 | 2015-05-12 14:18:14 +0000 | [diff] [blame] | 1302 | case AMDGPUIntrinsic::SI_sendmsg: { |
| 1303 | Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); |
| 1304 | SDValue Glue = Chain.getValue(1); |
| 1305 | return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain, |
| 1306 | Op.getOperand(2), Glue); |
| 1307 | } |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1308 | case AMDGPUIntrinsic::SI_tbuffer_store: { |
Matt Arsenault | a5789bb | 2014-07-26 06:23:37 +0000 | [diff] [blame] | 1309 | SDValue Ops[] = { |
| 1310 | Chain, |
| 1311 | Op.getOperand(2), |
| 1312 | Op.getOperand(3), |
| 1313 | Op.getOperand(4), |
| 1314 | Op.getOperand(5), |
| 1315 | Op.getOperand(6), |
| 1316 | Op.getOperand(7), |
| 1317 | Op.getOperand(8), |
| 1318 | Op.getOperand(9), |
| 1319 | Op.getOperand(10), |
| 1320 | Op.getOperand(11), |
| 1321 | Op.getOperand(12), |
| 1322 | Op.getOperand(13), |
| 1323 | Op.getOperand(14) |
| 1324 | }; |
| 1325 | |
| 1326 | EVT VT = Op.getOperand(3).getValueType(); |
| 1327 | |
| 1328 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
| 1329 | MachinePointerInfo(), |
| 1330 | MachineMemOperand::MOStore, |
| 1331 | VT.getStoreSize(), 4); |
| 1332 | return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, |
| 1333 | Op->getVTList(), Ops, VT, MMO); |
| 1334 | } |
| 1335 | default: |
| 1336 | return SDValue(); |
| 1337 | } |
| 1338 | } |
| 1339 | |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1340 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { |
| 1341 | SDLoc DL(Op); |
| 1342 | LoadSDNode *Load = cast<LoadSDNode>(Op); |
| 1343 | |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1344 | if (Op.getValueType().isVector()) { |
| 1345 | assert(Op.getValueType().getVectorElementType() == MVT::i32 && |
| 1346 | "Custom lowering for non-i32 vectors hasn't been implemented."); |
| 1347 | unsigned NumElements = Op.getValueType().getVectorNumElements(); |
| 1348 | assert(NumElements != 2 && "v2 loads are supported for all address spaces."); |
Matt Arsenault | 4d801cd | 2015-11-24 12:05:03 +0000 | [diff] [blame] | 1349 | |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1350 | switch (Load->getAddressSpace()) { |
| 1351 | default: break; |
Tom Stellard | a6f24c6 | 2015-12-15 20:55:55 +0000 | [diff] [blame] | 1352 | case AMDGPUAS::CONSTANT_ADDRESS: |
| 1353 | if (isMemOpUniform(Load)) |
| 1354 | break; |
| 1355 | // Non-uniform loads will be selected to MUBUF instructions, so they |
| 1356 | // have the same legalization requirements as global and private
| 1357 | // loads. |
| 1358 | // |
| 1359 | // Fall-through |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1360 | case AMDGPUAS::GLOBAL_ADDRESS: |
| 1361 | case AMDGPUAS::PRIVATE_ADDRESS: |
Matt Arsenault | 4d801cd | 2015-11-24 12:05:03 +0000 | [diff] [blame] | 1362 | if (NumElements >= 8) |
| 1363 | return SplitVectorLoad(Op, DAG); |
| 1364 | |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1365 | // v4 loads are supported for private and global memory. |
| 1366 | if (NumElements <= 4) |
| 1367 | break; |
| 1368 | // fall-through |
| 1369 | case AMDGPUAS::LOCAL_ADDRESS: |
Matt Arsenault | ff05da8 | 2015-11-24 12:18:54 +0000 | [diff] [blame] | 1370 | // If properly aligned, splitting might let us use ds_read_b64.
| 1371 | return SplitVectorLoad(Op, DAG); |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1372 | } |
Tom Stellard | e937360 | 2014-01-22 19:24:14 +0000 | [diff] [blame] | 1373 | } |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1374 | |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1375 | return AMDGPUTargetLowering::LowerLOAD(Op, DAG); |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1376 | } |
| 1377 | |
Tom Stellard | 9fa1791 | 2013-08-14 23:24:45 +0000 | [diff] [blame] | 1378 | SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode, |
| 1379 | const SDValue &Op, |
| 1380 | SelectionDAG &DAG) const { |
| 1381 | return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1), |
| 1382 | Op.getOperand(2), |
Tom Stellard | 868fd92 | 2014-04-17 21:00:11 +0000 | [diff] [blame] | 1383 | Op.getOperand(3), |
Tom Stellard | 9fa1791 | 2013-08-14 23:24:45 +0000 | [diff] [blame] | 1384 | Op.getOperand(4)); |
| 1385 | } |
| 1386 | |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 1387 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 1388 | if (Op.getValueType() != MVT::i64) |
| 1389 | return SDValue(); |
| 1390 | |
| 1391 | SDLoc DL(Op); |
| 1392 | SDValue Cond = Op.getOperand(0); |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 1393 | |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1394 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); |
| 1395 | SDValue One = DAG.getConstant(1, DL, MVT::i32); |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 1396 | |
Tom Stellard | 7ea3d6d | 2014-03-31 14:01:55 +0000 | [diff] [blame] | 1397 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); |
| 1398 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); |
| 1399 | |
| 1400 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); |
| 1401 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 1402 | |
| 1403 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); |
| 1404 | |
Tom Stellard | 7ea3d6d | 2014-03-31 14:01:55 +0000 | [diff] [blame] | 1405 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); |
| 1406 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 1407 | |
| 1408 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); |
| 1409 | |
Tom Stellard | 7ea3d6d | 2014-03-31 14:01:55 +0000 | [diff] [blame] | 1410 | SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi); |
| 1411 | return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); |
Tom Stellard | 0ec134f | 2014-02-04 17:18:40 +0000 | [diff] [blame] | 1412 | } |
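// In effect, the lowering above decomposes the 64-bit select into two 32-bit
// selects (a sketch of the dataflow, not authoritative syntax):
//
//   select c, a:i64, b:i64
//     => lo  = select c, a.lo, b.lo
//        hi  = select c, a.hi, b.hi
//        res = bitcast (build_vector lo, hi) to i64
//
// so each half can later select to a v_cndmask_b32.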
| 1413 | |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1414 | // Catch division cases where we can use shortcuts with rcp and rsq |
| 1415 | // instructions. |
| 1416 | SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const { |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1417 | SDLoc SL(Op); |
| 1418 | SDValue LHS = Op.getOperand(0); |
| 1419 | SDValue RHS = Op.getOperand(1); |
| 1420 | EVT VT = Op.getValueType(); |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1421 | bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1422 | |
| 1423 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1424 | if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) && |
| 1425 | CLHS->isExactlyValue(1.0)) { |
| 1426 | // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to |
| 1427 | // the CI documentation have a worst-case error of 1 ulp.
| 1428 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to |
| 1429 | // use it as long as we aren't trying to use denormals. |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1430 | |
| 1431 | // 1.0 / sqrt(x) -> rsq(x) |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1432 | // |
| 1433 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP |
| 1434 | // error seems really high at 2^29 ULP. |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1435 | if (RHS.getOpcode() == ISD::FSQRT) |
| 1436 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); |
| 1437 | |
| 1438 | // 1.0 / x -> rcp(x) |
| 1439 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
| 1440 | } |
| 1441 | } |
| 1442 | |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1443 | if (Unsafe) { |
| 1444 | // Turn into multiply by the reciprocal. |
| 1445 | // x / y -> x * (1.0 / y) |
Sanjay Patel | a260701 | 2015-09-16 16:31:21 +0000 | [diff] [blame] | 1446 | SDNodeFlags Flags; |
| 1447 | Flags.setUnsafeAlgebra(true); |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1448 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
Sanjay Patel | a260701 | 2015-09-16 16:31:21 +0000 | [diff] [blame] | 1449 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags); |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1450 | } |
| 1451 | |
| 1452 | return SDValue(); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1453 | } |
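// Examples of the shortcuts above (illustrative only):
//
//   fdiv 1.0, (fsqrt x)  -->  RSQ(x)            // one instruction
//   fdiv 1.0, x          -->  RCP(x)
//   fdiv x, y            -->  fmul x, RCP(y)    // unsafe-math only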
| 1454 | |
| 1455 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { |
Matt Arsenault | 22ca3f8 | 2014-07-15 23:50:10 +0000 | [diff] [blame] | 1456 | SDValue FastLowered = LowerFastFDIV(Op, DAG); |
| 1457 | if (FastLowered.getNode()) |
| 1458 | return FastLowered; |
| 1459 | |
| 1460 | // This uses v_rcp_f32 which does not handle denormals. Let this hit a |
| 1461 | // selection error for now rather than do something incorrect. |
| 1462 | if (Subtarget->hasFP32Denormals()) |
| 1463 | return SDValue(); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1464 | |
| 1465 | SDLoc SL(Op); |
| 1466 | SDValue LHS = Op.getOperand(0); |
| 1467 | SDValue RHS = Op.getOperand(1); |
| 1468 | |
| 1469 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); |
| 1470 | |
| 1471 | const APFloat K0Val(BitsToFloat(0x6f800000)); |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1472 | const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1473 | |
| 1474 | const APFloat K1Val(BitsToFloat(0x2f800000)); |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1475 | const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1476 | |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1477 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1478 | |
Mehdi Amini | 44ede33 | 2015-07-09 02:09:04 +0000 | [diff] [blame] | 1479 | EVT SetCCVT = |
| 1480 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1481 | |
| 1482 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); |
| 1483 | |
| 1484 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); |
| 1485 | |
Sanjay Patel | a260701 | 2015-09-16 16:31:21 +0000 | [diff] [blame] | 1486 | // TODO: Should this propagate fast-math-flags? |
| 1487 | |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1488 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); |
| 1489 | |
| 1490 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); |
| 1491 | |
| 1492 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); |
| 1493 | |
| 1494 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); |
| 1495 | } |
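// The magic constants above decode as IEEE-754 powers of two; the sequence
// is a scale-and-rescale trick that keeps v_rcp_f32's input out of the
// range where its result would flush to zero. A sketch of the identity,
// assuming |y| > 2^96 so the select picks K1:
//
//   x / y == 2^-32 * (x * rcp(y * 2^-32))
//
// When |y| <= 2^96 the select picks 1.0 and the scaling is a no-op. These
// checks are illustrative only:
static_assert(0x6f800000u == ((96u + 127u) << 23), "K0 is 2^96");
static_assert(0x2f800000u == ((127u - 32u) << 23), "K1 is 2^-32");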
| 1496 | |
| 1497 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { |
Matt Arsenault | 0bbcd8b | 2015-02-14 04:30:08 +0000 | [diff] [blame] | 1498 | if (DAG.getTarget().Options.UnsafeFPMath) |
| 1499 | return LowerFastFDIV(Op, DAG); |
| 1500 | |
| 1501 | SDLoc SL(Op); |
| 1502 | SDValue X = Op.getOperand(0); |
| 1503 | SDValue Y = Op.getOperand(1); |
| 1504 | |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1505 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); |
Matt Arsenault | 0bbcd8b | 2015-02-14 04:30:08 +0000 | [diff] [blame] | 1506 | |
| 1507 | SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); |
| 1508 | |
| 1509 | SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); |
| 1510 | |
| 1511 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); |
| 1512 | |
| 1513 | SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); |
| 1514 | |
| 1515 | SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); |
| 1516 | |
| 1517 | SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); |
| 1518 | |
| 1519 | SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); |
| 1520 | |
| 1521 | SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); |
| 1522 | |
| 1523 | SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); |
| 1524 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); |
| 1525 | |
| 1526 | SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, |
| 1527 | NegDivScale0, Mul, DivScale1); |
| 1528 | |
| 1529 | SDValue Scale; |
| 1530 | |
| 1531 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { |
| 1532 | // Work around a hardware bug on SI where the condition output from div_scale
| 1533 | // is not usable. |
| 1534 | |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1535 | const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); |
Matt Arsenault | 0bbcd8b | 2015-02-14 04:30:08 +0000 | [diff] [blame] | 1536 | |
| 1537 | // Figure out the scale to use for div_fmas.
| 1538 | SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); |
| 1539 | SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); |
| 1540 | SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); |
| 1541 | SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); |
| 1542 | |
| 1543 | SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); |
| 1544 | SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); |
| 1545 | |
| 1546 | SDValue Scale0Hi |
| 1547 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); |
| 1548 | SDValue Scale1Hi |
| 1549 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); |
| 1550 | |
| 1551 | SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); |
| 1552 | SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); |
| 1553 | Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); |
| 1554 | } else { |
| 1555 | Scale = DivScale1.getValue(1); |
| 1556 | } |
| 1557 | |
| 1558 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, |
| 1559 | Fma4, Fma3, Mul, Scale); |
| 1560 | |
| 1561 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); |
Matt Arsenault | e9fa3b8 | 2014-07-15 20:18:31 +0000 | [diff] [blame] | 1562 | } |
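// The FMA chain above is two Newton-Raphson refinement steps for 1/y,
// followed by the quotient and its residual (a sketch of the math; x and y
// here are the scaled values produced by div_scale):
//
//   e0  = 1 - y*r0       (Fma0)
//   r1  = r0 + r0*e0     (Fma1)   first refinement of rcp(y)
//   e1  = 1 - y*r1       (Fma2)
//   r2  = r1 + r1*e1     (Fma3)   second refinement
//   q   = x * r2         (Mul)
//   rem = x - y*q        (Fma4)   residual, consumed by div_fmas/div_fixup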
| 1563 | |
| 1564 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { |
| 1565 | EVT VT = Op.getValueType(); |
| 1566 | |
| 1567 | if (VT == MVT::f32) |
| 1568 | return LowerFDIV32(Op, DAG); |
| 1569 | |
| 1570 | if (VT == MVT::f64) |
| 1571 | return LowerFDIV64(Op, DAG); |
| 1572 | |
| 1573 | llvm_unreachable("Unexpected type for fdiv"); |
| 1574 | } |
| 1575 | |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1576 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { |
| 1577 | SDLoc DL(Op); |
| 1578 | StoreSDNode *Store = cast<StoreSDNode>(Op); |
| 1579 | EVT VT = Store->getMemoryVT(); |
| 1580 | |
Tom Stellard | 9b3816b | 2014-06-24 23:33:04 +0000 | [diff] [blame] | 1581 | // These stores are legal. |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1582 | if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { |
| 1583 | if (VT.isVector() && VT.getVectorNumElements() > 4) |
Matt Arsenault | 83e6058 | 2014-07-24 17:10:35 +0000 | [diff] [blame] | 1584 | return ScalarizeVectorStore(Op, DAG); |
Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 1585 | return SDValue(); |
| 1586 | } |
| 1587 | |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1588 | SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG); |
| 1589 | if (Ret.getNode()) |
| 1590 | return Ret; |
| 1591 | |
| 1592 | if (VT.isVector() && VT.getVectorNumElements() >= 8) |
Matt Arsenault | 4d801cd | 2015-11-24 12:05:03 +0000 | [diff] [blame] | 1593 | return SplitVectorStore(Op, DAG); |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1594 | |
Tom Stellard | 1c8788e | 2014-03-07 20:12:33 +0000 | [diff] [blame] | 1595 | if (VT == MVT::i1) |
| 1596 | return DAG.getTruncStore(Store->getChain(), DL, |
| 1597 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), |
| 1598 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); |
| 1599 | |
Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 1600 | return SDValue(); |
Tom Stellard | 81d871d | 2013-11-13 23:36:50 +0000 | [diff] [blame] | 1601 | } |
| 1602 | |
Matt Arsenault | ad14ce8 | 2014-07-19 18:44:39 +0000 | [diff] [blame] | 1603 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1604 | SDLoc DL(Op); |
Matt Arsenault | ad14ce8 | 2014-07-19 18:44:39 +0000 | [diff] [blame] | 1605 | EVT VT = Op.getValueType(); |
| 1606 | SDValue Arg = Op.getOperand(0); |
Sanjay Patel | a260701 | 2015-09-16 16:31:21 +0000 | [diff] [blame] | 1607 | // TODO: Should this propagate fast-math-flags? |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1608 | SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, |
| 1609 | DAG.getNode(ISD::FMUL, DL, VT, Arg, |
| 1610 | DAG.getConstantFP(0.5/M_PI, DL, |
| 1611 | VT))); |
Matt Arsenault | ad14ce8 | 2014-07-19 18:44:39 +0000 | [diff] [blame] | 1612 | |
| 1613 | switch (Op.getOpcode()) { |
| 1614 | case ISD::FCOS: |
| 1615 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); |
| 1616 | case ISD::FSIN: |
| 1617 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); |
| 1618 | default: |
| 1619 | llvm_unreachable("Wrong trig opcode"); |
| 1620 | } |
| 1621 | } |
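// In other words (a sketch): sin(x) becomes SIN_HW(fract(x * 1/(2*pi))),
// since the hardware opcode expects its argument normalized to whole
// periods in [0, 1) rather than in radians.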
| 1622 | |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 1623 | //===----------------------------------------------------------------------===// |
| 1624 | // Custom DAG optimizations |
| 1625 | //===----------------------------------------------------------------------===// |
| 1626 | |
Matt Arsenault | 364a674 | 2014-06-11 17:50:44 +0000 | [diff] [blame] | 1627 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, |
Matt Arsenault | e698663 | 2015-01-14 01:35:22 +0000 | [diff] [blame] | 1628 | DAGCombinerInfo &DCI) const { |
Matt Arsenault | 364a674 | 2014-06-11 17:50:44 +0000 | [diff] [blame] | 1629 | EVT VT = N->getValueType(0); |
| 1630 | EVT ScalarVT = VT.getScalarType(); |
| 1631 | if (ScalarVT != MVT::f32) |
| 1632 | return SDValue(); |
| 1633 | |
| 1634 | SelectionDAG &DAG = DCI.DAG; |
| 1635 | SDLoc DL(N); |
| 1636 | |
| 1637 | SDValue Src = N->getOperand(0); |
| 1638 | EVT SrcVT = Src.getValueType(); |
| 1639 | |
| 1640 | // TODO: We could try to match extracting the higher bytes, which would be |
| 1641 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after |
| 1642 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry |
| 1643 | // about in practice. |
| 1644 | if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { |
| 1645 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { |
| 1646 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); |
| 1647 | DCI.AddToWorklist(Cvt.getNode()); |
| 1648 | return Cvt; |
| 1649 | } |
| 1650 | } |
| 1651 | |
| 1652 | // We are primarily trying to catch operations on illegal vector types |
| 1653 | // before they are expanded. |
| 1654 | // For scalars, we can use the more flexible method of checking masked bits |
| 1655 | // after legalization. |
| 1656 | if (!DCI.isBeforeLegalize() || |
| 1657 | !SrcVT.isVector() || |
| 1658 | SrcVT.getVectorElementType() != MVT::i8) { |
| 1659 | return SDValue(); |
| 1660 | } |
| 1661 | |
| 1662 | assert(DCI.isBeforeLegalize() && "Unexpected legal type"); |
| 1663 | |
| 1664 | // Weird sized vectors are a pain to handle, but we know 3 is really the same |
| 1665 | // size as 4. |
| 1666 | unsigned NElts = SrcVT.getVectorNumElements(); |
| 1667 | if (!SrcVT.isSimple() && NElts != 3) |
| 1668 | return SDValue(); |
| 1669 | |
| 1670 | // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to |
| 1671 | // prevent a mess from expanding to v4i32 and repacking. |
| 1672 | if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) { |
| 1673 | EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT); |
| 1674 | EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT); |
| 1675 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts); |
Matt Arsenault | 364a674 | 2014-06-11 17:50:44 +0000 | [diff] [blame] | 1676 | LoadSDNode *Load = cast<LoadSDNode>(Src); |
Matt Arsenault | e698663 | 2015-01-14 01:35:22 +0000 | [diff] [blame] | 1677 | |
| 1678 | unsigned AS = Load->getAddressSpace(); |
| 1679 | unsigned Align = Load->getAlignment(); |
| 1680 | Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext()); |
Mehdi Amini | a749f2a | 2015-07-09 02:09:52 +0000 | [diff] [blame] | 1681 | unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); |
Matt Arsenault | e698663 | 2015-01-14 01:35:22 +0000 | [diff] [blame] | 1682 | |
| 1683 | // Don't try to replace the load if we have to expand it due to alignment |
| 1684 | // problems. Otherwise we will end up scalarizing the load, and trying to |
| 1685 | // repack into the vector for no real reason. |
| 1686 | if (Align < ABIAlignment && |
| 1687 | !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) { |
| 1688 | return SDValue(); |
| 1689 | } |
| 1690 | |
Matt Arsenault | 364a674 | 2014-06-11 17:50:44 +0000 | [diff] [blame] | 1691 | SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT, |
| 1692 | Load->getChain(), |
| 1693 | Load->getBasePtr(), |
| 1694 | LoadVT, |
| 1695 | Load->getMemOperand()); |
| 1696 | |
| 1697 | // Make sure successors of the original load stay after it by updating |
| 1698 | // them to use the new Chain. |
| 1699 | DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1)); |
| 1700 | |
| 1701 | SmallVector<SDValue, 4> Elts; |
| 1702 | if (RegVT.isVector()) |
| 1703 | DAG.ExtractVectorElements(NewLoad, Elts); |
| 1704 | else |
| 1705 | Elts.push_back(NewLoad); |
| 1706 | |
| 1707 | SmallVector<SDValue, 4> Ops; |
| 1708 | |
| 1709 | unsigned EltIdx = 0; |
| 1710 | for (SDValue Elt : Elts) { |
| 1711 | unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx); |
| 1712 | for (unsigned I = 0; I < ComponentsInElt; ++I) { |
| 1713 | unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I; |
| 1714 | SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt); |
| 1715 | DCI.AddToWorklist(Cvt.getNode()); |
| 1716 | Ops.push_back(Cvt); |
| 1717 | } |
| 1718 | |
| 1719 | ++EltIdx; |
| 1720 | } |
| 1721 | |
| 1722 | assert(Ops.size() == NElts); |
| 1723 | |
| 1724 | return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops); |
| 1725 | } |
| 1726 | |
| 1727 | return SDValue(); |
| 1728 | } |
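// An illustrative example of the pattern this combine targets (not taken
// from a regression test):
//
//   %v = load <4 x i8>, <4 x i8> addrspace(1)* %ptr
//   %f = uitofp <4 x i8> %v to <4 x float>
//
// becomes a single i32 load feeding CVT_F32_UBYTE0..CVT_F32_UBYTE3 instead
// of expanding through v4i32 and repacking.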
| 1729 | |
Eric Christopher | 6c5b511 | 2015-03-11 18:43:21 +0000 | [diff] [blame] | 1730 | /// \brief Return true if the given offset Size in bytes can be folded into |
| 1731 | /// the immediate offsets of a memory instruction for the given address space. |
| 1732 | static bool canFoldOffset(unsigned OffsetSize, unsigned AS, |
| 1733 | const AMDGPUSubtarget &STI) { |
| 1734 | switch (AS) { |
| 1735 | case AMDGPUAS::GLOBAL_ADDRESS: { |
| 1736 | // MUBUF instructions have a 12-bit offset in bytes.
| 1737 | return isUInt<12>(OffsetSize); |
| 1738 | } |
| 1739 | case AMDGPUAS::CONSTANT_ADDRESS: { |
| 1740 | // SMRD instructions have an 8-bit offset in dwords on SI and |
| 1741 | // a 20-bit offset in bytes on VI. |
| 1742 | if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| 1743 | return isUInt<20>(OffsetSize); |
| 1744 | else |
| 1745 | return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4); |
| 1746 | } |
| 1747 | case AMDGPUAS::LOCAL_ADDRESS: |
| 1748 | case AMDGPUAS::REGION_ADDRESS: { |
| 1749 | // The single offset versions have a 16-bit offset in bytes. |
| 1750 | return isUInt<16>(OffsetSize); |
| 1751 | } |
| 1752 | case AMDGPUAS::PRIVATE_ADDRESS: |
| 1753 | // Indirect register addressing does not use any offsets. |
| 1754 | default: |
| 1755 | return false;
| 1756 | } |
| 1757 | } |
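// For example (illustrative values): canFoldOffset(4095, GLOBAL_ADDRESS, STI)
// is true while canFoldOffset(4096, GLOBAL_ADDRESS, STI) is false, since
// MUBUF immediate offsets are 12 bits. For CONSTANT_ADDRESS on SI, only
// dword-aligned offsets up to 255 dwords (1020 bytes) can fold.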
| 1758 | |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 1759 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) |
| 1760 | |
| 1761 | // This is a variant of |
| 1762 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), |
| 1763 | // |
| 1764 | // The normal DAG combiner will do this, but only if the add has one use,
| 1765 | // since otherwise it would increase the number of instructions.
| 1766 | // |
| 1767 | // This prevents us from seeing a constant offset that can be folded into a |
| 1768 | // memory instruction's addressing mode. If we know the resulting add offset of |
| 1769 | // a pointer can be folded into an addressing offset, we can replace the pointer |
| 1770 | // operand with the add of new constant offset. This eliminates one of the uses, |
| 1771 | // and may allow the remaining use to also be simplified. |
| 1772 | // |
| 1773 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, |
| 1774 | unsigned AddrSpace, |
| 1775 | DAGCombinerInfo &DCI) const { |
| 1776 | SDValue N0 = N->getOperand(0); |
| 1777 | SDValue N1 = N->getOperand(1); |
| 1778 | |
| 1779 | if (N0.getOpcode() != ISD::ADD) |
| 1780 | return SDValue(); |
| 1781 | |
| 1782 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); |
| 1783 | if (!CN1) |
| 1784 | return SDValue(); |
| 1785 | |
| 1786 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| 1787 | if (!CAdd) |
| 1788 | return SDValue(); |
| 1789 | |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 1790 | // If the resulting offset is too large, we can't fold it into the addressing |
| 1791 | // mode offset. |
| 1792 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); |
Eric Christopher | 6c5b511 | 2015-03-11 18:43:21 +0000 | [diff] [blame] | 1793 | if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *Subtarget)) |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 1794 | return SDValue(); |
| 1795 | |
| 1796 | SelectionDAG &DAG = DCI.DAG; |
| 1797 | SDLoc SL(N); |
| 1798 | EVT VT = N->getValueType(0); |
| 1799 | |
| 1800 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1801 | SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 1802 | |
| 1803 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); |
| 1804 | } |
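// Worked example (a sketch): for a LOCAL_ADDRESS pointer p,
//
//   (shl (add p, 16), 2)  -->  (add (shl p, 2), 64)
//
// and the +64, which passes the canFoldOffset check above (16-bit DS
// offset), can then fold into a ds_read/ds_write immediate.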
| 1805 | |
Matt Arsenault | d0101a2 | 2015-01-06 23:00:46 +0000 | [diff] [blame] | 1806 | SDValue SITargetLowering::performAndCombine(SDNode *N, |
| 1807 | DAGCombinerInfo &DCI) const { |
| 1808 | if (DCI.isBeforeLegalize()) |
| 1809 | return SDValue(); |
| 1810 | |
| 1811 | SelectionDAG &DAG = DCI.DAG; |
| 1812 | |
| 1813 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> |
| 1814 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) |
| 1815 | SDValue LHS = N->getOperand(0); |
| 1816 | SDValue RHS = N->getOperand(1); |
| 1817 | |
| 1818 | if (LHS.getOpcode() == ISD::SETCC && |
| 1819 | RHS.getOpcode() == ISD::SETCC) { |
| 1820 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
| 1821 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
| 1822 | |
| 1823 | SDValue X = LHS.getOperand(0); |
| 1824 | SDValue Y = RHS.getOperand(0); |
| 1825 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) |
| 1826 | return SDValue(); |
| 1827 | |
| 1828 | if (LCC == ISD::SETO) { |
| 1829 | if (X != LHS.getOperand(1)) |
| 1830 | return SDValue(); |
| 1831 | |
| 1832 | if (RCC == ISD::SETUNE) { |
| 1833 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); |
| 1834 | if (!C1 || !C1->isInfinity() || C1->isNegative()) |
| 1835 | return SDValue(); |
| 1836 | |
| 1837 | const uint32_t Mask = SIInstrFlags::N_NORMAL | |
| 1838 | SIInstrFlags::N_SUBNORMAL | |
| 1839 | SIInstrFlags::N_ZERO | |
| 1840 | SIInstrFlags::P_ZERO | |
| 1841 | SIInstrFlags::P_SUBNORMAL | |
| 1842 | SIInstrFlags::P_NORMAL; |
| 1843 | |
| 1844 | static_assert(((~(SIInstrFlags::S_NAN | |
| 1845 | SIInstrFlags::Q_NAN | |
| 1846 | SIInstrFlags::N_INFINITY | |
| 1847 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, |
| 1848 | "mask not equal"); |
| 1849 | |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1850 | SDLoc DL(N); |
| 1851 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
| 1852 | X, DAG.getConstant(Mask, DL, MVT::i32)); |
Matt Arsenault | d0101a2 | 2015-01-06 23:00:46 +0000 | [diff] [blame] | 1853 | } |
| 1854 | } |
| 1855 | } |
| 1856 | |
| 1857 | return SDValue(); |
| 1858 | } |
| 1859 | |
Matt Arsenault | f229033 | 2015-01-06 23:00:39 +0000 | [diff] [blame] | 1860 | SDValue SITargetLowering::performOrCombine(SDNode *N, |
| 1861 | DAGCombinerInfo &DCI) const { |
| 1862 | SelectionDAG &DAG = DCI.DAG; |
| 1863 | SDValue LHS = N->getOperand(0); |
| 1864 | SDValue RHS = N->getOperand(1); |
| 1865 | |
| 1866 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) |
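| 　 | // e.g. (or (fp_class x, P_INFINITY), (fp_class x, N_INFINITY)) becomes a
| 　 | // single (fp_class x, P_INFINITY | N_INFINITY) test.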
| 1867 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && |
| 1868 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { |
| 1869 | SDValue Src = LHS.getOperand(0); |
| 1870 | if (Src != RHS.getOperand(0)) |
| 1871 | return SDValue(); |
| 1872 | |
| 1873 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
| 1874 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
| 1875 | if (!CLHS || !CRHS) |
| 1876 | return SDValue(); |
| 1877 | |
| 1878 | // Only the low 10 bits of the fp_class mask are used.
| 1879 | static const uint32_t MaxMask = 0x3ff; |
| 1880 | |
| 1881 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1882 | SDLoc DL(N); |
| 1883 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
| 1884 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); |
Matt Arsenault | f229033 | 2015-01-06 23:00:39 +0000 | [diff] [blame] | 1885 | } |
| 1886 | |
| 1887 | return SDValue(); |
| 1888 | } |
| 1889 | |
| 1890 | SDValue SITargetLowering::performClassCombine(SDNode *N, |
| 1891 | DAGCombinerInfo &DCI) const { |
| 1892 | SelectionDAG &DAG = DCI.DAG; |
| 1893 | SDValue Mask = N->getOperand(1); |
| 1894 | |
| 1895 | // fp_class x, 0 -> false |
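| 　 | // (An empty class mask can never match anything.)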
| 1896 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { |
| 1897 | if (CMask->isNullValue()) |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1898 | return DAG.getConstant(0, SDLoc(N), MVT::i1); |
Matt Arsenault | f229033 | 2015-01-06 23:00:39 +0000 | [diff] [blame] | 1899 | } |
| 1900 | |
| 1901 | return SDValue(); |
| 1902 | } |
| 1903 | |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 1904 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { |
| 1905 | switch (Opc) { |
| 1906 | case ISD::FMAXNUM: |
| 1907 | return AMDGPUISD::FMAX3; |
Matt Arsenault | 5881f4e | 2015-06-09 00:52:37 +0000 | [diff] [blame] | 1908 | case ISD::SMAX: |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 1909 | return AMDGPUISD::SMAX3; |
Matt Arsenault | 5881f4e | 2015-06-09 00:52:37 +0000 | [diff] [blame] | 1910 | case ISD::UMAX: |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 1911 | return AMDGPUISD::UMAX3; |
| 1912 | case ISD::FMINNUM: |
| 1913 | return AMDGPUISD::FMIN3; |
Matt Arsenault | 5881f4e | 2015-06-09 00:52:37 +0000 | [diff] [blame] | 1914 | case ISD::SMIN: |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 1915 | return AMDGPUISD::SMIN3; |
Matt Arsenault | 5881f4e | 2015-06-09 00:52:37 +0000 | [diff] [blame] | 1916 | case ISD::UMIN: |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 1917 | return AMDGPUISD::UMIN3; |
| 1918 | default: |
| 1919 | llvm_unreachable("Not a min/max opcode"); |
| 1920 | } |
| 1921 | } |
| 1922 | |
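| 　 | /// \brief Fold a nested min/max pair into a single three-operand min3/max3
| 　 | /// node; GCN provides v_min3/v_max3 instructions for these.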
| 1923 | SDValue SITargetLowering::performMin3Max3Combine(SDNode *N, |
| 1924 | DAGCombinerInfo &DCI) const { |
| 1925 | SelectionDAG &DAG = DCI.DAG; |
| 1926 | |
| 1927 | unsigned Opc = N->getOpcode(); |
| 1928 | SDValue Op0 = N->getOperand(0); |
| 1929 | SDValue Op1 = N->getOperand(1); |
| 1930 | |
| 1931 | // Only do this if the inner op has one use, since this will just increase
| 1932 | // register pressure for no benefit.
| 1933 | |
| 1934 | // max(max(a, b), c) |
| 1935 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { |
| 1936 | SDLoc DL(N); |
| 1937 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
| 1938 | DL, |
| 1939 | N->getValueType(0), |
| 1940 | Op0.getOperand(0), |
| 1941 | Op0.getOperand(1), |
| 1942 | Op1); |
| 1943 | } |
| 1944 | |
| 1945 | // max(a, max(b, c)) |
| 1946 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { |
| 1947 | SDLoc DL(N); |
| 1948 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
| 1949 | DL, |
| 1950 | N->getValueType(0), |
| 1951 | Op0, |
| 1952 | Op1.getOperand(0), |
| 1953 | Op1.getOperand(1)); |
| 1954 | } |
| 1955 | |
| 1956 | return SDValue(); |
| 1957 | } |
| 1958 | |
Matt Arsenault | 6f6233d | 2015-01-06 23:00:41 +0000 | [diff] [blame] | 1959 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, |
| 1960 | DAGCombinerInfo &DCI) const { |
| 1961 | SelectionDAG &DAG = DCI.DAG; |
| 1962 | SDLoc SL(N); |
| 1963 | |
| 1964 | SDValue LHS = N->getOperand(0); |
| 1965 | SDValue RHS = N->getOperand(1); |
| 1966 | EVT VT = LHS.getValueType(); |
| 1967 | |
| 1968 | if (VT != MVT::f32 && VT != MVT::f64) |
| 1969 | return SDValue(); |
| 1970 | |
| 1971 | // Match isinf pattern |
| 1972 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) |
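| 　 | // (|x| equals +infinity exactly when x is +inf or -inf, so the class mask
| 　 | // covers both infinities.)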
| 1973 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); |
| 1974 | if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { |
| 1975 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); |
| 1976 | if (!CRHS) |
| 1977 | return SDValue(); |
| 1978 | |
| 1979 | const APFloat &APF = CRHS->getValueAPF(); |
| 1980 | if (APF.isInfinity() && !APF.isNegative()) { |
| 1981 | unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 1982 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), |
| 1983 | DAG.getConstant(Mask, SL, MVT::i32)); |
Matt Arsenault | 6f6233d | 2015-01-06 23:00:41 +0000 | [diff] [blame] | 1984 | } |
| 1985 | } |
| 1986 | |
| 1987 | return SDValue(); |
| 1988 | } |
| 1989 | |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 1990 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, |
| 1991 | DAGCombinerInfo &DCI) const { |
| 1992 | SelectionDAG &DAG = DCI.DAG; |
Andrew Trick | ef9de2a | 2013-05-25 02:42:55 +0000 | [diff] [blame] | 1993 | SDLoc DL(N); |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 1994 | |
| 1995 | switch (N->getOpcode()) { |
Matt Arsenault | 22b4c25 | 2014-12-21 16:48:42 +0000 | [diff] [blame] | 1996 | default: |
| 1997 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
Matt Arsenault | 6f6233d | 2015-01-06 23:00:41 +0000 | [diff] [blame] | 1998 | case ISD::SETCC: |
| 1999 | return performSetCCCombine(N, DCI); |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 2000 | case ISD::FMAXNUM: // TODO: What about fmax_legacy? |
| 2001 | case ISD::FMINNUM: |
Matt Arsenault | 5881f4e | 2015-06-09 00:52:37 +0000 | [diff] [blame] | 2002 | case ISD::SMAX: |
| 2003 | case ISD::SMIN: |
| 2004 | case ISD::UMAX: |
| 2005 | case ISD::UMIN: { |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 2006 | if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && |
Tom Stellard | 7c840bc | 2015-03-16 15:53:55 +0000 | [diff] [blame] | 2007 | N->getValueType(0) != MVT::f64 && |
Matt Arsenault | cc3c2b3 | 2014-11-14 20:08:52 +0000 | [diff] [blame] | 2008 | getTargetMachine().getOptLevel() > CodeGenOpt::None) |
| 2009 | return performMin3Max3Combine(N, DCI); |
| 2010 | break; |
| 2011 | } |
Matt Arsenault | 364a674 | 2014-06-11 17:50:44 +0000 | [diff] [blame] | 2012 | |
| 2013 | case AMDGPUISD::CVT_F32_UBYTE0: |
| 2014 | case AMDGPUISD::CVT_F32_UBYTE1: |
| 2015 | case AMDGPUISD::CVT_F32_UBYTE2: |
| 2016 | case AMDGPUISD::CVT_F32_UBYTE3: { |
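| 　 | // These nodes convert a single byte of their 32-bit source to float, so
| 　 | // the other three bytes are don't-cares; try to shrink the constant and
| 　 | // simplify the source using only the demanded byte.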
| 2017 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; |
| 2018 | |
| 2019 | SDValue Src = N->getOperand(0); |
| 2020 | APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); |
| 2021 | |
| 2022 | APInt KnownZero, KnownOne; |
| 2023 | TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), |
| 2024 | !DCI.isBeforeLegalizeOps()); |
| 2025 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 2026 | if (TLO.ShrinkDemandedConstant(Src, Demanded) || |
| 2027 | TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) { |
| 2028 | DCI.CommitTargetLoweringOpt(TLO); |
| 2029 | } |
| 2030 | |
| 2031 | break; |
| 2032 | } |
| 2033 | |
| 2034 | case ISD::UINT_TO_FP: { |
| 2035 | return performUCharToFloatCombine(N, DCI); |
Matt Arsenault | de5fbe9 | 2016-01-11 17:02:00 +0000 | [diff] [blame] | 2036 | } |
Matt Arsenault | 02cb0ff | 2014-09-29 14:59:34 +0000 | [diff] [blame] | 2037 | case ISD::FADD: { |
| 2038 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
| 2039 | break; |
| 2040 | |
| 2041 | EVT VT = N->getValueType(0); |
| 2042 | if (VT != MVT::f32) |
| 2043 | break; |
| 2044 | |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2045 | // Only do this if we are not trying to support denormals; v_mad_f32 never
| 2046 | // supports denormals.
| 2047 | if (Subtarget->hasFP32Denormals()) |
| 2048 | break; |
| 2049 | |
Matt Arsenault | 02cb0ff | 2014-09-29 14:59:34 +0000 | [diff] [blame] | 2050 | SDValue LHS = N->getOperand(0); |
| 2051 | SDValue RHS = N->getOperand(1); |
| 2052 | |
| 2053 | // These should really be instruction patterns, but writing patterns with |
| 2054 | // source modifiers is a pain.
| 2055 | |
| 2056 | // fadd (fadd (a, a), b) -> mad 2.0, a, b |
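| 　 | // ((a + a) + b == 2.0 * a + b, a single mad.)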
| 2057 | if (LHS.getOpcode() == ISD::FADD) { |
| 2058 | SDValue A = LHS.getOperand(0); |
| 2059 | if (A == LHS.getOperand(1)) { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2060 | const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2061 | return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS); |
Matt Arsenault | 02cb0ff | 2014-09-29 14:59:34 +0000 | [diff] [blame] | 2062 | } |
| 2063 | } |
| 2064 | |
| 2065 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b |
| 2066 | if (RHS.getOpcode() == ISD::FADD) { |
| 2067 | SDValue A = RHS.getOperand(0); |
| 2068 | if (A == RHS.getOperand(1)) { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2069 | const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2070 | return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS); |
Matt Arsenault | 02cb0ff | 2014-09-29 14:59:34 +0000 | [diff] [blame] | 2071 | } |
| 2072 | } |
| 2073 | |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2074 | return SDValue(); |
Matt Arsenault | 02cb0ff | 2014-09-29 14:59:34 +0000 | [diff] [blame] | 2075 | } |
Matt Arsenault | 8675db1 | 2014-08-29 16:01:14 +0000 | [diff] [blame] | 2076 | case ISD::FSUB: { |
| 2077 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
| 2078 | break; |
| 2079 | |
| 2080 | EVT VT = N->getValueType(0); |
| 2081 | |
| 2082 | // Try to get the fneg to fold into the source modifier. This undoes generic |
| 2083 | // DAG combines and folds them into the mad. |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2084 | // |
| 2085 | // Only do this if we are not trying to support denormals; v_mad_f32 never
| 2086 | // supports denormals.
| 2087 | if (VT == MVT::f32 && |
| 2088 | !Subtarget->hasFP32Denormals()) { |
Matt Arsenault | 8675db1 | 2014-08-29 16:01:14 +0000 | [diff] [blame] | 2089 | SDValue LHS = N->getOperand(0); |
| 2090 | SDValue RHS = N->getOperand(1); |
Matt Arsenault | 3d4233f | 2014-09-29 14:59:38 +0000 | [diff] [blame] | 2091 | if (LHS.getOpcode() == ISD::FADD) { |
| 2092 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) |
| 2093 | |
| 2094 | SDValue A = LHS.getOperand(0); |
| 2095 | if (A == LHS.getOperand(1)) { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2096 | const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32); |
Matt Arsenault | 3d4233f | 2014-09-29 14:59:38 +0000 | [diff] [blame] | 2097 | SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS); |
| 2098 | |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2099 | return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS); |
Matt Arsenault | 3d4233f | 2014-09-29 14:59:38 +0000 | [diff] [blame] | 2100 | } |
| 2101 | } |
| 2102 | |
| 2103 | if (RHS.getOpcode() == ISD::FADD) { |
| 2104 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c |
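| 　 | // (c - (a + a) == -2.0 * a + c, again a single mad.)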
| 2105 | |
| 2106 | SDValue A = RHS.getOperand(0); |
| 2107 | if (A == RHS.getOperand(1)) { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2108 | const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32); |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2109 | return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS); |
Matt Arsenault | 3d4233f | 2014-09-29 14:59:38 +0000 | [diff] [blame] | 2110 | } |
| 2111 | } |
Matt Arsenault | 8d63003 | 2015-02-20 22:10:41 +0000 | [diff] [blame] | 2112 | |
| 2113 | return SDValue(); |
Matt Arsenault | 8675db1 | 2014-08-29 16:01:14 +0000 | [diff] [blame] | 2114 | } |
| 2115 | |
| 2116 | break; |
| 2117 | } |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 2118 | case ISD::LOAD: |
| 2119 | case ISD::STORE: |
| 2120 | case ISD::ATOMIC_LOAD: |
| 2121 | case ISD::ATOMIC_STORE: |
| 2122 | case ISD::ATOMIC_CMP_SWAP: |
| 2123 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
| 2124 | case ISD::ATOMIC_SWAP: |
| 2125 | case ISD::ATOMIC_LOAD_ADD: |
| 2126 | case ISD::ATOMIC_LOAD_SUB: |
| 2127 | case ISD::ATOMIC_LOAD_AND: |
| 2128 | case ISD::ATOMIC_LOAD_OR: |
| 2129 | case ISD::ATOMIC_LOAD_XOR: |
| 2130 | case ISD::ATOMIC_LOAD_NAND: |
| 2131 | case ISD::ATOMIC_LOAD_MIN: |
| 2132 | case ISD::ATOMIC_LOAD_MAX: |
| 2133 | case ISD::ATOMIC_LOAD_UMIN: |
| 2134 | case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics. |
| 2135 | if (DCI.isBeforeLegalize()) |
| 2136 | break; |
Matt Arsenault | 5565f65e | 2014-05-22 18:09:07 +0000 | [diff] [blame] | 2137 | |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 2138 | MemSDNode *MemNode = cast<MemSDNode>(N); |
| 2139 | SDValue Ptr = MemNode->getBasePtr(); |
| 2140 | |
| 2141 | // TODO: We could also do this for multiplies. |
| 2142 | unsigned AS = MemNode->getAddressSpace(); |
| 2143 | if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) { |
| 2144 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI); |
| 2145 | if (NewPtr) { |
Benjamin Kramer | 6cd780f | 2015-02-17 15:29:18 +0000 | [diff] [blame] | 2146 | SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end()); |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 2147 | |
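| 　 | // The base pointer is operand 2 for stores and operand 1 for loads and
| 　 | // atomics.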
| 2148 | NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; |
| 2149 | return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0); |
| 2150 | } |
| 2151 | } |
| 2152 | break; |
| 2153 | } |
Matt Arsenault | d0101a2 | 2015-01-06 23:00:46 +0000 | [diff] [blame] | 2154 | case ISD::AND: |
| 2155 | return performAndCombine(N, DCI); |
Matt Arsenault | f229033 | 2015-01-06 23:00:39 +0000 | [diff] [blame] | 2156 | case ISD::OR: |
| 2157 | return performOrCombine(N, DCI); |
| 2158 | case AMDGPUISD::FP_CLASS: |
| 2159 | return performClassCombine(N, DCI); |
Matt Arsenault | b2baffa | 2014-08-15 17:49:05 +0000 | [diff] [blame] | 2160 | } |
Matt Arsenault | 5565f65e | 2014-05-22 18:09:07 +0000 | [diff] [blame] | 2161 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
Tom Stellard | 75aadc2 | 2012-12-11 21:25:42 +0000 | [diff] [blame] | 2162 | } |
Christian Konig | d910b7d | 2013-02-26 17:52:16 +0000 | [diff] [blame] | 2163 | |
Christian Konig | f82901a | 2013-02-26 17:52:23 +0000 | [diff] [blame] | 2164 | /// \brief Analyze the possible immediate value \p N
| 2165 | ///
| 2166 | /// Returns -1 if it isn't an immediate, 0 if it's an inline immediate,
| 2167 | /// and the immediate value if it's a literal immediate.
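| 　 | /// (On SI, for example, small integers and a handful of floats such as 0.5,
| 　 | /// 1.0 or 2.0 encode as inline immediates; other values need a literal.)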
| 2168 | int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const { |
| 2169 | |
Eric Christopher | 7792e32 | 2015-01-30 23:24:40 +0000 | [diff] [blame] | 2170 | const SIInstrInfo *TII = |
| 2171 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
Christian Konig | f82901a | 2013-02-26 17:52:23 +0000 | [diff] [blame] | 2172 | |
Tom Stellard | edbf1eb | 2013-04-05 23:31:20 +0000 | [diff] [blame] | 2173 | if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) { |
Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 2174 | if (TII->isInlineConstant(Node->getAPIntValue())) |
| 2175 | return 0; |
Christian Konig | f82901a | 2013-02-26 17:52:23 +0000 | [diff] [blame] | 2176 | |
Matt Arsenault | 11a4d67 | 2015-02-13 19:05:03 +0000 | [diff] [blame] | 2177 | uint64_t Val = Node->getZExtValue(); |
| 2178 | return isUInt<32>(Val) ? Val : -1; |
Matt Arsenault | 303011a | 2014-12-17 21:04:08 +0000 | [diff] [blame] | 2179 | } |
| 2180 | |
| 2181 | if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) { |
| 2182 | if (TII->isInlineConstant(Node->getValueAPF().bitcastToAPInt())) |
| 2183 | return 0; |
| 2184 | |
| 2185 | if (Node->getValueType(0) == MVT::f32) |
| 2186 | return FloatToBits(Node->getValueAPF().convertToFloat()); |
| 2187 | |
| 2188 | return -1; |
| 2189 | } |
| 2190 | |
| 2191 | return -1; |
Christian Konig | f82901a | 2013-02-26 17:52:23 +0000 | [diff] [blame] | 2192 | } |
| 2193 | |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2194 | /// \brief Helper function for adjustWritemask |
Benjamin Kramer | 635e368 | 2013-05-23 15:43:05 +0000 | [diff] [blame] | 2195 | static unsigned SubIdx2Lane(unsigned Idx) { |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2196 | switch (Idx) { |
| 2197 | default: return 0; |
| 2198 | case AMDGPU::sub0: return 0; |
| 2199 | case AMDGPU::sub1: return 1; |
| 2200 | case AMDGPU::sub2: return 2; |
| 2201 | case AMDGPU::sub3: return 3; |
| 2202 | } |
| 2203 | } |
| 2204 | |
| 2205 | /// \brief Adjust the writemask of MIMG instructions |
| 2206 | void SITargetLowering::adjustWritemask(MachineSDNode *&Node, |
| 2207 | SelectionDAG &DAG) const { |
| 2208 | SDNode *Users[4] = { }; |
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2209 | unsigned Lane = 0; |
| 2210 | unsigned OldDmask = Node->getConstantOperandVal(0); |
| 2211 | unsigned NewDmask = 0; |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2212 | |
| 2213 | // Try to figure out the used register components |
| 2214 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); |
| 2215 | I != E; ++I) { |
| 2216 | |
| 2217 | // Abort if we can't understand the usage |
| 2218 | if (!I->isMachineOpcode() || |
| 2219 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) |
| 2220 | return; |
| 2221 | |
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2222 | // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used. |
| 2223 | // Note that subregs are packed, i.e. Lane==0 is the first bit set |
| 2224 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit |
| 2225 | // set, etc. |
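| 　 | // For example, with OldDmask == 0b1010 (Y and W enabled), Lane 0 reads
| 　 | // component Y (Comp == 1) and Lane 1 reads component W (Comp == 3).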
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2226 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2227 | |
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2228 | // Set which texture component corresponds to the lane. |
| 2229 | unsigned Comp; |
| 2230 | for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { |
| 2231 | assert(Dmask); |
Tom Stellard | 03a5c08 | 2013-10-23 03:50:25 +0000 | [diff] [blame] | 2232 | Comp = countTrailingZeros(Dmask); |
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2233 | Dmask &= ~(1 << Comp); |
| 2234 | } |
| 2235 | |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2236 | // Abort if we have more than one user per component |
| 2237 | if (Users[Lane]) |
| 2238 | return; |
| 2239 | |
| 2240 | Users[Lane] = *I; |
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2241 | NewDmask |= 1 << Comp; |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2242 | } |
| 2243 | |
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2244 | // Abort if there's no change |
| 2245 | if (NewDmask == OldDmask) |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2246 | return; |
| 2247 | |
| 2248 | // Adjust the writemask in the node |
| 2249 | std::vector<SDValue> Ops; |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2250 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); |
Benjamin Kramer | 6cd780f | 2015-02-17 15:29:18 +0000 | [diff] [blame] | 2251 | Ops.insert(Ops.end(), Node->op_begin() + 1, Node->op_end()); |
Craig Topper | 8c0b4d0 | 2014-04-28 05:57:50 +0000 | [diff] [blame] | 2252 | Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops); |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2253 | |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2254 | // If we only got one lane (i.e. NewDmask has only one bit set), replace it
Tom Stellard | 54774e5 | 2013-10-23 02:53:47 +0000 | [diff] [blame] | 2255 | // with a copy.
| 2256 | if (NewDmask && (NewDmask & (NewDmask-1)) == 0) { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2257 | SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(), |
| 2258 | MVT::i32); |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2259 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, |
Andrew Trick | ef9de2a | 2013-05-25 02:42:55 +0000 | [diff] [blame] | 2260 | SDLoc(), Users[Lane]->getValueType(0), |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2261 | SDValue(Node, 0), RC); |
| 2262 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); |
| 2263 | return; |
| 2264 | } |
| 2265 | |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2266 | // Update the users of the node with the new indices |
| 2267 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) { |
| 2268 | |
| 2269 | SDNode *User = Users[i]; |
| 2270 | if (!User) |
| 2271 | continue; |
| 2272 | |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2273 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2274 | DAG.UpdateNodeOperands(User, User->getOperand(0), Op); |
| 2275 | |
| 2276 | switch (Idx) { |
| 2277 | default: break; |
| 2278 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; |
| 2279 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; |
| 2280 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; |
| 2281 | } |
| 2282 | } |
| 2283 | } |
| 2284 | |
Tom Stellard | c98ee20 | 2015-07-16 19:40:07 +0000 | [diff] [blame] | 2285 | static bool isFrameIndexOp(SDValue Op) { |
| 2286 | if (Op.getOpcode() == ISD::AssertZext) |
| 2287 | Op = Op.getOperand(0); |
| 2288 | |
| 2289 | return isa<FrameIndexSDNode>(Op); |
| 2290 | } |
| 2291 | |
Tom Stellard | 3457a84 | 2014-10-09 19:06:00 +0000 | [diff] [blame] | 2292 | /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG) |
| 2293 | /// with frame index operands. |
| 2294 | /// LLVM assumes that inputs to these instructions are registers.
| 2295 | void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, |
| 2296 | SelectionDAG &DAG) const { |
Tom Stellard | 8dd392e | 2014-10-09 18:09:15 +0000 | [diff] [blame] | 2297 | |
| 2298 | SmallVector<SDValue, 8> Ops; |
Tom Stellard | 3457a84 | 2014-10-09 19:06:00 +0000 | [diff] [blame] | 2299 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { |
Tom Stellard | c98ee20 | 2015-07-16 19:40:07 +0000 | [diff] [blame] | 2300 | if (!isFrameIndexOp(Node->getOperand(i))) { |
Tom Stellard | 3457a84 | 2014-10-09 19:06:00 +0000 | [diff] [blame] | 2301 | Ops.push_back(Node->getOperand(i)); |
Tom Stellard | 8dd392e | 2014-10-09 18:09:15 +0000 | [diff] [blame] | 2302 | continue; |
| 2303 | } |
| 2304 | |
Tom Stellard | 3457a84 | 2014-10-09 19:06:00 +0000 | [diff] [blame] | 2305 | SDLoc DL(Node); |
Tom Stellard | 8dd392e | 2014-10-09 18:09:15 +0000 | [diff] [blame] | 2306 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, |
Tom Stellard | 3457a84 | 2014-10-09 19:06:00 +0000 | [diff] [blame] | 2307 | Node->getOperand(i).getValueType(), |
| 2308 | Node->getOperand(i)), 0)); |
Tom Stellard | 8dd392e | 2014-10-09 18:09:15 +0000 | [diff] [blame] | 2309 | } |
| 2310 | |
Tom Stellard | 3457a84 | 2014-10-09 19:06:00 +0000 | [diff] [blame] | 2311 | DAG.UpdateNodeOperands(Node, Ops); |
Tom Stellard | 8dd392e | 2014-10-09 18:09:15 +0000 | [diff] [blame] | 2312 | } |
| 2313 | |
Matt Arsenault | 08d8494 | 2014-06-03 23:06:13 +0000 | [diff] [blame] | 2314 | /// \brief Fold the instructions after selecting them. |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2315 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, |
| 2316 | SelectionDAG &DAG) const { |
Eric Christopher | 7792e32 | 2015-01-30 23:24:40 +0000 | [diff] [blame] | 2317 | const SIInstrInfo *TII = |
| 2318 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2319 | |
Tom Stellard | 16a9a20 | 2013-08-14 23:24:17 +0000 | [diff] [blame] | 2320 | if (TII->isMIMG(Node->getMachineOpcode())) |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2321 | adjustWritemask(Node, DAG); |
| 2322 | |
Matt Arsenault | 7d858d8 | 2014-11-02 23:46:54 +0000 | [diff] [blame] | 2323 | if (Node->getMachineOpcode() == AMDGPU::INSERT_SUBREG || |
| 2324 | Node->getMachineOpcode() == AMDGPU::REG_SEQUENCE) { |
Tom Stellard | 8dd392e | 2014-10-09 18:09:15 +0000 | [diff] [blame] | 2325 | legalizeTargetIndependentNode(Node, DAG); |
| 2326 | return Node; |
| 2327 | } |
Tom Stellard | 654d669 | 2015-01-08 15:08:17 +0000 | [diff] [blame] | 2328 | return Node; |
Christian Konig | 8e06e2a | 2013-04-10 08:39:08 +0000 | [diff] [blame] | 2329 | } |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2330 | |
| 2331 | /// \brief Assign the register class depending on the number of |
| 2332 | /// bits set in the writemask |
| 2333 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, |
| 2334 | SDNode *Node) const { |
Eric Christopher | 7792e32 | 2015-01-30 23:24:40 +0000 | [diff] [blame] | 2335 | const SIInstrInfo *TII = |
| 2336 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
Matt Arsenault | 7ac9c4a | 2014-09-08 15:07:31 +0000 | [diff] [blame] | 2337 | |
Tom Stellard | a99ada5 | 2014-11-21 22:31:44 +0000 | [diff] [blame] | 2338 | MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); |
Matt Arsenault | 6005fcb | 2015-10-21 21:51:02 +0000 | [diff] [blame] | 2339 | |
| 2340 | if (TII->isVOP3(MI->getOpcode())) { |
| 2341 | // Make sure constant bus requirements are respected. |
| 2342 | TII->legalizeOperandsVOP3(MRI, MI); |
| 2343 | return; |
| 2344 | } |
Matt Arsenault | cb0ac3d | 2014-09-26 17:54:59 +0000 | [diff] [blame] | 2345 | |
Matt Arsenault | 3add643 | 2015-10-20 04:35:43 +0000 | [diff] [blame] | 2346 | if (TII->isMIMG(*MI)) { |
Matt Arsenault | 7ac9c4a | 2014-09-08 15:07:31 +0000 | [diff] [blame] | 2347 | unsigned VReg = MI->getOperand(0).getReg(); |
| 2348 | unsigned Writemask = MI->getOperand(1).getImm(); |
| 2349 | unsigned BitsSet = 0; |
| 2350 | for (unsigned i = 0; i < 4; ++i) |
| 2351 | BitsSet += Writemask & (1 << i) ? 1 : 0; |
| 2352 | |
| 2353 | const TargetRegisterClass *RC; |
| 2354 | switch (BitsSet) { |
| 2355 | default: return; |
Tom Stellard | 45c0b3a | 2015-01-07 20:59:25 +0000 | [diff] [blame] | 2356 | case 1: RC = &AMDGPU::VGPR_32RegClass; break; |
Matt Arsenault | 7ac9c4a | 2014-09-08 15:07:31 +0000 | [diff] [blame] | 2357 | case 2: RC = &AMDGPU::VReg_64RegClass; break; |
| 2358 | case 3: RC = &AMDGPU::VReg_96RegClass; break; |
| 2359 | } |
| 2360 | |
| 2361 | unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet); |
| 2362 | MI->setDesc(TII->get(NewOpcode)); |
Matt Arsenault | 7ac9c4a | 2014-09-08 15:07:31 +0000 | [diff] [blame] | 2363 | MRI.setRegClass(VReg, RC); |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2364 | return; |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2365 | } |
| 2366 | |
Matt Arsenault | 7ac9c4a | 2014-09-08 15:07:31 +0000 | [diff] [blame] | 2367 | // Replace unused atomics with the no return version. |
| 2368 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode()); |
| 2369 | if (NoRetAtomicOp != -1) { |
| 2370 | if (!Node->hasAnyUseOfValue(0)) { |
| 2371 | MI->setDesc(TII->get(NoRetAtomicOp)); |
| 2372 | MI->RemoveOperand(0); |
| 2373 | } |
| 2374 | |
| 2375 | return; |
| 2376 | } |
Christian Konig | 8b1ed28 | 2013-04-10 08:39:16 +0000 | [diff] [blame] | 2377 | } |
Tom Stellard | 0518ff8 | 2013-06-03 17:39:58 +0000 | [diff] [blame] | 2378 | |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2379 | static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2380 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2381 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); |
| 2382 | } |
| 2383 | |
| 2384 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, |
| 2385 | SDLoc DL, |
| 2386 | SDValue Ptr) const { |
Eric Christopher | 7792e32 | 2015-01-30 23:24:40 +0000 | [diff] [blame] | 2387 | const SIInstrInfo *TII = |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2388 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2389 | |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2390 | // Build the 64-bit half that holds the constants before building the full
| 2391 | // 128-bit register. If we are building multiple resource descriptors, this
| 2392 | // will allow CSEing of the 2-component constant register.
| 2393 | const SDValue Ops0[] = { |
| 2394 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), |
| 2395 | buildSMovImm32(DAG, DL, 0), |
| 2396 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
| 2397 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), |
| 2398 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) |
| 2399 | }; |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2400 | |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2401 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, |
| 2402 | MVT::v2i32, Ops0), 0); |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2403 | |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2404 | // Combine the constants and the pointer. |
| 2405 | const SDValue Ops1[] = { |
| 2406 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), |
| 2407 | Ptr, |
| 2408 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), |
| 2409 | SubRegHi, |
| 2410 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) |
| 2411 | }; |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2412 | |
Matt Arsenault | 2d6fdb8 | 2015-09-25 17:08:42 +0000 | [diff] [blame] | 2413 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); |
Matt Arsenault | 485defe | 2014-11-05 19:01:17 +0000 | [diff] [blame] | 2414 | } |
| 2415 | |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2416 | /// \brief Return a resource descriptor with the 'Add TID' bit enabled.
Benjamin Kramer | df005cb | 2015-08-08 18:27:36 +0000 | [diff] [blame] | 2417 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] |
| 2418 | /// of the resource descriptor) to create an offset, which is added to |
| 2419 | /// the resource pointer. |
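| 　 | /// (e.g. with a 4-byte stride, thread N ends up addressing 4 * N bytes past
| 　 | /// the base pointer.)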
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2420 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, |
| 2421 | SDLoc DL, |
| 2422 | SDValue Ptr, |
| 2423 | uint32_t RsrcDword1, |
| 2424 | uint64_t RsrcDword2And3) const { |
| 2425 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); |
| 2426 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); |
| 2427 | if (RsrcDword1) { |
| 2428 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2429 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), |
| 2430 | 0); |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2431 | } |
| 2432 | |
| 2433 | SDValue DataLo = buildSMovImm32(DAG, DL, |
| 2434 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); |
| 2435 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); |
| 2436 | |
| 2437 | const SDValue Ops[] = { |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2438 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2439 | PtrLo, |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2440 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2441 | PtrHi, |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2442 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2443 | DataLo, |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2444 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2445 | DataHi, |
Sergey Dmitrouk | 842a51b | 2015-04-28 14:05:47 +0000 | [diff] [blame] | 2446 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) |
Matt Arsenault | f3cd451 | 2014-11-05 19:01:19 +0000 | [diff] [blame] | 2447 | }; |
| 2448 | |
| 2449 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
| 2450 | } |
| 2451 | |
Tom Stellard | 94593ee | 2013-06-03 17:40:18 +0000 | [diff] [blame] | 2452 | SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, |
| 2453 | const TargetRegisterClass *RC, |
| 2454 | unsigned Reg, EVT VT) const { |
| 2455 | SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); |
| 2456 | |
| 2457 | return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), |
| 2458 | cast<RegisterSDNode>(VReg)->getReg(), VT); |
| 2459 | } |
Tom Stellard | d7e6f13 | 2015-04-08 01:09:26 +0000 | [diff] [blame] | 2460 | |
| 2461 | //===----------------------------------------------------------------------===// |
| 2462 | // SI Inline Assembly Support |
| 2463 | //===----------------------------------------------------------------------===// |
| 2464 | |
| 2465 | std::pair<unsigned, const TargetRegisterClass *> |
| 2466 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, |
Benjamin Kramer | 9bfb627 | 2015-07-05 19:29:18 +0000 | [diff] [blame] | 2467 | StringRef Constraint, |
Tom Stellard | d7e6f13 | 2015-04-08 01:09:26 +0000 | [diff] [blame] | 2468 | MVT VT) const { |
Tom Stellard | b3c3bda | 2015-12-10 02:12:53 +0000 | [diff] [blame] | 2469 | |
| 2470 | if (Constraint.size() == 1) { |
| 2471 | switch (Constraint[0]) { |
| 2472 | case 's': |
| 2473 | case 'r': |
| 2474 | switch (VT.getSizeInBits()) { |
| 2475 | default: |
| 2476 | return std::make_pair(0U, nullptr); |
| 2477 | case 32: |
Tom Stellard | d7e6f13 | 2015-04-08 01:09:26 +0000 | [diff] [blame] | 2478 | return std::make_pair(0U, &AMDGPU::SGPR_32RegClass); |
Tom Stellard | b3c3bda | 2015-12-10 02:12:53 +0000 | [diff] [blame] | 2479 | case 64: |
| 2480 | return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); |
| 2481 | case 128: |
| 2482 | return std::make_pair(0U, &AMDGPU::SReg_128RegClass); |
| 2483 | case 256: |
| 2484 | return std::make_pair(0U, &AMDGPU::SReg_256RegClass); |
| 2485 | } |
| 2486 | |
| 2487 | case 'v': |
| 2488 | switch (VT.getSizeInBits()) { |
| 2489 | default: |
| 2490 | return std::make_pair(0U, nullptr); |
| 2491 | case 32: |
| 2492 | return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); |
| 2493 | case 64: |
| 2494 | return std::make_pair(0U, &AMDGPU::VReg_64RegClass); |
| 2495 | case 96: |
| 2496 | return std::make_pair(0U, &AMDGPU::VReg_96RegClass); |
| 2497 | case 128: |
| 2498 | return std::make_pair(0U, &AMDGPU::VReg_128RegClass); |
| 2499 | case 256: |
| 2500 | return std::make_pair(0U, &AMDGPU::VReg_256RegClass); |
| 2501 | case 512: |
| 2502 | return std::make_pair(0U, &AMDGPU::VReg_512RegClass); |
| 2503 | } |
Tom Stellard | d7e6f13 | 2015-04-08 01:09:26 +0000 | [diff] [blame] | 2504 | } |
| 2505 | } |
| 2506 | |
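| 　 | // Multi-character constraints such as "{v0}" or "{s0}" are intended to
| 　 | // select one specific VGPR or SGPR.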
| 2507 | if (Constraint.size() > 1) { |
| 2508 | const TargetRegisterClass *RC = nullptr; |
| 2509 | if (Constraint[1] == 'v') { |
| 2510 | RC = &AMDGPU::VGPR_32RegClass; |
| 2511 | } else if (Constraint[1] == 's') { |
| 2512 | RC = &AMDGPU::SGPR_32RegClass; |
| 2513 | } |
| 2514 | |
| 2515 | if (RC) { |
Matt Arsenault | 0b554ed | 2015-06-23 02:05:55 +0000 | [diff] [blame] | 2516 | uint32_t Idx; |
| 2517 | bool Failed = Constraint.substr(2).getAsInteger(10, Idx); |
| 2518 | if (!Failed && Idx < RC->getNumRegs()) |
Tom Stellard | d7e6f13 | 2015-04-08 01:09:26 +0000 | [diff] [blame] | 2519 | return std::make_pair(RC->getRegister(Idx), RC); |
| 2520 | } |
| 2521 | } |
| 2522 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 2523 | } |
Tom Stellard | b3c3bda | 2015-12-10 02:12:53 +0000 | [diff] [blame] | 2524 | |
| 2525 | SITargetLowering::ConstraintType |
| 2526 | SITargetLowering::getConstraintType(StringRef Constraint) const { |
| 2527 | if (Constraint.size() == 1) { |
| 2528 | switch (Constraint[0]) { |
| 2529 | default: break; |
| 2530 | case 's': |
| 2531 | case 'v': |
| 2532 | return C_RegisterClass; |
| 2533 | } |
| 2534 | } |
| 2535 | return TargetLowering::getConstraintType(Constraint); |
| 2536 | } |