//===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
using namespace llvm;

/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
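  // The names follow libgcc's machine-mode convention: the qi/hi/si/di/ti
  // suffixes denote 8/16/32/64/128-bit integer modes, and sf/df/xf/tf denote
  // 32/64/80/128-bit floating-point modes.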
  Names[RTLIB::SHL_I16] = "__ashlhi3";
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I16] = "__lshrhi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I16] = "__ashrhi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I8] = "__mulqi3";
  Names[RTLIB::MUL_I16] = "__mulhi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::MULO_I32] = "__mulosi4";
  Names[RTLIB::MULO_I64] = "__mulodi4";
  Names[RTLIB::MULO_I128] = "__muloti4";
  Names[RTLIB::SDIV_I8] = "__divqi3";
  Names[RTLIB::SDIV_I16] = "__divhi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I8] = "__udivqi3";
  Names[RTLIB::UDIV_I16] = "__udivhi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I8] = "__modqi3";
  Names[RTLIB::SREM_I16] = "__modhi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I8] = "__umodqi3";
  Names[RTLIB::UREM_I16] = "__umodhi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";

  // These are generally not available.
  Names[RTLIB::SDIVREM_I8] = 0;
  Names[RTLIB::SDIVREM_I16] = 0;
  Names[RTLIB::SDIVREM_I32] = 0;
  Names[RTLIB::SDIVREM_I64] = 0;
  Names[RTLIB::SDIVREM_I128] = 0;
  Names[RTLIB::UDIVREM_I8] = 0;
  Names[RTLIB::UDIVREM_I16] = 0;
  Names[RTLIB::UDIVREM_I32] = 0;
  Names[RTLIB::UDIVREM_I64] = 0;
  Names[RTLIB::UDIVREM_I128] = 0;

  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_F128] = "__addtf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_F128] = "__subtf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_F128] = "__multf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_F128] = "__divtf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_F128] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::FMA_F32] = "fmaf";
  Names[RTLIB::FMA_F64] = "fma";
  Names[RTLIB::FMA_F80] = "fmal";
  Names[RTLIB::FMA_F128] = "fmal";
  Names[RTLIB::FMA_PPCF128] = "fmal";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_F128] = "__powitf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_F128] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::LOG_F32] = "logf";
  Names[RTLIB::LOG_F64] = "log";
  Names[RTLIB::LOG_F80] = "logl";
  Names[RTLIB::LOG_F128] = "logl";
  Names[RTLIB::LOG_PPCF128] = "logl";
  Names[RTLIB::LOG2_F32] = "log2f";
  Names[RTLIB::LOG2_F64] = "log2";
  Names[RTLIB::LOG2_F80] = "log2l";
  Names[RTLIB::LOG2_F128] = "log2l";
  Names[RTLIB::LOG2_PPCF128] = "log2l";
  Names[RTLIB::LOG10_F32] = "log10f";
  Names[RTLIB::LOG10_F64] = "log10";
  Names[RTLIB::LOG10_F80] = "log10l";
  Names[RTLIB::LOG10_F128] = "log10l";
  Names[RTLIB::LOG10_PPCF128] = "log10l";
  Names[RTLIB::EXP_F32] = "expf";
  Names[RTLIB::EXP_F64] = "exp";
  Names[RTLIB::EXP_F80] = "expl";
  Names[RTLIB::EXP_F128] = "expl";
  Names[RTLIB::EXP_PPCF128] = "expl";
  Names[RTLIB::EXP2_F32] = "exp2f";
  Names[RTLIB::EXP2_F64] = "exp2";
  Names[RTLIB::EXP2_F80] = "exp2l";
  Names[RTLIB::EXP2_F128] = "exp2l";
  Names[RTLIB::EXP2_PPCF128] = "exp2l";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_F128] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_F128] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_F128] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::CEIL_F32] = "ceilf";
  Names[RTLIB::CEIL_F64] = "ceil";
  Names[RTLIB::CEIL_F80] = "ceill";
  Names[RTLIB::CEIL_F128] = "ceill";
  Names[RTLIB::CEIL_PPCF128] = "ceill";
  Names[RTLIB::TRUNC_F32] = "truncf";
  Names[RTLIB::TRUNC_F64] = "trunc";
  Names[RTLIB::TRUNC_F80] = "truncl";
  Names[RTLIB::TRUNC_F128] = "truncl";
  Names[RTLIB::TRUNC_PPCF128] = "truncl";
  Names[RTLIB::RINT_F32] = "rintf";
  Names[RTLIB::RINT_F64] = "rint";
  Names[RTLIB::RINT_F80] = "rintl";
  Names[RTLIB::RINT_F128] = "rintl";
  Names[RTLIB::RINT_PPCF128] = "rintl";
  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
  Names[RTLIB::FLOOR_F32] = "floorf";
  Names[RTLIB::FLOOR_F64] = "floor";
  Names[RTLIB::FLOOR_F80] = "floorl";
  Names[RTLIB::FLOOR_F128] = "floorl";
  Names[RTLIB::FLOOR_PPCF128] = "floorl";
  Names[RTLIB::COPYSIGN_F32] = "copysignf";
  Names[RTLIB::COPYSIGN_F64] = "copysign";
  Names[RTLIB::COPYSIGN_F80] = "copysignl";
  Names[RTLIB::COPYSIGN_F128] = "copysignl";
  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
  Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
  Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
  Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
  Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
  Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::OEQ_F128] = "__eqtf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::UNE_F128] = "__netf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OGE_F128] = "__getf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLT_F128] = "__lttf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OLE_F128] = "__letf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::OGT_F128] = "__gttf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::UO_F128] = "__unordtf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
  Names[RTLIB::O_F128] = "__unordtf2";
  Names[RTLIB::MEMCPY] = "memcpy";
  Names[RTLIB::MEMMOVE] = "memmove";
  Names[RTLIB::MEMSET] = "memset";
  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
}

/// InitLibcallCallingConvs - Set default libcall CallingConvs.
///
static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
  for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
    CCs[i] = CallingConv::C;
  }
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i8)
      return FPTOSINT_F32_I8;
    if (RetVT == MVT::i16)
      return FPTOSINT_F32_I16;
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i8)
      return FPTOSINT_F64_I8;
    if (RetVT == MVT::i16)
      return FPTOSINT_F64_I16;
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i8)
      return FPTOUINT_F32_I8;
    if (RetVT == MVT::i16)
      return FPTOUINT_F32_I16;
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i8)
      return FPTOUINT_F64_I8;
    if (RetVT == MVT::i16)
      return FPTOUINT_F64_I16;
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
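/// These condition codes describe how a comparison libcall's integer result
/// relates to zero. For example, __ltsf2(a, b) returns a value less than zero
/// iff a < b, so soft-float legalization tests the call's result against zero
/// with the SETLT stored here.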
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
}

/// NOTE: The constructor takes ownership of TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
                                       const TargetLoweringObjectFile *tlof)
  : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));

  // Set default actions for various operations.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
    }

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
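  // For example, ARM with VFP3 reports the small set of VMOV-encodable
  // immediates as legal through isFPImmLegal() and expands everything else
  // to a constant-pool load.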
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  setOperationAction(ISD::FLOG , MVT::f16, Expand);
  setOperationAction(ISD::FLOG2, MVT::f16, Expand);
  setOperationAction(ISD::FLOG10, MVT::f16, Expand);
  setOperationAction(ISD::FEXP , MVT::f16, Expand);
  setOperationAction(ISD::FEXP2, MVT::f16, Expand);
  setOperationAction(ISD::FFLOOR, MVT::f16, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::f16, Expand);
  setOperationAction(ISD::FCEIL, MVT::f16, Expand);
  setOperationAction(ISD::FRINT, MVT::f16, Expand);
  setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
  setOperationAction(ISD::FLOG , MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP , MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FFLOOR, MVT::f32, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::f32, Expand);
  setOperationAction(ISD::FCEIL, MVT::f32, Expand);
  setOperationAction(ISD::FRINT, MVT::f32, Expand);
  setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
  setOperationAction(ISD::FLOG , MVT::f64, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
  setOperationAction(ISD::FEXP , MVT::f64, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);
  setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
  setOperationAction(ISD::FCEIL, MVT::f64, Expand);
  setOperationAction(ISD::FRINT, MVT::f64, Expand);
  setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
  setOperationAction(ISD::FLOG , MVT::f128, Expand);
  setOperationAction(ISD::FLOG2, MVT::f128, Expand);
  setOperationAction(ISD::FLOG10, MVT::f128, Expand);
  setOperationAction(ISD::FEXP , MVT::f128, Expand);
  setOperationAction(ISD::FEXP2, MVT::f128, Expand);
  setOperationAction(ISD::FFLOOR, MVT::f128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::f128, Expand);
  setOperationAction(ISD::FCEIL, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP are no different; "Expand" here tells
  // the DAG legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  IsLittleEndian = TD->isLittleEndian();
  PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0));
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  maxStoresPerMemsetOptSize = maxStoresPerMemcpyOptSize
    = maxStoresPerMemmoveOptSize = 4;
  benefitFromCodePlacementOpt = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  JumpIsExpensive = false;
  predictableSelectIsExpensive = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  MinStackArgumentAlignment = 1;
  ShouldFoldAtomicFences = false;
  InsertFencesForAtomic = false;
  SupportJumpTables = true;
  MinimumJumpTableEntries = 4;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);
  InitLibcallCallingConvs(LibcallCallingConvs);
}

TargetLoweringBase::~TargetLoweringBase() {
  delete &TLOF;
}

MVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
  return MVT::getIntegerVT(8*TD->getPointerSize(0));
}

/// canOpTrap - Returns true if the operation can trap for the value type.
/// VT must be a legal type.
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

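/// getVectorTypeBreakdownMVT - MVT-only helper used while computing register
/// properties. For example, on a target where v4f32 is the widest legal
/// vector type, v8f32 is divided into two v4f32 intermediates, so
/// NumIntermediates is 2 and both IntermediateVT and RegisterVT are v4f32.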
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if at least one of the value types that can be
/// represented by the specified register class is legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
       I != E; ++I) {
    if (isTypeLegal(*I))
      return true;
  }
  return false;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
std::pair<const TargetRegisterClass*, uint8_t>
TargetLoweringBase::findRepresentativeClass(MVT VT) const {
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (SuperRC->getSize() <= BestRC->getSize())
      continue;
    if (!isLegalRC(SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
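/// For example, if i32 is the largest legal integer type, i64 is marked
/// TypeExpandInteger (split into two i32 registers) and i16 is marked
/// TypePromoteInteger (widened to i32) by the loops below.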
void TargetLoweringBase::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (const MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native support for
  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (isTypeLegal(VT)) continue;

    // Determine if there is a legal wider type.  If so, we should promote to
    // that wider vector type.
    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    if (NElts != 1 && !shouldSplitVectorElementType(EltVT)) {
      bool IsLegalWiderType = false;
      // First try to promote the elements of integer vectors. If no legal
      // promotion was found, fallback to the widen-vector method.
      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
            && SVT.getVectorNumElements() == NElts &&
            isTypeLegal(SVT) && SVT.getScalarType().isInteger()) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }

      if (IsLegalWiderType) continue;

      // Try to widen the vector.
      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        if (SVT.getVectorElementType() == EltVT &&
            SVT.getVectorNumElements() > NElts &&
            isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType) continue;
    }

    MVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    NumRegistersForVT[i] =
      getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
                                RegisterVT, this);
    RegisterTypeForVT[i] = RegisterVT;

    MVT NVT = VT.getPow2VectorType();
    if (NVT == VT) {
      // Type is already a power of 2.  The default action is to split.
      TransformToType[i] = MVT::Other;
      unsigned NumElts = VT.getVectorNumElements();
      ValueTypeActions.setTypeAction(VT,
          NumElts > 1 ? TypeSplitVector : TypeScalarizeVector);
    } else {
      TransformToType[i] = NVT;
      ValueTypeActions.setTypeAction(VT, TypeWidenVector);
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class
  // for a group of value types. For example, on i386 the representative
  // class for i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(0).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(
                            EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function.  This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
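/// For example, on a typical target where i32 is legal, a function returning
/// 'zeroext i8' produces a single i32 part with the ZExt flag set, because
/// extended integer returns are promoted to at least 32 bits below.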
void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
    MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret: return 0;
  case Br: return 0;
  case Switch: return 0;
  case IndirectBr: return 0;
  case Invoke: return 0;
  case Resume: return 0;
  case Unreachable: return 0;
  case Add: return ISD::ADD;
  case FAdd: return ISD::FADD;
  case Sub: return ISD::SUB;
  case FSub: return ISD::FSUB;
  case Mul: return ISD::MUL;
  case FMul: return ISD::FMUL;
  case UDiv: return ISD::UDIV;
  case SDiv: return ISD::SDIV;
  case FDiv: return ISD::FDIV;
  case URem: return ISD::UREM;
  case SRem: return ISD::SREM;
  case FRem: return ISD::FREM;
  case Shl: return ISD::SHL;
  case LShr: return ISD::SRL;
  case AShr: return ISD::SRA;
  case And: return ISD::AND;
  case Or: return ISD::OR;
  case Xor: return ISD::XOR;
  case Alloca: return 0;
  case Load: return ISD::LOAD;
  case Store: return ISD::STORE;
  case GetElementPtr: return 0;
  case Fence: return 0;
  case AtomicCmpXchg: return 0;
  case AtomicRMW: return 0;
  case Trunc: return ISD::TRUNCATE;
  case ZExt: return ISD::ZERO_EXTEND;
  case SExt: return ISD::SIGN_EXTEND;
  case FPToUI: return ISD::FP_TO_UINT;
  case FPToSI: return ISD::FP_TO_SINT;
  case UIToFP: return ISD::UINT_TO_FP;
  case SIToFP: return ISD::SINT_TO_FP;
  case FPTrunc: return ISD::FP_ROUND;
  case FPExt: return ISD::FP_EXTEND;
  case PtrToInt: return ISD::BITCAST;
  case IntToPtr: return ISD::BITCAST;
  case BitCast: return ISD::BITCAST;
  case ICmp: return ISD::SETCC;
  case FCmp: return ISD::SETCC;
  case PHI: return 0;
  case Call: return 0;
  case Select: return ISD::SELECT;
  case UserOp1: return 0;
  case UserOp2: return 0;
  case VAArg: return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement: return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
  case ExtractValue: return ISD::MERGE_VALUES;
  case InsertValue: return ISD::MERGE_VALUES;
  case LandingPad: return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<unsigned, MVT>
TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(Ty);

  unsigned Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
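  // For example, on a target whose widest legal vector type is v2i64, v8i64
  // is split twice (v8i64 -> 2 x v4i64 -> 4 x v2i64), so the loop below
  // returns a cost of 4 with a legalized type of MVT::v2i64.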
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
                                               Type *Ty) const {
  // The default implementation supports a conservative RISC-style r+r and
  // r+i addressing mode.
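  // For example, {BaseGV = 0, BaseOffs = 4, HasBaseReg = true, Scale = 0}
  // (an "r+i" access) is accepted, while {HasBaseReg = true, Scale = 2}
  // ("2*r+r") is rejected by the checks below.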

  // Allow a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Beyond that, only a few base/scale combinations are supported:
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow n*r for any other scale.
    return false;
  }

  return true;
}