//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPC.h"
#include "PPCInstrInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>

namespace llvm {

  namespace PPCISD {

    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
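      /// A typical i64 -> f64 conversion built around FCFID is (a sketch,
      /// not necessarily the exact selected code): store the integer to
      /// memory, reload it with lfd, then fcfid to produce the double.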
      FCFID,

      /// Newer FCFID[US] integer-to-floating-point conversion instructions for
      /// unsigned integers and single-precision outputs.
      FCFIDU, FCFIDS, FCFIDUS,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
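      /// A common f64 -> i32 store sequence built from these nodes is
      /// (roughly): fctiwz f0, f1 ; stfiwx f0, 0, r3.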
      FCTIDZ, FCTIWZ,

      /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions
      /// for unsigned integers with round toward zero.
      FCTIDUZ, FCTIWUZ,

      /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
      /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
      VEXTS,

      /// Reciprocal estimate instructions (unary FP ops).
      FRE, FRSQRTE,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM Instruction.
      ///
      VPERM,

      /// XXSPLT - The PPC VSX splat instructions.
      ///
      XXSPLT,

      /// XXINSERT - The PPC VSX insert instruction.
      ///
      XXINSERT,

      /// VECSHL - The PPC VSX shift left instruction.
      ///
      VECSHL,

      /// The CMPB instruction (takes two operands of i32 or i64).
      CMPB,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively. These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be a
      /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
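      /// For example (an illustrative sketch), materializing a global address
      /// typically ends up as: lis r3, sym\@ha ; addi r3, r3, sym\@l.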
      Hi, Lo,

      /// The following two target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an allocation on the stack.
      DYNALLOC,

      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an offset from native SP to the address of the most recent
      /// dynamic alloca.
      DYNAREAOFFSET,

      /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts. These nodes are generated by the multi-precision shift
      /// code.
      SRL, SRA, SHL,

      /// The combination of sra[wd]i and addze used to implement signed
      /// integer division by a power of 2. The first operand is the dividend,
      /// and the second is the constant shift amount (representing the
      /// divisor).
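      /// For example (a sketch), an i32 sdiv by 4 is selected roughly as:
      /// srawi r3, r3, 2 ; addze r3, r3.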
      SRA_ADDZE,

      /// CALL - A direct function call.
      /// CALL_NOP is a call with the special NOP which follows 64-bit
      /// SVR4 calls.
      CALL, CALL_NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL,

      /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
      /// instruction and the TOC reload required on SVR4 PPC64.
      BCTRL_LOAD_TOC,

      /// Return with a flag operand, matched by 'blr'
      RET_FLAG,

      /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
      /// This copies the bits corresponding to the specified CRREG into the
      /// resultant GPR. Bits corresponding to other CR regs are undefined.
      MFOCRF,

      /// Direct move from a VSX register to a GPR
      MFVSR,

      /// Direct move from a GPR to a VSX register (algebraic)
      MTVSRA,

      /// Direct move from a GPR to a VSX register (zero)
      MTVSRZ,

      /// Extract a subvector from signed integer vector and convert to FP.
      /// It is primarily used to convert a (widened) illegal integer vector
      /// type to a legal floating point vector type.
      /// For example v2i32 -> widened to v4i32 -> v2f64
      SINT_VEC_TO_FP,

      /// Extract a subvector from unsigned integer vector and convert to FP.
      /// As with SINT_VEC_TO_FP, used for converting illegal types.
      UINT_VEC_TO_FP,

      // FIXME: Remove these once the ANDI glue bug is fixed:
      /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
      /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
      /// implement truncation of i32 or i64 to i1.
      ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,

      // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
      // target (returns (Lo, Hi)). It takes a chain operand.
      READ_TIME_BASE,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions. For lack of a better number, we use the opcode number
      /// encoding for the OPC field to identify the compare. For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions. For lack of a better number, we use the
      /// opcode number encoding for the OPC field to identify the compare. For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use (e.g.
      /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
      /// an optional input flag argument.
      COND_BRANCH,

      /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
      /// loops.
      BDNZ, BDZ,

      /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
      /// towards zero. Used only as part of the long double-to-int
      /// conversion sequence.
      FADDRTZ,

      /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
      MFFS,

      /// TC_RETURN - A tail call return.
      /// operand #0 chain
      /// operand #1 callee (register or absolute)
      /// operand #2 stack adjustment
      /// operand #3 optional in flag
      TC_RETURN,

      /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
      CR6SET,
      CR6UNSET,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
      /// on PPC32.
      PPC32_GOT,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
      /// local dynamic TLS on PPC32.
      PPC32_PICGOT,

      /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
      /// TLS model, produces an ADDIS8 instruction that adds the GOT
      /// base to sym\@got\@tprel\@ha.
      ADDIS_GOT_TPREL_HA,

      /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
      /// TLS model, produces a LD instruction with base register G8RReg
      /// and offset sym\@got\@tprel\@l. This completes the addition that
      /// finds the offset of "sym" relative to the thread pointer.
      LD_GOT_TPREL_L,

      /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
      /// model, produces an ADD instruction that adds the contents of
      /// G8RReg to the thread pointer. Symbol contains a relocation
      /// sym\@tls which is to be replaced by the thread pointer and
      /// identifies to the linker that the instruction is part of a
      /// TLS sequence.
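      /// The full initial-exec sequence these three nodes model is roughly
      /// (a sketch): addis r, r2, sym\@got\@tprel\@ha ;
      /// ld r, sym\@got\@tprel\@l(r) ; add r, r, sym\@tls.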
      ADD_TLS,

      /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsgd\@ha.
      ADDIS_TLSGD_HA,

      /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
      /// ADDIS_TLSGD_L_ADDR until after register assignment.
      ADDI_TLSGD_L,

      /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
      /// ADDIS_TLSGD_L_ADDR until after register assignment.
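      /// The complete general-dynamic sequence is therefore roughly
      /// (a sketch): addis r3, r2, sym\@got\@tlsgd\@ha ;
      /// addi r3, r3, sym\@got\@tlsgd\@l ; bl __tls_get_addr(sym\@tlsgd) ; nop.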
      GET_TLS_ADDR,

      /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
      /// register assignment.
      ADDI_TLSGD_L_ADDR,

      /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsld\@ha.
      ADDIS_TLSLD_HA,

      /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
      /// ADDIS_TLSLD_L_ADDR until after register assignment.
      ADDI_TLSLD_L,

      /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
      /// ADDIS_TLSLD_L_ADDR until after register assignment.
      GET_TLSLD_ADDR,

      /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
      /// following register assignment.
      ADDI_TLSLD_L_ADDR,

      /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds X3 to
      /// sym\@dtprel\@ha.
      ADDIS_DTPREL_HA,

      /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@dtprel\@l.
      ADDI_DTPREL_L,

      /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
      /// during instruction selection to optimize a BUILD_VECTOR into
      /// operations on splats. This is necessary to avoid losing these
      /// optimizations due to constant folding.
      VADD_SPLAT,

      /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
      /// operand identifies the operating system entry point.
      SC,

      /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
      CLRBHRB,

      /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
      /// history rolling buffer entry.
      MFBHRBE,

      /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
      RFEBB,

      /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
      /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
      /// or stxvd2x instruction. The chain is necessary because the
      /// sequence replaces a load and needs to provide the same number
      /// of outputs.
      XXSWAPD,

      /// An SDNode for swaps that are not associated with any loads/stores
      /// and thereby have no chain.
      SWAP_NO_CHAIN,

      /// QVFPERM = This corresponds to the QPX qvfperm instruction.
      QVFPERM,

      /// QVGPCI = This corresponds to the QPX qvgpci instruction.
      QVGPCI,

      /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
      QVALIGNI,

      /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
      QVESPLATI,

      /// QBFLT = Access the underlying QPX floating-point boolean
      /// representation.
      QBFLT,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr. Type can be either i16 or
      /// i32.
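      /// For example (illustratively), a byte-swapped i32 store is normally
      /// selected as a single stwbrx instruction.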
      STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC. Type can be either i16
      /// or i32.
      LBRX,

      /// STFIWX - The STFIWX instruction. The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
      /// load which sign-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWAX,

      /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
      /// load which zero-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWZX,

      /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
      /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
      /// This can be used for converting loaded integers to floating point.
      LXSIZX,

      /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
      /// chain, then an f64 value to store, then an address to store it to,
      /// followed by a byte-width for the store.
      STXSIX,

      /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
      /// Maps directly to an lxvd2x instruction that will be followed by
      /// an xxswapd.
      LXVD2X,

      /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
      /// Maps directly to an stxvd2x instruction that will be preceded by
      /// an xxswapd.
      STXVD2X,

      /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
      /// The 4xf32 load used for v4i1 constants.
      QVLFSb,

      /// GPRC = TOC_ENTRY GA, TOC
      /// Loads the entry for GA from the TOC, where the TOC base is given by
      /// the last operand.
      TOC_ENTRY
    };

  } // end namespace PPCISD

  /// Define some predicates that are used for node matching.
  namespace PPC {

    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUDUM instruction.
    bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGEW or VMRGOW instruction.
    bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                             unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                            SelectionDAG &DAG);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
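    /// For example (an illustrative case), a shuffle that splats element 0 of
    /// a v4i32 value can be matched to a single vspltw with an immediate of 0.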
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
    /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
    /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
    /// vector into the other. This function will also set a couple of
    /// output parameters for how much the source vector needs to be shifted and
    /// what byte number needs to be specified for the instruction to put the
    /// element in the desired location of the target vector.
    bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                         unsigned &InsertAtByte, bool &Swap, bool IsLE);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
    /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted. The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);

    /// If this is a qvaligni shuffle mask, return the shift
    /// amount, otherwise return -1.
    int isQVALIGNIShuffleMask(SDNode *N);

  } // end namespace PPC

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &Subtarget;

  public:
    explicit PPCTargetLowering(const PPCTargetMachine &TM,
                               const PPCSubtarget &STI);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// getPreferredVectorAction - The code we generate when vector types are
    /// legalized by promoting the integer element type is often much worse
    /// than code we generate if we widen the type for applicable vector types.
    /// The issue with promoting is that the vector is scalarized, the
    /// individual elements are promoted, and then the vector is rebuilt. So say
    /// we load a pair of v4i8's and shuffle them. This will turn into a mess of
    /// 8 extending loads, moves back into VSRs (or memory ops if we don't have
    /// moves) and then the VPERM for the shuffle. All in all a very slow
    /// sequence.
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
      const override {
      if (VT.getScalarSizeInBits() % 8 == 0)
        return TypeWidenVector;
      return TargetLoweringBase::getPreferredVectorAction(VT);
    }

    bool useSoftFloat() const override;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
      return MVT::i32;
    }

    bool isCheapToSpeculateCttz() const override {
      return true;
    }

    bool isCheapToSpeculateCtlz() const override {
      return true;
    }

    bool isCtlzFast() const override {
      return true;
    }

    bool hasAndNotCompare(SDValue) const override {
      return true;
    }

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    bool supportSplitCSR(MachineFunction *MF) const override {
      return
        MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
        MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
    }

    void initializeSplitCSR(MachineBasicBlock *Entry) const override;

    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    /// getSetCCResultType - Return the ISD::SETCC ValueType
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    /// Return true if the target always benefits from combining into FMA for a
    /// given value type. This must typically return false on targets where FMA
    /// takes more cycles to execute than FADD.
    bool enableAggressiveFMAFusion(EVT VT) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                   SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation. Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg. If Aligned is true, only accept
    /// displacements suitable for STD and friends, i.e. multiples of 4.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG, bool Aligned) const;

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          std::vector<SDNode *> *Created) const override;

    unsigned getRegisterByName(const char* RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op,
                                       KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    unsigned getPrefLoopAlignment(MachineLoop *ML) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true;
    }

    Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                  AtomicOrdering Ord) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                   AtomicOrdering Ord) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                        MachineBasicBlock *MBB,
                                        unsigned AtomicSize,
                                        unsigned BinOpcode,
                                        unsigned CmpOpcode = 0,
                                        unsigned CmpPred = 0) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode,
                                                unsigned CmpOpcode = 0,
                                                unsigned CmpPred = 0) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. This is the actual
    /// alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "es")
        return InlineAsm::Constraint_es;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "Z")
        return InlineAsm::Constraint_Z;
      else if (ConstraintCode == "Zy")
        return InlineAsm::Constraint_Zy;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is legal
    /// icmp immediate, that is the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is legal
    /// add immediate, that is the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
    /// register X1 to i32 by referencing its sub-register R1.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isFPExtFree(EVT VT) const override;

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool convertSelectOfConstantsToMath() const override {
      return true;
    }

    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy any
    /// constraint and does not need to be checked. Similarly, if SrcAlign is
    /// zero there is no need to check it against an alignment requirement,
    /// probably because the source does not need to be loaded. If 'IsMemset' is
    /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
    /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
    /// source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const override;

    /// Is unaligned memory access allowed for the given type, and is it fast
    /// relative to software emulation.
    bool allowsMisalignedMemoryAccesses(EVT VT,
                                        unsigned AddrSpace,
                                        unsigned Align = 1,
                                        bool *Fast = nullptr) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

    // Should we expand the build vector with shuffles?
    bool
    shouldExpandBuildVectorWithShuffles(EVT VT,
                                        unsigned DefinedValues) const override;

    /// createFastISel - This method returns a target-specific FastISel object,
    /// or null if the target does not support "fast" instruction selection.
    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
      // We support any array type as "consecutive" block in the parameter
      // save area. The element type defines the alignment requirement and
      // whether the argument should go in GPRs, FPRs, or VRs if available.
      //
      // Note that clang uses this capability both to implement the ELFv2
      // homogeneous float/vector aggregate ABI, and to avoid having to use
      // "byval" when passing aggregates that might fully fit in registers.
| 780 | return Ty->isArrayTy(); |
| 781 | } |
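    // Usage sketch for the predicate above (hypothetical caller code): any IR
    // array type is reported as needing consecutive registers, while a scalar
    // type is not.
    //   LLVMContext Ctx;
    //   Type *Arr = ArrayType::get(Type::getFloatTy(Ctx), 4); // [4 x float]
    //   Type *Scl = Type::getDoubleTy(Ctx);
    //   // functionArgumentNeedsConsecutiveRegisters(Arr, CallingConv::C,
    //   //                                           false)  --> true
    //   // functionArgumentNeedsConsecutiveRegisters(Scl, CallingConv::C,
    //   //                                           false)  --> false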
| 782 | |
Joseph Tremoulet | f748c89 | 2015-11-07 01:11:31 +0000 | [diff] [blame] | 783 | /// If a physical register, this returns the register that receives the |
| 784 | /// exception address on entry to an EH pad. |
| 785 | unsigned |
| 786 | getExceptionPointerRegister(const Constant *PersonalityFn) const override; |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 787 | |
Joseph Tremoulet | f748c89 | 2015-11-07 01:11:31 +0000 | [diff] [blame] | 788 | /// If a physical register, this returns the register that receives the |
| 789 | /// exception typeid on entry to a landing pad. |
| 790 | unsigned |
| 791 | getExceptionSelectorRegister(const Constant *PersonalityFn) const override; |
| 792 | |
Tim Shen | a1d8bc5 | 2016-04-19 20:14:52 +0000 | [diff] [blame] | 793 | /// Override to support customized stack guard loading. |
| 794 | bool useLoadStackGuardNode() const override; |
| 795 | void insertSSPDeclarations(Module &M) const override; |
| 796 | |
Ehsan Amiri | c90b02c | 2016-10-24 17:31:09 +0000 | [diff] [blame] | 797 | bool isFPImmLegal(const APFloat &Imm, EVT VT) const override; |
Joerg Sonnenberger | 8c1a9ac | 2016-11-16 00:37:30 +0000 | [diff] [blame] | 798 | |
| 799 | unsigned getJumpTableEncoding() const override; |
| 800 | bool isJumpTableRelative() const override; |
| 801 | SDValue getPICJumpTableRelocBase(SDValue Table, |
| 802 | SelectionDAG &DAG) const override; |
| 803 | const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF, |
| 804 | unsigned JTI, |
| 805 | MCContext &Ctx) const override; |
| 806 | |
Joseph Tremoulet | f748c89 | 2015-11-07 01:11:31 +0000 | [diff] [blame] | 807 | private: |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 808 | struct ReuseLoadInfo { |
| 809 | SDValue Ptr; |
| 810 | SDValue Chain; |
| 811 | SDValue ResChain; |
| 812 | MachinePointerInfo MPI; |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 813 | bool IsDereferenceable = false; |
| 814 | bool IsInvariant = false; |
| 815 | unsigned Alignment = 0; |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 816 | AAMDNodes AAInfo; |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 817 | const MDNode *Ranges = nullptr; |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 818 | |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 819 | ReuseLoadInfo() = default; |
Justin Lebar | adbf09e | 2016-09-11 01:38:58 +0000 | [diff] [blame] | 820 | |
| 821 | MachineMemOperand::Flags MMOFlags() const { |
| 822 | MachineMemOperand::Flags F = MachineMemOperand::MONone; |
| 823 | if (IsDereferenceable) |
| 824 | F |= MachineMemOperand::MODereferenceable; |
| 825 | if (IsInvariant) |
| 826 | F |= MachineMemOperand::MOInvariant; |
| 827 | return F; |
| 828 | } |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 829 | }; |
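    // Illustrative use of ReuseLoadInfo (assuming an initialized RLI, a
    // SelectionDAG DAG, an SDLoc dl, and the memory type MemVT): a lowering
    // routine that re-materializes the load can forward the captured operands
    // and MMO flags, e.g.
    //   SDValue NewLD =
    //       DAG.getLoad(MemVT, dl, RLI.Chain, RLI.Ptr, RLI.MPI, RLI.Alignment,
    //                   RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);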
| 830 | |
| 831 | bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI, |
Hal Finkel | 6c39269 | 2015-01-09 01:34:30 +0000 | [diff] [blame] | 832 | SelectionDAG &DAG, |
| 833 | ISD::LoadExtType ET = ISD::NON_EXTLOAD) const; |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 834 | void spliceIntoChain(SDValue ResChain, SDValue NewResChain, |
| 835 | SelectionDAG &DAG) const; |
| 836 | |
| 837 | void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 838 | SelectionDAG &DAG, const SDLoc &dl) const; |
Nemanja Ivanovic | c38b531 | 2015-04-11 10:40:42 +0000 | [diff] [blame] | 839 | SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 840 | const SDLoc &dl) const; |
Guozhi Wei | 1fd553c | 2016-12-12 22:09:02 +0000 | [diff] [blame] | 841 | |
| 842 | bool directMoveIsProfitable(const SDValue &Op) const; |
Nemanja Ivanovic | c38b531 | 2015-04-11 10:40:42 +0000 | [diff] [blame] | 843 | SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 844 | const SDLoc &dl) const; |
Hal Finkel | ed844c4 | 2015-01-06 22:31:02 +0000 | [diff] [blame] | 845 | |
Dan Gohman | 2ce6f2a | 2008-07-27 21:46:04 +0000 | [diff] [blame] | 846 | SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const; |
| 847 | SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const; |
Arnold Schwaighofer | be0de34 | 2008-04-30 09:16:33 +0000 | [diff] [blame] | 848 | |
Evan Cheng | 67a69dd | 2010-01-27 00:07:07 +0000 | [diff] [blame] | 849 | bool |
| 850 | IsEligibleForTailCallOptimization(SDValue Callee, |
| 851 | CallingConv::ID CalleeCC, |
| 852 | bool isVarArg, |
| 853 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 854 | SelectionDAG& DAG) const; |
| 855 | |
Chuang-Yu Cheng | 2e5973e | 2016-04-06 02:04:38 +0000 | [diff] [blame] | 856 | bool |
| 857 | IsEligibleForTailCallOptimization_64SVR4( |
| 858 | SDValue Callee, |
| 859 | CallingConv::ID CalleeCC, |
| 860 | ImmutableCallSite *CS, |
| 861 | bool isVarArg, |
| 862 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 863 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 864 | SelectionDAG& DAG) const; |
| 865 | |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 866 | SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff, |
| 867 | SDValue Chain, SDValue &LROpOut, |
Eric Christopher | e0d09ba | 2016-07-07 01:08:21 +0000 | [diff] [blame] | 868 | SDValue &FPOpOut, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 869 | const SDLoc &dl) const; |
Arnold Schwaighofer | be0de34 | 2008-04-30 09:16:33 +0000 | [diff] [blame] | 870 | |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 871 | SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; |
| 872 | SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; |
| 873 | SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; |
| 874 | SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; |
Roman Divacky | e3f15c98 | 2012-06-04 17:36:38 +0000 | [diff] [blame] | 875 | SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 876 | SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 877 | SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; |
| 878 | SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; |
Duncan Sands | a098436 | 2011-09-06 13:37:06 +0000 | [diff] [blame] | 879 | SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; |
| 880 | SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; |
Eric Christopher | b976a39 | 2016-07-07 00:39:27 +0000 | [diff] [blame] | 881 | SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; |
| 882 | SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const; |
| 883 | SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const; |
| 884 | SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const; |
| 885 | SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const; |
| 886 | SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; |
Hal Finkel | 5081ac2 | 2016-09-01 10:28:47 +0000 | [diff] [blame] | 887 | SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const; |
Hal Finkel | 940ab93 | 2014-02-28 00:27:01 +0000 | [diff] [blame] | 888 | SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const; |
| 889 | SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; |
| 890 | SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 891 | SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 892 | SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, |
| 893 | const SDLoc &dl) const; |
Hal Finkel | f6d45f2 | 2013-04-01 17:52:07 +0000 | [diff] [blame] | 894 | SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 895 | SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; |
| 896 | SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const; |
| 897 | SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const; |
| 898 | SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const; |
| 899 | SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; |
| 900 | SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; |
Nemanja Ivanovic | d5deb48 | 2016-09-14 14:19:09 +0000 | [diff] [blame] | 901 | SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; |
Hal Finkel | c93a9a2 | 2015-02-25 01:06:45 +0000 | [diff] [blame] | 902 | SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 903 | SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; |
| 904 | SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const; |
Hal Finkel | 5c0d145 | 2014-03-30 13:22:59 +0000 | [diff] [blame] | 905 | SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 906 | SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 907 | |
Hal Finkel | c93a9a2 | 2015-02-25 01:06:45 +0000 | [diff] [blame] | 908 | SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const; |
| 909 | SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const; |
| 910 | |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 911 | SDValue LowerCallResult(SDValue Chain, SDValue InFlag, |
Sandeep Patel | 68c5f47 | 2009-09-02 08:44:58 +0000 | [diff] [blame] | 912 | CallingConv::ID CallConv, bool isVarArg, |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 913 | const SmallVectorImpl<ISD::InputArg> &Ins, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 914 | const SDLoc &dl, SelectionDAG &DAG, |
Dan Gohman | 21cea8a | 2010-04-17 15:26:15 +0000 | [diff] [blame] | 915 | SmallVectorImpl<SDValue> &InVals) const; |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 916 | SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl, |
Eric Christopher | 2454a3b | 2016-07-07 01:08:23 +0000 | [diff] [blame] | 917 | bool isTailCall, bool isVarArg, bool isPatchPoint, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 918 | bool hasNest, SelectionDAG &DAG, |
| 919 | SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, |
Hal Finkel | e2ab0f1 | 2015-01-15 21:17:34 +0000 | [diff] [blame] | 920 | SDValue InFlag, SDValue Chain, SDValue CallSeqStart, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 921 | SDValue &Callee, int SPDiff, unsigned NumBytes, |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 922 | const SmallVectorImpl<ISD::InputArg> &Ins, |
Hal Finkel | e2ab0f1 | 2015-01-15 21:17:34 +0000 | [diff] [blame] | 923 | SmallVectorImpl<SDValue> &InVals, |
| 924 | ImmutableCallSite *CS) const; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 925 | |
Craig Topper | 0d3fa92 | 2014-04-29 07:57:37 +0000 | [diff] [blame] | 926 | SDValue |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 927 | LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 928 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 929 | const SDLoc &dl, SelectionDAG &DAG, |
| 930 | SmallVectorImpl<SDValue> &InVals) const override; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 931 | |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 932 | SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, |
| 933 | SmallVectorImpl<SDValue> &InVals) const override; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 934 | |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 935 | bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, |
| 936 | bool isVarArg, |
| 937 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 938 | LLVMContext &Context) const override; |
Hal Finkel | 450128a | 2011-10-14 19:51:36 +0000 | [diff] [blame] | 939 | |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 940 | SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 941 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 942 | const SmallVectorImpl<SDValue> &OutVals, |
| 943 | const SDLoc &dl, SelectionDAG &DAG) const override; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 944 | |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 945 | SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, |
| 946 | SelectionDAG &DAG, SDValue ArgVal, |
| 947 | const SDLoc &dl) const; |
Bill Schmidt | 57d6de5 | 2012-10-23 15:51:16 +0000 | [diff] [blame] | 948 | |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 949 | SDValue LowerFormalArguments_Darwin( |
| 950 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 951 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 952 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; |
| 953 | SDValue LowerFormalArguments_64SVR4( |
| 954 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 955 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 956 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; |
| 957 | SDValue LowerFormalArguments_32SVR4( |
| 958 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 959 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 960 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const; |
Dan Gohman | f9bbcd1 | 2009-08-05 01:29:28 +0000 | [diff] [blame] | 961 | |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 962 | SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, |
| 963 | SDValue CallSeqStart, |
| 964 | ISD::ArgFlagsTy Flags, SelectionDAG &DAG, |
| 965 | const SDLoc &dl) const; |
Bill Schmidt | 57d6de5 | 2012-10-23 15:51:16 +0000 | [diff] [blame] | 966 | |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 967 | SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee, |
| 968 | CallingConv::ID CallConv, bool isVarArg, |
Eric Christopher | 2454a3b | 2016-07-07 01:08:23 +0000 | [diff] [blame] | 969 | bool isTailCall, bool isPatchPoint, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 970 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 971 | const SmallVectorImpl<SDValue> &OutVals, |
| 972 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 973 | const SDLoc &dl, SelectionDAG &DAG, |
| 974 | SmallVectorImpl<SDValue> &InVals, |
| 975 | ImmutableCallSite *CS) const; |
| 976 | SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, |
| 977 | CallingConv::ID CallConv, bool isVarArg, |
Eric Christopher | 2454a3b | 2016-07-07 01:08:23 +0000 | [diff] [blame] | 978 | bool isTailCall, bool isPatchPoint, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 979 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 980 | const SmallVectorImpl<SDValue> &OutVals, |
| 981 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 982 | const SDLoc &dl, SelectionDAG &DAG, |
| 983 | SmallVectorImpl<SDValue> &InVals, |
| 984 | ImmutableCallSite *CS) const; |
| 985 | SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee, |
| 986 | CallingConv::ID CallConv, bool isVarArg, |
Eric Christopher | 2454a3b | 2016-07-07 01:08:23 +0000 | [diff] [blame] | 987 | bool isTailCall, bool isPatchPoint, |
Benjamin Kramer | bdc4956 | 2016-06-12 15:39:02 +0000 | [diff] [blame] | 988 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 989 | const SmallVectorImpl<SDValue> &OutVals, |
| 990 | const SmallVectorImpl<ISD::InputArg> &Ins, |
| 991 | const SDLoc &dl, SelectionDAG &DAG, |
| 992 | SmallVectorImpl<SDValue> &InVals, |
| 993 | ImmutableCallSite *CS) const; |
Hal Finkel | 756810f | 2013-03-21 21:37:52 +0000 | [diff] [blame] | 994 | |
| 995 | SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; |
| 996 | SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const; |
Hal Finkel | 2e10331 | 2013-04-03 04:01:11 +0000 | [diff] [blame] | 997 | |
Hal Finkel | 940ab93 | 2014-02-28 00:27:01 +0000 | [diff] [blame] | 998 | SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const; |
Nemanja Ivanovic | 44513e5 | 2016-07-05 09:22:29 +0000 | [diff] [blame] | 999 | SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const; |
Hal Finkel | 940ab93 | 2014-02-28 00:27:01 +0000 | [diff] [blame] | 1000 | SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const; |
Hal Finkel | 5efb918 | 2015-01-06 06:01:57 +0000 | [diff] [blame] | 1001 | SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const; |
Sanjay Patel | bdf1e38 | 2014-09-26 23:01:47 +0000 | [diff] [blame] | 1002 | |
Ehsan Amiri | 8581868 | 2016-11-18 10:41:44 +0000 | [diff] [blame] | 1003 | /// ConvertSETCCToSubtract - looks at a SETCC node that compares integers |
| 1004 | /// and replaces it with an integer subtraction when (1) it is legal to do |
| 1005 | /// so and (2) keeping the comparison result in a GPR improves performance. |
| 1006 | SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const; |
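    // For intuition only (not necessarily the exact sequence this combine
    // emits): an i32 inequality can be kept entirely in GPRs with wrapping
    // unsigned arithmetic as
    //   (a != b)  ==>  ((a - b) | (b - a)) lshr 31
    // which avoids moving a condition-register bit into a GPR when the 0/1
    // result feeds further integer computation.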
| 1007 | |
Evandro Menezes | 21f9ce1 | 2016-11-10 23:31:06 +0000 | [diff] [blame] | 1008 | SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, |
| 1009 | int &RefinementSteps, bool &UseOneConstNR, |
| 1010 | bool Reciprocal) const override; |
Sanjay Patel | 0051efc | 2016-10-20 16:55:45 +0000 | [diff] [blame] | 1011 | SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, |
| 1012 | int &RefinementSteps) const override; |
Sanjay Patel | 1dd1559 | 2015-07-28 23:05:48 +0000 | [diff] [blame] | 1013 | unsigned combineRepeatedFPDivisors() const override; |
Bill Schmidt | 8c3976e | 2013-08-26 20:11:46 +0000 | [diff] [blame] | 1014 | |
| 1015 | CCAssignFn *useFastISelCCs(unsigned Flag) const; |
Nemanja Ivanovic | 8c11e79 | 2016-11-29 23:36:03 +0000 | [diff] [blame] | 1016 | |
| 1017 | SDValue |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 1018 | combineElementTruncationToVectorTruncation(SDNode *N, |
| 1019 | DAGCombinerInfo &DCI) const; |
Tim Shen | e59d06f | 2017-05-03 00:07:02 +0000 | [diff] [blame] | 1020 | |
| 1021 | bool supportsModuloShift(ISD::NodeType Inst, |
| 1022 | EVT ReturnType) const override { |
| 1023 | assert((Inst == ISD::SHL || Inst == ISD::SRA || Inst == ISD::SRL) && |
| 1024 | "Expect a shift instruction"); |
| 1025 | assert(isOperationLegal(Inst, ReturnType)); |
| 1026 | return ReturnType.isVector(); |
| 1027 | } |
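    // Modulo-shift semantics mean the hardware already interprets the shift
    // amount modulo the element width, so (as a sketch, not a guaranteed
    // fold) a combiner may drop an explicit mask on such vector shifts:
    //   (shl v4i32:x, (and y, 31))  -->  (shl v4i32:x, y)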
Chris Lattner | f22556d | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1028 | }; |
Bill Schmidt | 230b451 | 2013-06-12 16:39:22 +0000 | [diff] [blame] | 1029 | |
Bill Schmidt | 0cf702f | 2013-07-30 00:50:39 +0000 | [diff] [blame] | 1030 | namespace PPC { |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 1031 | |
Bill Schmidt | 0cf702f | 2013-07-30 00:50:39 +0000 | [diff] [blame] | 1032 | FastISel *createFastISel(FunctionLoweringInfo &FuncInfo, |
| 1033 | const TargetLibraryInfo *LibInfo); |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 1034 | |
| 1035 | } // end namespace PPC |
Bill Schmidt | 0cf702f | 2013-07-30 00:50:39 +0000 | [diff] [blame] | 1036 | |
Bill Schmidt | 230b451 | 2013-06-12 16:39:22 +0000 | [diff] [blame] | 1037 | bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, |
| 1038 | CCValAssign::LocInfo &LocInfo, |
| 1039 | ISD::ArgFlagsTy &ArgFlags, |
| 1040 | CCState &State); |
| 1041 | |
| 1042 | bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, |
| 1043 | MVT &LocVT, |
| 1044 | CCValAssign::LocInfo &LocInfo, |
| 1045 | ISD::ArgFlagsTy &ArgFlags, |
| 1046 | CCState &State); |
| 1047 | |
Strahinja Petrovic | 30e0ce8 | 2016-08-05 08:47:26 +0000 | [diff] [blame] | 1048 | bool |
| 1049 | CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT, |
| 1050 | MVT &LocVT, |
| 1051 | CCValAssign::LocInfo &LocInfo, |
| 1052 | ISD::ArgFlagsTy &ArgFlags, |
| 1053 | CCState &State); |
| 1054 | |
Bill Schmidt | 230b451 | 2013-06-12 16:39:22 +0000 | [diff] [blame] | 1055 | bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, |
| 1056 | MVT &LocVT, |
| 1057 | CCValAssign::LocInfo &LocInfo, |
| 1058 | ISD::ArgFlagsTy &ArgFlags, |
| 1059 | CCState &State); |
Chris Lattner | f22556d | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1060 | |
Eugene Zelenko | 8187c19 | 2017-01-13 00:58:58 +0000 | [diff] [blame] | 1061 | } // end namespace llvm |
| 1062 | |
| 1063 | #endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H