//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);

  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v16i8, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);

  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
    // 64 bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub/and/or/xor are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB, (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::OR,  (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);

      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  }
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
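/// The (non-unary) mask selects the odd-numbered bytes (1,3,...,31) of the two
/// concatenated inputs, i.e. the low-order byte of each halfword; the unary
/// form draws both halves of the result from the first input vector.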
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
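/// The (non-unary) mask selects byte pairs (2,3), (6,7), ..., (30,31) of the
/// two concatenated inputs, i.e. the low-order halfword of each word; the
/// unary form draws both halves of the result from the first input vector.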
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

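  // The merged result interleaves UnitSize-byte groups: result group 2*i must
  // come from the bytes starting at LHSStart+i*UnitSize, and result group
  // 2*i+1 from the bytes starting at RHSStart+i*UnitSize.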
  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that the mask entries for the first splat element are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
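  // Check that the remaining EltSize-byte groups repeat the first one; a group
  // whose leading entry is undef is skipped.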
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//

static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->get();
  SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);

  const TargetMachine &TM = DAG.getTarget();

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }

  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }

  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  return Lo;
}

static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);

  const TargetMachine &TM = DAG.getTarget();

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }

  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }

  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);

  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
}

static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
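  // That is, (seteq X, 0) becomes (srl (ctlz X), log2(bitwidth)): ctlz yields
  // the full bit width only when X is zero, so the shifted result is 1 exactly
  // when X == 0 and 0 otherwise.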
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by subtracting the rhs from the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}

static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                     Op.getOperand(1), Op.getOperand(2));
}

static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch (Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:
    return SDOperand(); // ret void is legal
  case 2: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
    unsigned ArgReg;
    if (MVT::isVector(ArgVT))
      ArgReg = PPC::V2;
    else if (MVT::isInteger(ArgVT))
      ArgReg = PPC::R3;
    else {
      assert(MVT::isFloatingPoint(ArgVT));
      ArgReg = PPC::F1;
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                            SDOperand());

    // If we haven't noted the R3/F1 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ArgReg);
    break;
  }
  case 3:
    Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1), Copy.getValue(1));
    // If we haven't noted the R3+R4 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(PPC::R3);
      DAG.getMachineFunction().addLiveOut(PPC::R4);
    }
    break;
  }
  return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}

/// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction
/// when possible.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
  // Not FP? Not a fsel.
  if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
      !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
    return SDOperand();

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // Cannot handle SETEQ/SETNE.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();

  MVT::ValueType ResVT = Op.getValueType();
  MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
  SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETUGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETULE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT,
                         DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
    }

  SDOperand Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETUGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETULE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  }
  return SDOperand();
}

static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
  SDOperand Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);

  SDOperand Tmp;
  switch (Op.getValueType()) {
  default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
    break;
  case MVT::i64:
    Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
    break;
  }

  // Convert the FP value to an int value through memory.
  SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
  if (Op.getValueType() == MVT::i32)
    Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
  return Bits;
}

static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
    SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
    if (Op.getValueType() == MVT::f32)
      FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled SINT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // then lfd it and fcfid it.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(8, 8);
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);

  SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                Op.getOperand(0));

  // STD the extended value into the stack slot.
  SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                DAG.getEntryNode(), Ext64, FIdx,
                                DAG.getSrcValue(NULL));
  // Load the value as a double.
  SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));

  // FCFID it and return it.
  SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
  if (Op.getValueType() == MVT::f32)
    FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
  return FP;
}

static SDOperand LowerSHL(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);

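  // Hi:Lo << Amt is computed on 32-bit halves as
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (32-Amt)) | (Lo << (Amt-32))
  // relying on the PPC word shifts producing zero for shift amounts of 32-63.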
  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR, MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
  SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);

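  // Hi:Lo >>u Amt is computed on 32-bit halves as
  //   OutHi = Hi >> Amt
  //   OutLo = (Lo >> Amt) | (Hi << (32-Amt)) | (Hi >> (Amt-32))
  // again relying on word shifts producing zero for shift amounts of 32-63.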
  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR, MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
  SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);

| 916 | SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, |
| 917 | DAG.getConstant(32, MVT::i32), Amt); |
| 918 | SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); |
| 919 | SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); |
| 920 | SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); |
| 921 | SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, |
| 922 | DAG.getConstant(-32U, MVT::i32)); |
| 923 | SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5); |
| 924 | SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt); |
| 925 | SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32), |
| 926 | Tmp4, Tmp6, ISD::SETLE); |
| 927 | return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); |
| 928 | } |
| 929 | |
| 930 | //===----------------------------------------------------------------------===// |
| 931 | // Vector related lowering. |
| 932 | // |
| 933 | |
Chris Lattner | ac225ca | 2006-04-12 19:07:14 +0000 | [diff] [blame] | 934 | // If this is a vector of constants or undefs, get the bits. A bit in |
| 935 | // UndefBits is set if the corresponding element of the vector is an |
| 936 | // ISD::UNDEF value. For undefs, the corresponding VectorBits values are |
| 937 | // zero. Return true if this is not an array of constants, false if it is. |
| 938 | // |
| 939 | // Note that VectorBits/UndefBits are returned in 'little endian' form, so |
| 940 | // elements 0,1 go in VectorBits[0] and 2,3 go in VectorBits[1] for a v4i32. |
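|      | // For example, a v4i32 BUILD_VECTOR <a, b, c, d> produces
|      | //   VectorBits[0] = ((uint64_t)b << 32) | a
|      | //   VectorBits[1] = ((uint64_t)d << 32) | c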
| 941 | static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], |
| 942 | uint64_t UndefBits[2]) { |
| 943 | // Start with zero'd results. |
| 944 | VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; |
| 945 | |
| 946 | unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); |
| 947 | for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { |
| 948 | SDOperand OpVal = BV->getOperand(i); |
| 949 | |
| 950 |     unsigned PartNo = i >= e/2;     // In the upper half (second uint64_t)?
| 951 | unsigned SlotNo = i & (e/2-1); // Which subpiece of the uint64_t it is. |
| 952 | |
| 953 | uint64_t EltBits = 0; |
| 954 | if (OpVal.getOpcode() == ISD::UNDEF) { |
| 955 | uint64_t EltUndefBits = ~0U >> (32-EltBitSize); |
| 956 | UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); |
| 957 | continue; |
| 958 | } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { |
| 959 | EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); |
| 960 | } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { |
| 961 | assert(CN->getValueType(0) == MVT::f32 && |
| 962 | "Only one legal FP vector type!"); |
| 963 | EltBits = FloatToBits(CN->getValue()); |
| 964 | } else { |
| 965 | // Nonconstant element. |
| 966 | return true; |
| 967 | } |
| 968 | |
| 969 | VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); |
| 970 | } |
| 971 | |
| 972 | //printf("%llx %llx %llx %llx\n", |
| 973 | // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); |
| 974 | return false; |
| 975 | } |
Chris Lattner | ef819f8 | 2006-03-20 06:33:01 +0000 | [diff] [blame] | 976 | |
Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 977 | // If this is a case we can't handle, return null and let the default |
| 978 | // expansion code take care of it. If we CAN select this case, and if it |
| 979 | // selects to a single instruction, return Op. Otherwise, if we can codegen |
| 980 | // this case more efficiently than a constant pool load, lower it to the |
| 981 | // sequence of ops that should be used. |
| 982 | static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { |
| 983 | // If this is a vector of constants or undefs, get the bits. A bit in |
| 984 | // UndefBits is set if the corresponding element of the vector is an |
| 985 | // ISD::UNDEF value. For undefs, the corresponding VectorBits values are |
| 986 | // zero. |
| 987 | uint64_t VectorBits[2]; |
| 988 | uint64_t UndefBits[2]; |
| 989 | if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) |
| 990 | return SDOperand(); // Not a constant vector. |
| 991 | |
| 992 | // See if this is all zeros. |
| 993 | if ((VectorBits[0] | VectorBits[1]) == 0) { |
| 994 | // Canonicalize all zero vectors to be v4i32. |
| 995 | if (Op.getValueType() != MVT::v4i32) { |
| 996 | SDOperand Z = DAG.getConstant(0, MVT::i32); |
| 997 | Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); |
| 998 | Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); |
| 999 | } |
| 1000 | return Op; |
| 1001 | } |
| 1002 | |
| 1003 | // Check to see if this is something we can use VSPLTI* to form. |
| 1004 | MVT::ValueType CanonicalVT = MVT::Other; |
| 1005 | SDNode *CST = 0; |
| 1006 | |
| 1007 | if ((CST = PPC::get_VSPLTI_elt(Op.Val, 4, DAG).Val)) // vspltisw |
| 1008 | CanonicalVT = MVT::v4i32; |
| 1009 | else if ((CST = PPC::get_VSPLTI_elt(Op.Val, 2, DAG).Val)) // vspltish |
| 1010 | CanonicalVT = MVT::v8i16; |
| 1011 | else if ((CST = PPC::get_VSPLTI_elt(Op.Val, 1, DAG).Val)) // vspltisb |
| 1012 | CanonicalVT = MVT::v16i8; |
| 1013 | |
| 1014 | // If this matches one of the vsplti* patterns, force it to the canonical |
| 1015 | // type for the pattern. |
| 1016 | if (CST) { |
| 1017 | if (Op.getValueType() != CanonicalVT) { |
| 1018 | // Convert the splatted element to the right element type. |
| 1019 | SDOperand Elt = DAG.getNode(ISD::TRUNCATE, |
| 1020 | MVT::getVectorBaseType(CanonicalVT), |
| 1021 | SDOperand(CST, 0)); |
| 1022 | std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt); |
| 1023 | SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops); |
| 1024 | Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); |
| 1025 | } |
| 1026 | return Op; |
| 1027 | } |
| 1028 | |
| 1029 | // If this is some other splat of 4-byte elements, see if we can handle it |
| 1030 | // in another way. |
| 1031 | // FIXME: Make this more undef happy and work with other widths (1,2 bytes). |
| 1032 | if (VectorBits[0] == VectorBits[1] && |
| 1033 | unsigned(VectorBits[0]) == unsigned(VectorBits[0] >> 32)) { |
| 1034 | unsigned Bits = unsigned(VectorBits[0]); |
| 1035 | |
| 1036 | // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is |
| 1037 | // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). These are important |
| 1038 | // for fneg/fabs. |
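|      |     // vspltisw -1 gives a vector of 0xFFFFFFFF words; vslw then shifts each
|      |     // word left by the low 5 bits of the corresponding shift-vector element
|      |     // (31 here), producing 0x8000_0000 in every word.  XORing with the
|      |     // all-ones vector flips that to 0x7FFF_FFFF when that is what we need.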
| 1039 | if (Bits == 0x80000000 || Bits == 0x7FFFFFFF) { |
| 1040 |       // Make an all-ones vector (vspltisw -1):
| 1041 | SDOperand OnesI = DAG.getConstant(~0U, MVT::i32); |
| 1042 | SDOperand OnesV = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, |
| 1043 | OnesI, OnesI, OnesI, OnesI); |
| 1044 | |
| 1045 | // Make the VSLW intrinsic, computing 0x8000_0000. |
| 1046 | SDOperand Res |
| 1047 | = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, MVT::v4i32, |
| 1048 | DAG.getConstant(Intrinsic::ppc_altivec_vslw, MVT::i32), |
| 1049 | OnesV, OnesV); |
| 1050 | |
| 1051 | // If this is 0x7FFF_FFFF, xor by OnesV to invert it. |
| 1052 | if (Bits == 0x7FFFFFFF) |
| 1053 | Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); |
| 1054 | |
| 1055 | return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); |
| 1056 | } |
| 1057 | } |
| 1058 | |
| 1059 | return SDOperand(); |
| 1060 | } |
| 1061 | |
| 1062 | /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this |
| 1063 | /// is a shuffle we can handle in a single instruction, return it. Otherwise, |
| 1064 | /// return the code it can be lowered into. Worst case, it can always be |
| 1065 | /// lowered into a vperm. |
| 1066 | static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { |
| 1067 | SDOperand V1 = Op.getOperand(0); |
| 1068 | SDOperand V2 = Op.getOperand(1); |
| 1069 | SDOperand PermMask = Op.getOperand(2); |
| 1070 | |
| 1071 | // Cases that are handled by instructions that take permute immediates |
| 1072 | // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be |
| 1073 | // selected by the instruction selector. |
| 1074 | if (V2.getOpcode() == ISD::UNDEF) { |
| 1075 | if (PPC::isSplatShuffleMask(PermMask.Val, 1) || |
| 1076 | PPC::isSplatShuffleMask(PermMask.Val, 2) || |
| 1077 | PPC::isSplatShuffleMask(PermMask.Val, 4) || |
| 1078 | PPC::isVPKUWUMShuffleMask(PermMask.Val, true) || |
| 1079 | PPC::isVPKUHUMShuffleMask(PermMask.Val, true) || |
| 1080 | PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 || |
| 1081 | PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) || |
| 1082 | PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) || |
| 1083 | PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) || |
| 1084 | PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) || |
| 1085 | PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) || |
| 1086 | PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) { |
| 1087 | return Op; |
| 1088 | } |
| 1089 | } |
| 1090 | |
| 1091 | // Altivec has a variety of "shuffle immediates" that take two vector inputs |
| 1092 | // and produce a fixed permutation. If any of these match, do not lower to |
| 1093 | // VPERM. |
| 1094 | if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) || |
| 1095 | PPC::isVPKUHUMShuffleMask(PermMask.Val, false) || |
| 1096 | PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 || |
| 1097 | PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) || |
| 1098 | PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) || |
| 1099 | PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) || |
| 1100 | PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) || |
| 1101 | PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) || |
| 1102 | PPC::isVMRGHShuffleMask(PermMask.Val, 4, false)) |
| 1103 | return Op; |
| 1104 | |
| 1105 | // TODO: Handle more cases, and also handle cases that are cheaper to do as |
| 1106 | // multiple such instructions than as a constant pool load/vperm pair. |
| 1107 | |
| 1108 | // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant |
| 1109 | // vector that will get spilled to the constant pool. |
| 1110 | if (V2.getOpcode() == ISD::UNDEF) V2 = V1; |
| 1111 | |
| 1112 | // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except |
| 1113 | // that it is in input element units, not in bytes. Convert now. |
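|      |   // For example, on a v4i32 shuffle a mask entry of 2 expands to the byte
|      |   // indices 8, 9, 10, 11 in the vperm mask.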
| 1114 | MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType()); |
| 1115 | unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; |
| 1116 | |
| 1117 | std::vector<SDOperand> ResultMask; |
| 1118 | for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { |
Chris Lattner | 730b456 | 2006-04-15 23:48:05 +0000 | [diff] [blame] | 1119 | unsigned SrcElt; |
| 1120 | if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) |
| 1121 | SrcElt = 0; |
| 1122 | else |
| 1123 | SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); |
Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1124 | |
| 1125 | for (unsigned j = 0; j != BytesPerElement; ++j) |
| 1126 | ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, |
| 1127 | MVT::i8)); |
| 1128 | } |
| 1129 | |
| 1130 | SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask); |
| 1131 | return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); |
| 1132 | } |
| 1133 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1134 | /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom |
| 1135 | /// lower, do it, otherwise return null. |
| 1136 | static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { |
| 1137 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); |
| 1138 | |
| 1139 | // If this is a lowered altivec predicate compare, CompareOpc is set to the |
| 1140 | // opcode number of the comparison. |
| 1141 | int CompareOpc = -1; |
| 1142 | bool isDot = false; |
| 1143 | switch (IntNo) { |
| 1144 | default: return SDOperand(); // Don't custom lower most intrinsics. |
| 1145 | // Comparison predicates. |
| 1146 | case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; |
| 1147 | case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; |
| 1148 | case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; |
| 1149 | case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; |
| 1150 | case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; |
| 1151 | case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; |
| 1152 | case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; |
| 1153 | case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; |
| 1154 | case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; |
| 1155 | case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; |
| 1156 | case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; |
| 1157 | case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; |
| 1158 | case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; |
| 1159 | |
| 1160 | // Normal Comparisons. |
| 1161 | case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; |
| 1162 | case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; |
| 1163 | case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; |
| 1164 | case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; |
| 1165 | case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; |
| 1166 | case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; |
| 1167 | case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; |
| 1168 | case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; |
| 1169 | case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; |
| 1170 | case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; |
| 1171 | case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; |
| 1172 | case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; |
| 1173 | case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; |
| 1174 | } |
| 1175 | |
| 1176 |   assert(CompareOpc>0 && "We only lower altivec comparisons so far!");
| 1177 | |
| 1178 | // If this is a non-dot comparison, make the VCMP node. |
| 1179 | if (!isDot) { |
| 1180 | SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), |
| 1181 | Op.getOperand(1), Op.getOperand(2), |
| 1182 | DAG.getConstant(CompareOpc, MVT::i32)); |
| 1183 | return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); |
| 1184 | } |
| 1185 | |
| 1186 | // Create the PPCISD altivec 'dot' comparison node. |
| 1187 | std::vector<SDOperand> Ops; |
| 1188 | std::vector<MVT::ValueType> VTs; |
| 1189 | Ops.push_back(Op.getOperand(2)); // LHS |
| 1190 | Ops.push_back(Op.getOperand(3)); // RHS |
| 1191 | Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32)); |
| 1192 | VTs.push_back(Op.getOperand(2).getValueType()); |
| 1193 | VTs.push_back(MVT::Flag); |
| 1194 | SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops); |
| 1195 | |
| 1196 | // Now that we have the comparison, emit a copy from the CR to a GPR. |
| 1197 | // This is flagged to the above dot comparison. |
| 1198 | SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, |
| 1199 | DAG.getRegister(PPC::CR6, MVT::i32), |
| 1200 | CompNode.getValue(1)); |
| 1201 | |
| 1202 | // Unpack the result based on how the target uses it. |
| 1203 | unsigned BitNo; // Bit # of CR6. |
| 1204 | bool InvertBit; // Invert result? |
| 1205 | switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { |
| 1206 | default: // Can't happen, don't crash on invalid number though. |
| 1207 | case 0: // Return the value of the EQ bit of CR6. |
| 1208 | BitNo = 0; InvertBit = false; |
| 1209 | break; |
| 1210 | case 1: // Return the inverted value of the EQ bit of CR6. |
| 1211 | BitNo = 0; InvertBit = true; |
| 1212 | break; |
| 1213 | case 2: // Return the value of the LT bit of CR6. |
| 1214 | BitNo = 2; InvertBit = false; |
| 1215 | break; |
| 1216 | case 3: // Return the inverted value of the LT bit of CR6. |
| 1217 | BitNo = 2; InvertBit = true; |
| 1218 | break; |
| 1219 | } |
| 1220 | |
| 1221 | // Shift the bit into the low position. |
| 1222 | Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, |
| 1223 | DAG.getConstant(8-(3-BitNo), MVT::i32)); |
| 1224 | // Isolate the bit. |
| 1225 | Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, |
| 1226 | DAG.getConstant(1, MVT::i32)); |
| 1227 | |
| 1228 | // If we are supposed to, toggle the bit. |
| 1229 | if (InvertBit) |
| 1230 | Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, |
| 1231 | DAG.getConstant(1, MVT::i32)); |
| 1232 | return Flags; |
| 1233 | } |
| 1234 | |
| 1235 | static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { |
| 1236 | // Create a stack slot that is 16-byte aligned. |
| 1237 | MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); |
| 1238 | int FrameIdx = FrameInfo->CreateStackObject(16, 16); |
| 1239 | SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32); |
| 1240 | |
| 1241 | // Store the input value into Value#0 of the stack slot. |
| 1242 | SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(), |
| 1243 | Op.getOperand(0), FIdx,DAG.getSrcValue(NULL)); |
| 1244 | // Load it out. |
| 1245 | return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL)); |
| 1246 | } |
| 1247 | |
Chris Lattner | e4bc9ea | 2005-08-26 00:52:45 +0000 | [diff] [blame] | 1248 | /// LowerOperation - Provide custom lowering hooks for some operations. |
| 1249 | /// |
Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1250 | SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { |
Chris Lattner | e4bc9ea | 2005-08-26 00:52:45 +0000 | [diff] [blame] | 1251 | switch (Op.getOpcode()) { |
| 1252 | default: assert(0 && "Wasn't expecting to be able to lower this!"); |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1253 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 1254 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
| 1255 | case ISD::SETCC: return LowerSETCC(Op, DAG); |
| 1256 | case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex); |
| 1257 | case ISD::RET: return LowerRET(Op, DAG); |
Chris Lattner | 7c0d664 | 2005-10-02 06:37:13 +0000 | [diff] [blame] | 1258 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1259 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
| 1260 | case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); |
| 1261 | case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); |
Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1262 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1263 | // Lower 64-bit shifts. |
| 1264 | case ISD::SHL: return LowerSHL(Op, DAG); |
| 1265 | case ISD::SRL: return LowerSRL(Op, DAG); |
| 1266 | case ISD::SRA: return LowerSRA(Op, DAG); |
Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1267 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1268 | // Vector-related lowering. |
| 1269 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); |
| 1270 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); |
| 1271 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
| 1272 | case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); |
Chris Lattner | bc11c34 | 2005-08-31 20:23:54 +0000 | [diff] [blame] | 1273 | } |
Chris Lattner | e4bc9ea | 2005-08-26 00:52:45 +0000 | [diff] [blame] | 1274 | return SDOperand(); |
| 1275 | } |
| 1276 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1277 | //===----------------------------------------------------------------------===// |
| 1278 | // Other Lowering Code |
| 1279 | //===----------------------------------------------------------------------===// |
| 1280 | |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1281 | std::vector<SDOperand> |
Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1282 | PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1283 | // |
| 1284 | // add beautiful description of PPC stack frame format, or at least some docs |
| 1285 | // |
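|      |   // In outline, as assumed by the code below: a 24-byte linkage area sits
|      |   // below the argument words, so incoming arguments start at offset 24 from
|      |   // the stack pointer.  The first eight argument words are passed in R3-R10
|      |   // and the first thirteen FP arguments in F1-F13, but every argument still
|      |   // reserves its slot in the parameter area.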
| 1286 | MachineFunction &MF = DAG.getMachineFunction(); |
| 1287 | MachineFrameInfo *MFI = MF.getFrameInfo(); |
| 1288 | MachineBasicBlock& BB = MF.front(); |
Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1289 | SSARegMap *RegMap = MF.getSSARegMap(); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1290 | std::vector<SDOperand> ArgValues; |
| 1291 | |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1292 | unsigned ArgOffset = 24; |
| 1293 | unsigned GPR_remaining = 8; |
| 1294 | unsigned FPR_remaining = 13; |
| 1295 | unsigned GPR_idx = 0, FPR_idx = 0; |
| 1296 | static const unsigned GPR[] = { |
| 1297 | PPC::R3, PPC::R4, PPC::R5, PPC::R6, |
| 1298 | PPC::R7, PPC::R8, PPC::R9, PPC::R10, |
| 1299 | }; |
| 1300 | static const unsigned FPR[] = { |
| 1301 | PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, |
| 1302 | PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 |
| 1303 | }; |
| 1304 | |
| 1305 | // Add DAG nodes to load the arguments... On entry to a function on PPC, |
| 1306 | // the arguments start at offset 24, although they are likely to be passed |
| 1307 | // in registers. |
| 1308 | for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { |
| 1309 | SDOperand newroot, argt; |
| 1310 | unsigned ObjSize; |
| 1311 | bool needsLoad = false; |
| 1312 | bool ArgLive = !I->use_empty(); |
| 1313 | MVT::ValueType ObjectVT = getValueType(I->getType()); |
| 1314 | |
| 1315 | switch (ObjectVT) { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1316 | default: assert(0 && "Unhandled argument type!"); |
| 1317 | case MVT::i1: |
| 1318 | case MVT::i8: |
| 1319 | case MVT::i16: |
| 1320 | case MVT::i32: |
| 1321 | ObjSize = 4; |
| 1322 | if (!ArgLive) break; |
| 1323 | if (GPR_remaining > 0) { |
Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1324 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); |
Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1325 | MF.addLiveIn(GPR[GPR_idx], VReg); |
| 1326 | argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); |
Nate Begeman | 49296f1 | 2005-08-31 01:58:39 +0000 | [diff] [blame] | 1327 | if (ObjectVT != MVT::i32) { |
| 1328 | unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext |
| 1329 | : ISD::AssertZext; |
| 1330 | argt = DAG.getNode(AssertOp, MVT::i32, argt, |
| 1331 | DAG.getValueType(ObjectVT)); |
| 1332 | argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt); |
| 1333 | } |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1334 | } else { |
| 1335 | needsLoad = true; |
| 1336 | } |
| 1337 | break; |
Chris Lattner | 80720a9 | 2005-11-30 20:40:54 +0000 | [diff] [blame] | 1338 | case MVT::i64: |
| 1339 | ObjSize = 8; |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1340 | if (!ArgLive) break; |
| 1341 | if (GPR_remaining > 0) { |
| 1342 | SDOperand argHi, argLo; |
Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1343 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); |
Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1344 | MF.addLiveIn(GPR[GPR_idx], VReg); |
| 1345 | argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1346 | // If we have two or more remaining argument registers, then both halves |
| 1347 | // of the i64 can be sourced from there. Otherwise, the lower half will |
| 1348 | // have to come off the stack. This can happen when an i64 is preceded |
| 1349 | // by 28 bytes of arguments. |
| 1350 | if (GPR_remaining > 1) { |
Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1351 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); |
Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1352 | MF.addLiveIn(GPR[GPR_idx+1], VReg); |
| 1353 | argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32); |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1354 | } else { |
| 1355 | int FI = MFI->CreateFixedObject(4, ArgOffset+4); |
| 1356 | SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32); |
| 1357 | argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN, |
| 1358 | DAG.getSrcValue(NULL)); |
| 1359 | } |
| 1360 |         // Assemble the full i64 argument value from its two halves.
| 1361 | argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi); |
| 1362 | newroot = argLo; |
| 1363 | } else { |
| 1364 | needsLoad = true; |
| 1365 | } |
| 1366 | break; |
| 1367 | case MVT::f32: |
| 1368 | case MVT::f64: |
| 1369 | ObjSize = (ObjectVT == MVT::f64) ? 8 : 4; |
Chris Lattner | 413b979 | 2006-01-11 18:21:25 +0000 | [diff] [blame] | 1370 | if (!ArgLive) { |
| 1371 | if (FPR_remaining > 0) { |
| 1372 | --FPR_remaining; |
| 1373 | ++FPR_idx; |
| 1374 | } |
| 1375 | break; |
| 1376 | } |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1377 | if (FPR_remaining > 0) { |
Chris Lattner | 919c032 | 2005-10-01 01:35:02 +0000 | [diff] [blame] | 1378 | unsigned VReg; |
| 1379 | if (ObjectVT == MVT::f32) |
Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1380 | VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass); |
Chris Lattner | 919c032 | 2005-10-01 01:35:02 +0000 | [diff] [blame] | 1381 | else |
Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1382 | VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass); |
Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1383 | MF.addLiveIn(FPR[FPR_idx], VReg); |
| 1384 | argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT); |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1385 | --FPR_remaining; |
| 1386 | ++FPR_idx; |
| 1387 | } else { |
| 1388 | needsLoad = true; |
| 1389 | } |
| 1390 | break; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1391 | } |
| 1392 | |
| 1393 |     // If we ran out of physical registers of the appropriate type above, load
| 1394 |     // the argument from its stack slot instead.
| 1395 | if (needsLoad) { |
| 1396 | unsigned SubregOffset = 0; |
| 1397 | if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3; |
| 1398 | if (ObjectVT == MVT::i16) SubregOffset = 2; |
| 1399 | int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); |
| 1400 | SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32); |
| 1401 | FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, |
| 1402 | DAG.getConstant(SubregOffset, MVT::i32)); |
| 1403 | argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN, |
| 1404 | DAG.getSrcValue(NULL)); |
| 1405 | } |
| 1406 | |
| 1407 | // Every 4 bytes of argument space consumes one of the GPRs available for |
| 1408 | // argument passing. |
| 1409 | if (GPR_remaining > 0) { |
| 1410 | unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1; |
| 1411 | GPR_remaining -= delta; |
| 1412 | GPR_idx += delta; |
| 1413 | } |
| 1414 | ArgOffset += ObjSize; |
| 1415 | if (newroot.Val) |
| 1416 | DAG.setRoot(newroot.getValue(1)); |
| 1417 | |
| 1418 | ArgValues.push_back(argt); |
| 1419 | } |
| 1420 | |
| 1421 | // If the function takes variable number of arguments, make a frame index for |
| 1422 | // the start of the first vararg value... for expansion of llvm.va_start. |
| 1423 | if (F.isVarArg()) { |
| 1424 | VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset); |
| 1425 | SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32); |
| 1426 | // If this function is vararg, store any remaining integer argument regs |
| 1427 |     // to their spots on the stack so that they may be loaded by dereferencing the
| 1428 | // result of va_next. |
| 1429 | std::vector<SDOperand> MemOps; |
| 1430 | for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) { |
Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1431 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); |
Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1432 | MF.addLiveIn(GPR[GPR_idx], VReg); |
| 1433 | SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1434 | SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1), |
| 1435 | Val, FIN, DAG.getSrcValue(NULL)); |
| 1436 | MemOps.push_back(Store); |
| 1437 | // Increment the address by four for the next argument to store |
| 1438 | SDOperand PtrOff = DAG.getConstant(4, getPointerTy()); |
| 1439 | FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff); |
| 1440 | } |
Chris Lattner | 80720a9 | 2005-11-30 20:40:54 +0000 | [diff] [blame] | 1441 | if (!MemOps.empty()) { |
| 1442 | MemOps.push_back(DAG.getRoot()); |
| 1443 | DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps)); |
| 1444 | } |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1445 | } |
| 1446 | |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1447 | return ArgValues; |
| 1448 | } |
| 1449 | |
| 1450 | std::pair<SDOperand, SDOperand> |
Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1451 | PPCTargetLowering::LowerCallTo(SDOperand Chain, |
| 1452 | const Type *RetTy, bool isVarArg, |
| 1453 | unsigned CallingConv, bool isTailCall, |
| 1454 | SDOperand Callee, ArgListTy &Args, |
| 1455 | SelectionDAG &DAG) { |
Chris Lattner | 281b55e | 2006-01-27 23:34:02 +0000 | [diff] [blame] |  1456 |   // args_to_use will accumulate the outgoing arguments that the PPCISD::CALL
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] |  1457 |   // node uses to put the arguments in the appropriate registers.
| 1458 | std::vector<SDOperand> args_to_use; |
| 1459 | |
| 1460 | // Count how many bytes are to be pushed on the stack, including the linkage |
| 1461 | // area, and parameter passing area. |
| 1462 | unsigned NumBytes = 24; |
| 1463 | |
| 1464 | if (Args.empty()) { |
Chris Lattner | 45b3976 | 2006-02-13 08:55:29 +0000 | [diff] [blame] | 1465 | Chain = DAG.getCALLSEQ_START(Chain, |
| 1466 | DAG.getConstant(NumBytes, getPointerTy())); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1467 | } else { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1468 | for (unsigned i = 0, e = Args.size(); i != e; ++i) { |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1469 | switch (getValueType(Args[i].second)) { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1470 | default: assert(0 && "Unknown value type!"); |
| 1471 | case MVT::i1: |
| 1472 | case MVT::i8: |
| 1473 | case MVT::i16: |
| 1474 | case MVT::i32: |
| 1475 | case MVT::f32: |
| 1476 | NumBytes += 4; |
| 1477 | break; |
| 1478 | case MVT::i64: |
| 1479 | case MVT::f64: |
| 1480 | NumBytes += 8; |
| 1481 | break; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1482 | } |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1483 | } |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1484 | |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1485 | // Just to be safe, we'll always reserve the full 24 bytes of linkage area |
| 1486 | // plus 32 bytes of argument space in case any called code gets funky on us. |
| 1487 | // (Required by ABI to support var arg) |
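|      |     // 24 bytes of linkage area + 8 argument words * 4 bytes = 56 bytes.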
| 1488 | if (NumBytes < 56) NumBytes = 56; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1489 | |
| 1490 | // Adjust the stack pointer for the new arguments... |
| 1491 | // These operations are automatically eliminated by the prolog/epilog pass |
Chris Lattner | 45b3976 | 2006-02-13 08:55:29 +0000 | [diff] [blame] | 1492 | Chain = DAG.getCALLSEQ_START(Chain, |
| 1493 | DAG.getConstant(NumBytes, getPointerTy())); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1494 | |
| 1495 | // Set up a copy of the stack pointer for use loading and storing any |
| 1496 | // arguments that may not fit in the registers available for argument |
| 1497 | // passing. |
Chris Lattner | a243db8 | 2006-01-11 19:55:07 +0000 | [diff] [blame] | 1498 | SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1499 | |
| 1500 | // Figure out which arguments are going to go in registers, and which in |
| 1501 | // memory. Also, if this is a vararg function, floating point operations |
| 1502 | // must be stored to our stack, and loaded into integer regs as well, if |
| 1503 | // any integer regs are available for argument passing. |
| 1504 | unsigned ArgOffset = 24; |
| 1505 | unsigned GPR_remaining = 8; |
| 1506 | unsigned FPR_remaining = 13; |
| 1507 | |
| 1508 | std::vector<SDOperand> MemOps; |
| 1509 | for (unsigned i = 0, e = Args.size(); i != e; ++i) { |
| 1510 | // PtrOff will be used to store the current argument to the stack if a |
| 1511 | // register cannot be found for it. |
| 1512 | SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); |
| 1513 | PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); |
| 1514 | MVT::ValueType ArgVT = getValueType(Args[i].second); |
| 1515 | |
| 1516 | switch (ArgVT) { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1517 | default: assert(0 && "Unexpected ValueType for argument!"); |
| 1518 | case MVT::i1: |
| 1519 | case MVT::i8: |
| 1520 | case MVT::i16: |
| 1521 | // Promote the integer to 32 bits. If the input type is signed use a |
| 1522 | // sign extend, otherwise use a zero extend. |
| 1523 | if (Args[i].second->isSigned()) |
| 1524 | Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first); |
| 1525 | else |
| 1526 | Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first); |
| 1527 | // FALL THROUGH |
| 1528 | case MVT::i32: |
| 1529 | if (GPR_remaining > 0) { |
| 1530 | args_to_use.push_back(Args[i].first); |
| 1531 | --GPR_remaining; |
| 1532 | } else { |
| 1533 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, |
| 1534 | Args[i].first, PtrOff, |
| 1535 | DAG.getSrcValue(NULL))); |
| 1536 | } |
| 1537 | ArgOffset += 4; |
| 1538 | break; |
| 1539 | case MVT::i64: |
| 1540 | // If we have one free GPR left, we can place the upper half of the i64 |
| 1541 | // in it, and store the other half to the stack. If we have two or more |
| 1542 | // free GPRs, then we can pass both halves of the i64 in registers. |
| 1543 | if (GPR_remaining > 0) { |
| 1544 | SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, |
| 1545 | Args[i].first, DAG.getConstant(1, MVT::i32)); |
| 1546 | SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, |
| 1547 | Args[i].first, DAG.getConstant(0, MVT::i32)); |
| 1548 | args_to_use.push_back(Hi); |
| 1549 | --GPR_remaining; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1550 | if (GPR_remaining > 0) { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1551 | args_to_use.push_back(Lo); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1552 | --GPR_remaining; |
| 1553 | } else { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1554 | SDOperand ConstFour = DAG.getConstant(4, getPointerTy()); |
| 1555 | PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1556 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1557 | Lo, PtrOff, DAG.getSrcValue(NULL))); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1558 | } |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1559 | } else { |
| 1560 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, |
| 1561 | Args[i].first, PtrOff, |
| 1562 | DAG.getSrcValue(NULL))); |
| 1563 | } |
| 1564 | ArgOffset += 8; |
| 1565 | break; |
| 1566 | case MVT::f32: |
| 1567 | case MVT::f64: |
| 1568 | if (FPR_remaining > 0) { |
| 1569 | args_to_use.push_back(Args[i].first); |
| 1570 | --FPR_remaining; |
| 1571 | if (isVarArg) { |
| 1572 | SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain, |
| 1573 | Args[i].first, PtrOff, |
| 1574 | DAG.getSrcValue(NULL)); |
| 1575 | MemOps.push_back(Store); |
| 1576 | // Float varargs are always shadowed in available integer registers |
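|      |           // The FP value was stored above; its word(s) are reloaded below into
|      |           // the shadow GPR argument slots (one word for f32, two for f64).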
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1577 | if (GPR_remaining > 0) { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1578 | SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff, |
| 1579 | DAG.getSrcValue(NULL)); |
Chris Lattner | 1df7478 | 2005-11-17 18:30:17 +0000 | [diff] [blame] | 1580 | MemOps.push_back(Load.getValue(1)); |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1581 | args_to_use.push_back(Load); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1582 | --GPR_remaining; |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1583 | } |
| 1584 | if (GPR_remaining > 0 && MVT::f64 == ArgVT) { |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1585 | SDOperand ConstFour = DAG.getConstant(4, getPointerTy()); |
| 1586 | PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour); |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1587 | SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff, |
| 1588 | DAG.getSrcValue(NULL)); |
Chris Lattner | 1df7478 | 2005-11-17 18:30:17 +0000 | [diff] [blame] | 1589 | MemOps.push_back(Load.getValue(1)); |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1590 | args_to_use.push_back(Load); |
| 1591 | --GPR_remaining; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1592 | } |
| 1593 | } else { |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1594 | // If we have any FPRs remaining, we may also have GPRs remaining. |
| 1595 | // Args passed in FPRs consume either 1 (f32) or 2 (f64) available |
| 1596 | // GPRs. |
| 1597 | if (GPR_remaining > 0) { |
| 1598 | args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); |
| 1599 | --GPR_remaining; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1600 | } |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1601 | if (GPR_remaining > 0 && MVT::f64 == ArgVT) { |
| 1602 | args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); |
| 1603 | --GPR_remaining; |
| 1604 | } |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1605 | } |
Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1606 | } else { |
| 1607 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, |
| 1608 | Args[i].first, PtrOff, |
| 1609 | DAG.getSrcValue(NULL))); |
| 1610 | } |
| 1611 | ArgOffset += (ArgVT == MVT::f32) ? 4 : 8; |
| 1612 | break; |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1613 | } |
| 1614 | } |
| 1615 | if (!MemOps.empty()) |
| 1616 | Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps); |
| 1617 | } |
| 1618 | |
| 1619 | std::vector<MVT::ValueType> RetVals; |
| 1620 | MVT::ValueType RetTyVT = getValueType(RetTy); |
Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1621 | MVT::ValueType ActualRetTyVT = RetTyVT; |
| 1622 | if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16) |
| 1623 | ActualRetTyVT = MVT::i32; // Promote result to i32. |
| 1624 | |
Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1625 | if (RetTyVT == MVT::i64) { |
| 1626 | RetVals.push_back(MVT::i32); |
| 1627 | RetVals.push_back(MVT::i32); |
| 1628 | } else if (RetTyVT != MVT::isVoid) { |
Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1629 | RetVals.push_back(ActualRetTyVT); |
Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1630 | } |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1631 | RetVals.push_back(MVT::Other); |
| 1632 | |
Chris Lattner | 2823b3e | 2005-11-17 05:56:14 +0000 | [diff] [blame] | 1633 | // If the callee is a GlobalAddress node (quite common, every direct call is) |
| 1634 | // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. |
| 1635 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
| 1636 | Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32); |
| 1637 | |
Chris Lattner | 281b55e | 2006-01-27 23:34:02 +0000 | [diff] [blame] | 1638 | std::vector<SDOperand> Ops; |
| 1639 | Ops.push_back(Chain); |
| 1640 | Ops.push_back(Callee); |
| 1641 | Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end()); |
| 1642 | SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops); |
Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1643 | Chain = TheCall.getValue(TheCall.Val->getNumValues()-1); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1644 | Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain, |
| 1645 | DAG.getConstant(NumBytes, getPointerTy())); |
Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1646 | SDOperand RetVal = TheCall; |
| 1647 | |
| 1648 | // If the result is a small value, add a note so that we keep track of the |
| 1649 | // information about whether it is sign or zero extended. |
| 1650 | if (RetTyVT != ActualRetTyVT) { |
| 1651 | RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext, |
| 1652 | MVT::i32, RetVal, DAG.getValueType(RetTyVT)); |
| 1653 | RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal); |
Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1654 | } else if (RetTyVT == MVT::i64) { |
| 1655 | RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1)); |
Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1656 | } |
| 1657 | |
| 1658 | return std::make_pair(RetVal, Chain); |
Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1659 | } |
| 1660 | |
Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1661 | MachineBasicBlock * |
Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1662 | PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, |
| 1663 | MachineBasicBlock *BB) { |
Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1664 | assert((MI->getOpcode() == PPC::SELECT_CC_Int || |
Chris Lattner | 919c032 | 2005-10-01 01:35:02 +0000 | [diff] [blame] | 1665 | MI->getOpcode() == PPC::SELECT_CC_F4 || |
Chris Lattner | 710ff32 | 2006-04-08 22:45:08 +0000 | [diff] [blame] | 1666 | MI->getOpcode() == PPC::SELECT_CC_F8 || |
| 1667 | MI->getOpcode() == PPC::SELECT_CC_VRRC) && |
Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1668 | "Unexpected instr type to insert"); |
| 1669 | |
| 1670 | // To "insert" a SELECT_CC instruction, we actually have to insert the diamond |
| 1671 | // control-flow pattern. The incoming instruction knows the destination vreg |
| 1672 | // to set, the condition code register to branch on, the true/false values to |
| 1673 | // select between, and a branch opcode to use. |
| 1674 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 1675 | ilist<MachineBasicBlock>::iterator It = BB; |
| 1676 | ++It; |
| 1677 | |
| 1678 | // thisMBB: |
| 1679 | // ... |
| 1680 | // TrueVal = ... |
| 1681 | // cmpTY ccX, r1, r2 |
| 1682 | // bCC copy1MBB |
| 1683 | // fallthrough --> copy0MBB |
| 1684 | MachineBasicBlock *thisMBB = BB; |
| 1685 | MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); |
| 1686 | MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); |
| 1687 | BuildMI(BB, MI->getOperand(4).getImmedValue(), 2) |
| 1688 | .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); |
| 1689 | MachineFunction *F = BB->getParent(); |
| 1690 | F->getBasicBlockList().insert(It, copy0MBB); |
| 1691 | F->getBasicBlockList().insert(It, sinkMBB); |
Nate Begeman | f15485a | 2006-03-27 01:32:24 +0000 | [diff] [blame] | 1692 | // Update machine-CFG edges by first adding all successors of the current |
| 1693 | // block to the new block which will contain the Phi node for the select. |
| 1694 | for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), |
| 1695 | e = BB->succ_end(); i != e; ++i) |
| 1696 | sinkMBB->addSuccessor(*i); |
| 1697 | // Next, remove all successors of the current block, and add the true |
| 1698 | // and fallthrough blocks as its successors. |
| 1699 | while(!BB->succ_empty()) |
| 1700 | BB->removeSuccessor(BB->succ_begin()); |
Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1701 | BB->addSuccessor(copy0MBB); |
| 1702 | BB->addSuccessor(sinkMBB); |
| 1703 | |
| 1704 | // copy0MBB: |
| 1705 | // %FalseValue = ... |
| 1706 | // # fallthrough to sinkMBB |
| 1707 | BB = copy0MBB; |
| 1708 | |
| 1709 | // Update machine-CFG edges |
| 1710 | BB->addSuccessor(sinkMBB); |
| 1711 | |
| 1712 | // sinkMBB: |
| 1713 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
| 1714 | // ... |
| 1715 | BB = sinkMBB; |
| 1716 | BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg()) |
| 1717 | .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) |
| 1718 | .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); |
| 1719 | |
| 1720 | delete MI; // The pseudo instruction is gone now. |
| 1721 | return BB; |
| 1722 | } |
| 1723 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1724 | //===----------------------------------------------------------------------===// |
| 1725 | // Target Optimization Hooks |
| 1726 | //===----------------------------------------------------------------------===// |
| 1727 | |
Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1728 | SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, |
| 1729 | DAGCombinerInfo &DCI) const { |
| 1730 | TargetMachine &TM = getTargetMachine(); |
| 1731 | SelectionDAG &DAG = DCI.DAG; |
| 1732 | switch (N->getOpcode()) { |
| 1733 | default: break; |
| 1734 | case ISD::SINT_TO_FP: |
| 1735 | if (TM.getSubtarget<PPCSubtarget>().is64Bit()) { |
Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1736 | if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { |
| 1737 | // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. |
| 1738 | // We allow the src/dst to be either f32/f64, but the intermediate |
| 1739 | // type must be i64. |
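|      |         // i.e. X --> (fp_extend) --> FCTIDZ --> FCFID --> (fp_round) --> result,
|      |         // with the extend/round steps only present for f32 sources/results.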
| 1740 | if (N->getOperand(0).getValueType() == MVT::i64) { |
| 1741 | SDOperand Val = N->getOperand(0).getOperand(0); |
| 1742 | if (Val.getValueType() == MVT::f32) { |
| 1743 | Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); |
| 1744 | DCI.AddToWorklist(Val.Val); |
| 1745 | } |
| 1746 | |
| 1747 | Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val); |
Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1748 | DCI.AddToWorklist(Val.Val); |
Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1749 | Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val); |
Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1750 | DCI.AddToWorklist(Val.Val); |
Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1751 | if (N->getValueType(0) == MVT::f32) { |
| 1752 | Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val); |
| 1753 | DCI.AddToWorklist(Val.Val); |
| 1754 | } |
| 1755 | return Val; |
| 1756 | } else if (N->getOperand(0).getValueType() == MVT::i32) { |
| 1757 | // If the intermediate type is i32, we can avoid the load/store here |
| 1758 | // too. |
Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1759 | } |
Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1760 | } |
| 1761 | } |
| 1762 | break; |
Chris Lattner | 5126984 | 2006-03-01 05:50:56 +0000 | [diff] [blame] | 1763 | case ISD::STORE: |
| 1764 | // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). |
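|      |     // STFIWX stores the integer word directly from the FP register, so this
|      |     // avoids the usual round trip through a stack temporary.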
| 1765 | if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && |
| 1766 | N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && |
| 1767 | N->getOperand(1).getValueType() == MVT::i32) { |
| 1768 | SDOperand Val = N->getOperand(1).getOperand(0); |
| 1769 | if (Val.getValueType() == MVT::f32) { |
| 1770 | Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); |
| 1771 | DCI.AddToWorklist(Val.Val); |
| 1772 | } |
| 1773 | Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val); |
| 1774 | DCI.AddToWorklist(Val.Val); |
| 1775 | |
| 1776 | Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val, |
| 1777 | N->getOperand(2), N->getOperand(3)); |
| 1778 | DCI.AddToWorklist(Val.Val); |
| 1779 | return Val; |
| 1780 | } |
| 1781 | break; |
Chris Lattner | 4468c22 | 2006-03-31 06:02:07 +0000 | [diff] [blame] | 1782 | case PPCISD::VCMP: { |
| 1783 | // If a VCMPo node already exists with exactly the same operands as this |
| 1784 | // node, use its result instead of this node (VCMPo computes both a CR6 and |
| 1785 | // a normal output). |
| 1786 | // |
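|      |     // Only bother scanning if every operand has another use besides this node;
|      |     // otherwise no VCMPo with identical operands can exist.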
| 1787 | if (!N->getOperand(0).hasOneUse() && |
| 1788 | !N->getOperand(1).hasOneUse() && |
| 1789 | !N->getOperand(2).hasOneUse()) { |
| 1790 | |
| 1791 | // Scan all of the users of the LHS, looking for VCMPo's that match. |
| 1792 | SDNode *VCMPoNode = 0; |
| 1793 | |
| 1794 | SDNode *LHSN = N->getOperand(0).Val; |
| 1795 | for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); |
| 1796 | UI != E; ++UI) |
| 1797 | if ((*UI)->getOpcode() == PPCISD::VCMPo && |
| 1798 | (*UI)->getOperand(1) == N->getOperand(1) && |
| 1799 | (*UI)->getOperand(2) == N->getOperand(2) && |
| 1800 | (*UI)->getOperand(0) == N->getOperand(0)) { |
| 1801 | VCMPoNode = *UI; |
| 1802 | break; |
| 1803 | } |
| 1804 | |
| 1805 | // If there are non-zero uses of the flag value, use the VCMPo node! |
Chris Lattner | 33497cc | 2006-03-31 06:04:53 +0000 | [diff] [blame] | 1806 | if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1)) |
Chris Lattner | 4468c22 | 2006-03-31 06:02:07 +0000 | [diff] [blame] | 1807 | return SDOperand(VCMPoNode, 0); |
| 1808 | } |
| 1809 | break; |
| 1810 | } |
Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1811 | } |
| 1812 | |
| 1813 | return SDOperand(); |
| 1814 | } |
| 1815 | |
Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1816 | //===----------------------------------------------------------------------===// |
| 1817 | // Inline Assembly Support |
| 1818 | //===----------------------------------------------------------------------===// |
| 1819 | |
Chris Lattner | bbe77de | 2006-04-02 06:26:07 +0000 | [diff] [blame] | 1820 | void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, |
| 1821 | uint64_t Mask, |
| 1822 | uint64_t &KnownZero, |
| 1823 | uint64_t &KnownOne, |
| 1824 | unsigned Depth) const { |
| 1825 | KnownZero = 0; |
| 1826 | KnownOne = 0; |
| 1827 | switch (Op.getOpcode()) { |
| 1828 | default: break; |
| 1829 | case ISD::INTRINSIC_WO_CHAIN: { |
| 1830 | switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) { |
| 1831 | default: break; |
| 1832 | case Intrinsic::ppc_altivec_vcmpbfp_p: |
| 1833 | case Intrinsic::ppc_altivec_vcmpeqfp_p: |
| 1834 | case Intrinsic::ppc_altivec_vcmpequb_p: |
| 1835 | case Intrinsic::ppc_altivec_vcmpequh_p: |
| 1836 | case Intrinsic::ppc_altivec_vcmpequw_p: |
| 1837 | case Intrinsic::ppc_altivec_vcmpgefp_p: |
| 1838 | case Intrinsic::ppc_altivec_vcmpgtfp_p: |
| 1839 | case Intrinsic::ppc_altivec_vcmpgtsb_p: |
| 1840 | case Intrinsic::ppc_altivec_vcmpgtsh_p: |
| 1841 | case Intrinsic::ppc_altivec_vcmpgtsw_p: |
| 1842 | case Intrinsic::ppc_altivec_vcmpgtub_p: |
| 1843 | case Intrinsic::ppc_altivec_vcmpgtuh_p: |
| 1844 | case Intrinsic::ppc_altivec_vcmpgtuw_p: |
| 1845 | KnownZero = ~1U; // All bits but the low one are known to be zero. |
| 1846 | break; |
| 1847 | } |
| 1848 | } |
| 1849 | } |
| 1850 | } |
| 1851 | |
| 1852 | |
Chris Lattner | ad3bc8d | 2006-02-07 20:16:30 +0000 | [diff] [blame] | 1853 | /// getConstraintType - Given a constraint letter, return the type of |
| 1854 | /// constraint it is for this target. |
| 1855 | PPCTargetLowering::ConstraintType |
| 1856 | PPCTargetLowering::getConstraintType(char ConstraintLetter) const { |
| 1857 | switch (ConstraintLetter) { |
| 1858 | default: break; |
| 1859 | case 'b': |
| 1860 | case 'r': |
| 1861 | case 'f': |
| 1862 | case 'v': |
| 1863 | case 'y': |
| 1864 | return C_RegisterClass; |
| 1865 | } |
| 1866 | return TargetLowering::getConstraintType(ConstraintLetter); |
| 1867 | } |
| 1868 | |
| 1869 | |
Chris Lattner | ddc787d | 2006-01-31 19:20:21 +0000 | [diff] [blame] | 1870 | std::vector<unsigned> PPCTargetLowering:: |
Chris Lattner | 1efa40f | 2006-02-22 00:56:39 +0000 | [diff] [blame] | 1871 | getRegClassForInlineAsmConstraint(const std::string &Constraint, |
| 1872 | MVT::ValueType VT) const { |
Chris Lattner | ddc787d | 2006-01-31 19:20:21 +0000 | [diff] [blame] | 1873 | if (Constraint.size() == 1) { |
| 1874 | switch (Constraint[0]) { // GCC RS6000 Constraint Letters |
| 1875 |     default: break;  // Unknown constraint letter
| 1876 | case 'b': |
| 1877 | return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 , |
| 1878 | PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 , |
| 1879 | PPC::R8 , PPC::R9 , PPC::R10, PPC::R11, |
| 1880 | PPC::R12, PPC::R13, PPC::R14, PPC::R15, |
| 1881 | PPC::R16, PPC::R17, PPC::R18, PPC::R19, |
| 1882 | PPC::R20, PPC::R21, PPC::R22, PPC::R23, |
| 1883 | PPC::R24, PPC::R25, PPC::R26, PPC::R27, |
| 1884 | PPC::R28, PPC::R29, PPC::R30, PPC::R31, |
| 1885 | 0); |
| 1886 | case 'r': |
| 1887 | return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 , |
| 1888 | PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 , |
| 1889 | PPC::R8 , PPC::R9 , PPC::R10, PPC::R11, |
| 1890 | PPC::R12, PPC::R13, PPC::R14, PPC::R15, |
| 1891 | PPC::R16, PPC::R17, PPC::R18, PPC::R19, |
| 1892 | PPC::R20, PPC::R21, PPC::R22, PPC::R23, |
| 1893 | PPC::R24, PPC::R25, PPC::R26, PPC::R27, |
| 1894 | PPC::R28, PPC::R29, PPC::R30, PPC::R31, |
| 1895 | 0); |
| 1896 | case 'f': |
| 1897 | return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 , |
| 1898 | PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 , |
| 1899 | PPC::F8 , PPC::F9 , PPC::F10, PPC::F11, |
| 1900 | PPC::F12, PPC::F13, PPC::F14, PPC::F15, |
| 1901 | PPC::F16, PPC::F17, PPC::F18, PPC::F19, |
| 1902 | PPC::F20, PPC::F21, PPC::F22, PPC::F23, |
| 1903 | PPC::F24, PPC::F25, PPC::F26, PPC::F27, |
| 1904 | PPC::F28, PPC::F29, PPC::F30, PPC::F31, |
| 1905 | 0); |
| 1906 | case 'v': |
| 1907 | return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , |
| 1908 | PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 , |
| 1909 | PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, |
| 1910 | PPC::V12, PPC::V13, PPC::V14, PPC::V15, |
| 1911 | PPC::V16, PPC::V17, PPC::V18, PPC::V19, |
| 1912 | PPC::V20, PPC::V21, PPC::V22, PPC::V23, |
| 1913 | PPC::V24, PPC::V25, PPC::V26, PPC::V27, |
| 1914 | PPC::V28, PPC::V29, PPC::V30, PPC::V31, |
| 1915 | 0); |
| 1916 | case 'y': |
| 1917 | return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3, |
| 1918 | PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7, |
| 1919 | 0); |
| 1920 | } |
| 1921 | } |
| 1922 | |
Chris Lattner | 1efa40f | 2006-02-22 00:56:39 +0000 | [diff] [blame] | 1923 | return std::vector<unsigned>(); |
Chris Lattner | ddc787d | 2006-01-31 19:20:21 +0000 | [diff] [blame] | 1924 | } |
Chris Lattner | 763317d | 2006-02-07 00:47:13 +0000 | [diff] [blame] | 1925 | |
| 1926 | /// isOperandValidForConstraint - Return true if Op is a valid operand for the given constraint letter.
| 1927 | bool PPCTargetLowering:: |
| 1928 | isOperandValidForConstraint(SDOperand Op, char Letter) { |
| 1929 | switch (Letter) { |
| 1930 | default: break; |
| 1931 | case 'I': |
| 1932 | case 'J': |
| 1933 | case 'K': |
| 1934 | case 'L': |
| 1935 | case 'M': |
| 1936 | case 'N': |
| 1937 | case 'O': |
| 1938 | case 'P': { |
| 1939 | if (!isa<ConstantSDNode>(Op)) return false; // Must be an immediate. |
| 1940 | unsigned Value = cast<ConstantSDNode>(Op)->getValue(); |
| 1941 | switch (Letter) { |
| 1942 | default: assert(0 && "Unknown constraint letter!"); |
| 1943 | case 'I': // "I" is a signed 16-bit constant. |
| 1944 | return (short)Value == (int)Value; |
| 1945 | case 'J': // "J" is a constant with only the high-order 16 bits nonzero. |
| 1946 | case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. |
| 1947 | return (short)Value == 0; |
| 1948 | case 'K': // "K" is a constant with only the low-order 16 bits nonzero. |
| 1949 | return (Value >> 16) == 0; |
| 1950 | case 'M': // "M" is a constant that is greater than 31. |
| 1951 | return Value > 31; |
| 1952 | case 'N': // "N" is a positive constant that is an exact power of two. |
| 1953 | return (int)Value > 0 && isPowerOf2_32(Value); |
| 1954 | case 'O': // "O" is the constant zero. |
| 1955 | return Value == 0; |
| 1956 | case 'P': // "P" is a constant whose negation is a signed 16-bit constant. |
| 1957 | return (short)-Value == (int)-Value; |
| 1958 | } |
| 1959 | break; |
| 1960 | } |
| 1961 | } |
| 1962 | |
| 1963 | // Handle standard constraint letters. |
| 1964 | return TargetLowering::isOperandValidForConstraint(Op, Letter); |
| 1965 | } |
Evan Cheng | c4c6257 | 2006-03-13 23:20:37 +0000 | [diff] [blame] | 1966 | |
| 1967 | /// isLegalAddressImmediate - Return true if the integer value can be used |
| 1968 | /// as the offset of the target addressing mode. |
| 1969 | bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const { |
| 1970 | // PPC allows a sign-extended 16-bit immediate field. |
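|      |   // (This is the signed 16-bit displacement field of d-form loads and stores,
|      |   // e.g. the offset in "lwz r3, imm(r4)".)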
| 1971 | return (V > -(1 << 16) && V < (1 << 16)-1); |
| 1972 | } |