//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);

  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
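  // (These are handled by LowerGlobalAddress and LowerConstantPool below,
  // which split the address into PPCISD::Hi/PPCISD::Lo halves.)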
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET               , MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG             , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
    // 64 bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR    , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR    , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD  , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);

      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  }
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
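/// For reference: the non-unary form matches the byte mask <1,3,5,...,31>
/// (the odd bytes of the two concatenated inputs); the unary form matches
/// <1,3,...,15, 1,3,...,15>, i.e. the same pattern with both inputs being the
/// same vector.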
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i),  i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),  i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8),  i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
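/// For reference: the non-unary form matches the byte mask
/// <2,3, 6,7, 10,11, ..., 30,31> (the low halfword of each word of the two
/// concatenated inputs); the unary form matches <2,3,6,7,10,11,14,15> repeated
/// twice, i.e. both inputs being the same vector.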
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1),  i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1),  i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8),  i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9),  i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
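/// For example, isVMerge(N, 4, 0, 16) (the non-unary VMRGH case for word
/// units) matches the byte mask <0,1,2,3, 16,17,18,19, 4,5,6,7, 20,21,22,23>.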
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
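/// For example, the byte mask <3,4,5,...,18> yields a shift amount of 3; in
/// the unary case the indices are taken modulo 16, so <3,4,...,15,0,1,2> also
/// yields 3.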
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
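/// For example, with EltSize == 4 the byte mask
/// <8,9,10,11, 8,9,10,11, 8,9,10,11, 8,9,10,11> is accepted: it splats word
/// element 2 of the input vector.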
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that the bytes of this element are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
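/// For the word-splat example above (mask <8,9,10,11, ...> with EltSize == 4)
/// this returns 8/4 == 2, the element number encoded in the vspltw immediate.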
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
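/// For example, a v4i32 build_vector of four copies of 0x01010101 queried
/// with ByteSize == 1 returns the constant 1 (splattable as "vspltisb 1"),
/// while the same vector queried with ByteSize == 4 fails because 0x01010101
/// does not fit in the 5-bit signed immediate.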
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();


      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->get();
  SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);

  const TargetMachine &TM = DAG.getTarget();

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }

  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }

  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  return Lo;
}

static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);

  const TargetMachine &TM = DAG.getTarget();

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }

  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }

  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);

  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
}

static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
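  // For example, (i32 seteq X, 0) becomes (srl (ctlz X), 5): ctlz yields 32
  // only when X is zero, so the shifted result is 1 for zero and 0 otherwise.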
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by subtracting the rhs from the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
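  // For example, (seteq a, b) becomes (seteq (sub a, b), 0).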
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}

static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                     Op.getOperand(1), Op.getOperand(2));
}

static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch(Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:
    return SDOperand(); // ret void is legal
  case 2: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
    unsigned ArgReg;
    if (MVT::isVector(ArgVT))
      ArgReg = PPC::V2;
    else if (MVT::isInteger(ArgVT))
      ArgReg = PPC::R3;
    else {
      assert(MVT::isFloatingPoint(ArgVT));
      ArgReg = PPC::F1;
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                            SDOperand());

    // If we haven't noted the R3/F1 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ArgReg);
    break;
  }
  case 3:
    Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
    // If we haven't noted the R3+R4 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(PPC::R3);
      DAG.getMachineFunction().addLiveOut(PPC::R4);
    }
    break;
  }
  return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}

/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
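/// For example, (select_cc lhs, rhs, tv, fv, setge) becomes
/// (fsel (fsub lhs, rhs), tv, fv), since fsel selects its second operand when
/// its first operand is >= 0.0.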
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
  // Not FP? Not a fsel.
  if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
      !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
    return SDOperand();

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // Cannot handle SETEQ/SETNE.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();

  MVT::ValueType ResVT = Op.getValueType();
  MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
  SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDOperand TV  = Op.getOperand(2), FV  = Op.getOperand(3);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETUGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
|  | 766 | case ISD::SETULE: | 
|  | 767 | case ISD::SETLE: | 
|  | 768 | if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits | 
|  | 769 | LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); | 
|  | 770 | return DAG.getNode(PPCISD::FSEL, ResVT, | 
|  | 771 | DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); | 
|  | 772 | } | 
|  | 773 |  | 
|  | 774 | SDOperand Cmp; | 
|  | 775 | switch (CC) { | 
|  | 776 | default: break;       // SETUO etc aren't handled by fsel. | 
|  | 777 | case ISD::SETULT: | 
|  | 778 | case ISD::SETLT: | 
|  | 779 | Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); | 
|  | 780 | if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits | 
|  | 781 | Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); | 
|  | 782 | return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); | 
|  | 783 | case ISD::SETUGE: | 
|  | 784 | case ISD::SETGE: | 
|  | 785 | Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); | 
|  | 786 | if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits | 
|  | 787 | Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); | 
|  | 788 | return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); | 
|  | 789 | case ISD::SETUGT: | 
|  | 790 | case ISD::SETGT: | 
|  | 791 | Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); | 
|  | 792 | if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits | 
|  | 793 | Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); | 
|  | 794 | return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); | 
|  | 795 | case ISD::SETULE: | 
|  | 796 | case ISD::SETLE: | 
|  | 797 | Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); | 
|  | 798 | if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits | 
|  | 799 | Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); | 
|  | 800 | return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); | 
|  | 801 | } | 
|  | 802 | return SDOperand(); | 
|  | 803 | } | 
|  | 804 |  | 
|  | 805 | static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { | 
|  | 806 | assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); | 
|  | 807 | SDOperand Src = Op.getOperand(0); | 
|  | 808 | if (Src.getValueType() == MVT::f32) | 
|  | 809 | Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); | 
|  | 810 |  | 
|  | 811 | SDOperand Tmp; | 
|  | 812 | switch (Op.getValueType()) { | 
|  | 813 | default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); | 
|  | 814 | case MVT::i32: | 
|  | 815 | Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); | 
|  | 816 | break; | 
|  | 817 | case MVT::i64: | 
|  | 818 | Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src); | 
|  | 819 | break; | 
|  | 820 | } | 
|  | 821 |  | 
|  | 822 | // Convert the FP value to an int value through memory. | 
|  | 823 | SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp); | 
|  | 824 | if (Op.getValueType() == MVT::i32) | 
|  | 825 | Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits); | 
|  | 826 | return Bits; | 
|  | 827 | } | 
|  | 828 |  | 
|  | 829 | static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { | 
|  | 830 | if (Op.getOperand(0).getValueType() == MVT::i64) { | 
|  | 831 | SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); | 
|  | 832 | SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); | 
|  | 833 | if (Op.getValueType() == MVT::f32) | 
|  | 834 | FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); | 
|  | 835 | return FP; | 
|  | 836 | } | 
|  | 837 |  | 
|  | 838 | assert(Op.getOperand(0).getValueType() == MVT::i32 && | 
|  | 839 | "Unhandled SINT_TO_FP type in custom expander!"); | 
|  | 840 | // Since we only generate this in 64-bit mode, we can take advantage of | 
|  | 841 | // 64-bit registers.  In particular, sign extend the input value into a | 
|  | 842 | // 64-bit register with extsw, store the WHOLE 64-bit value to the stack, | 
|  | 843 | // then lfd and fcfid it. | 
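|  |  | // For example, for the i32 value -5, extsw produces the 64-bit pattern | 
|  |  | // 0xFFFFFFFFFFFFFFFB; storing all 8 bytes with std ensures that the lfd/fcfid | 
|  |  | // pair sees a correctly sign-extended 64-bit integer rather than whatever | 
|  |  | // happens to be in the adjacent word. | 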
|  | 844 | MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); | 
|  | 845 | int FrameIdx = FrameInfo->CreateStackObject(8, 8); | 
|  | 846 | SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32); | 
|  | 847 |  | 
|  | 848 | SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, | 
|  | 849 | Op.getOperand(0)); | 
|  | 850 |  | 
|  | 851 | // STD the extended value into the stack slot. | 
|  | 852 | SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, | 
|  | 853 | DAG.getEntryNode(), Ext64, FIdx, | 
|  | 854 | DAG.getSrcValue(NULL)); | 
|  | 855 | // Load the value as a double. | 
|  | 856 | SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL)); | 
|  | 857 |  | 
|  | 858 | // FCFID it and return it. | 
|  | 859 | SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); | 
|  | 860 | if (Op.getValueType() == MVT::f32) | 
|  | 861 | FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); | 
|  | 862 | return FP; | 
|  | 863 | } | 
|  | 864 |  | 
|  | 865 | static SDOperand LowerSHL(SDOperand Op, SelectionDAG &DAG) { | 
|  | 866 | assert(Op.getValueType() == MVT::i64 && | 
|  | 867 | Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!"); | 
|  | 868 | // The generic code does a fine job expanding shift by a constant. | 
|  | 869 | if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand(); | 
|  | 870 |  | 
|  | 871 | // Otherwise, expand into a bunch of logical ops.  Note that these ops | 
|  | 872 | // depend on the PPC behavior for oversized shift amounts. | 
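|  |  | // (Illustrative example, relying on slw/srw producing 0 for 6-bit shift | 
|  |  | // amounts in the range 32-63: for Amt = 40, Tmp1 = 32-40 = -8, whose low six | 
|  |  | // bits are 56, so Tmp3 = srl(Lo, 56) = 0 and Tmp2 = shl(Hi, 40) = 0; Tmp5 = | 
|  |  | // 40-32 = 8, so OutHi = shl(Lo, 8) and OutLo = shl(Lo, 40) = 0, which is the | 
|  |  | // correct 64-bit result.) | 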
|  | 873 | SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), | 
|  | 874 | DAG.getConstant(0, MVT::i32)); | 
|  | 875 | SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), | 
|  | 876 | DAG.getConstant(1, MVT::i32)); | 
|  | 877 | SDOperand Amt = Op.getOperand(1); | 
|  | 878 |  | 
|  | 879 | SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, | 
|  | 880 | DAG.getConstant(32, MVT::i32), Amt); | 
|  | 881 | SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt); | 
|  | 882 | SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1); | 
|  | 883 | SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); | 
|  | 884 | SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, | 
|  | 885 | DAG.getConstant(-32U, MVT::i32)); | 
|  | 886 | SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5); | 
|  | 887 | SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); | 
|  | 888 | SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt); | 
|  | 889 | return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); | 
|  | 890 | } | 
|  | 891 |  | 
|  | 892 | static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG) { | 
|  | 893 | assert(Op.getValueType() == MVT::i64 && | 
|  | 894 | Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!"); | 
|  | 895 | // The generic code does a fine job expanding shift by a constant. | 
|  | 896 | if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand(); | 
|  | 897 |  | 
|  | 898 | // Otherwise, expand into a bunch of logical ops.  Note that these ops | 
|  | 899 | // depend on the PPC behavior for oversized shift amounts. | 
|  | 900 | SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), | 
|  | 901 | DAG.getConstant(0, MVT::i32)); | 
|  | 902 | SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), | 
|  | 903 | DAG.getConstant(1, MVT::i32)); | 
|  | 904 | SDOperand Amt = Op.getOperand(1); | 
|  | 905 |  | 
|  | 906 | SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, | 
|  | 907 | DAG.getConstant(32, MVT::i32), Amt); | 
|  | 908 | SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); | 
|  | 909 | SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); | 
|  | 910 | SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); | 
|  | 911 | SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, | 
|  | 912 | DAG.getConstant(-32U, MVT::i32)); | 
|  | 913 | SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5); | 
|  | 914 | SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); | 
|  | 915 | SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt); | 
|  | 916 | return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); | 
|  | 917 | } | 
|  | 918 |  | 
|  | 919 | static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG) { | 
|  | 920 | assert(Op.getValueType() == MVT::i64 && | 
|  | 921 | Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!"); | 
|  | 922 | // The generic code does a fine job expanding shift by a constant. | 
|  | 923 | if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand(); | 
|  | 924 |  | 
|  | 925 | // Otherwise, expand into a bunch of logical ops, followed by a select_cc. | 
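|  |  | // The select is needed because, unlike slw/srw, sraw does not yield 0 for | 
|  |  | // shift amounts in the 32-63 range -- it fills the result with the sign bit. | 
|  |  | // For Amt <= 32 (Tmp5 <= 0) the OR-combined value Tmp4 is the correct low | 
|  |  | // word; for Amt > 32 the correct low word is sra(Hi, Amt-32), i.e. Tmp6. | 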
|  | 926 | SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), | 
|  | 927 | DAG.getConstant(0, MVT::i32)); | 
|  | 928 | SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), | 
|  | 929 | DAG.getConstant(1, MVT::i32)); | 
|  | 930 | SDOperand Amt = Op.getOperand(1); | 
|  | 931 |  | 
|  | 932 | SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, | 
|  | 933 | DAG.getConstant(32, MVT::i32), Amt); | 
|  | 934 | SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); | 
|  | 935 | SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); | 
|  | 936 | SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); | 
|  | 937 | SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, | 
|  | 938 | DAG.getConstant(-32U, MVT::i32)); | 
|  | 939 | SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5); | 
|  | 940 | SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt); | 
|  | 941 | SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32), | 
|  | 942 | Tmp4, Tmp6, ISD::SETLE); | 
|  | 943 | return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); | 
|  | 944 | } | 
|  | 945 |  | 
|  | 946 | //===----------------------------------------------------------------------===// | 
|  | 947 | // Vector related lowering. | 
|  | 948 | // | 
|  | 949 |  | 
| Chris Lattner | ac225ca | 2006-04-12 19:07:14 +0000 | [diff] [blame] | 950 | // If this is a vector of constants or undefs, get the bits.  A bit in | 
|  | 951 | // UndefBits is set if the corresponding element of the vector is an | 
|  | 952 | // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are | 
|  | 953 | // zero.   Return true if this is not an array of constants, false if it is. | 
|  | 954 | // | 
| Chris Lattner | ac225ca | 2006-04-12 19:07:14 +0000 | [diff] [blame] | 955 | static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], | 
|  | 956 | uint64_t UndefBits[2]) { | 
|  | 957 | // Start with zero'd results. | 
|  | 958 | VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; | 
|  | 959 |  | 
|  | 960 | unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); | 
|  | 961 | for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { | 
|  | 962 | SDOperand OpVal = BV->getOperand(i); | 
|  | 963 |  | 
|  | 964 | unsigned PartNo = i >= e/2;     // Which 64-bit half of the vector? | 
| Chris Lattner | b17f167 | 2006-04-16 01:01:29 +0000 | [diff] [blame] | 965 | unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t. | 
| Chris Lattner | ac225ca | 2006-04-12 19:07:14 +0000 | [diff] [blame] | 966 |  | 
|  | 967 | uint64_t EltBits = 0; | 
|  | 968 | if (OpVal.getOpcode() == ISD::UNDEF) { | 
|  | 969 | uint64_t EltUndefBits = ~0U >> (32-EltBitSize); | 
|  | 970 | UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); | 
|  | 971 | continue; | 
|  | 972 | } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { | 
|  | 973 | EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); | 
|  | 974 | } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { | 
|  | 975 | assert(CN->getValueType(0) == MVT::f32 && | 
|  | 976 | "Only one legal FP vector type!"); | 
|  | 977 | EltBits = FloatToBits(CN->getValue()); | 
|  | 978 | } else { | 
|  | 979 | // Nonconstant element. | 
|  | 980 | return true; | 
|  | 981 | } | 
|  | 982 |  | 
|  | 983 | VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); | 
|  | 984 | } | 
|  | 985 |  | 
|  | 986 | //printf("%llx %llx  %llx %llx\n", | 
|  | 987 | //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); | 
|  | 988 | return false; | 
|  | 989 | } | 
| Chris Lattner | ef819f8 | 2006-03-20 06:33:01 +0000 | [diff] [blame] | 990 |  | 
| Chris Lattner | b17f167 | 2006-04-16 01:01:29 +0000 | [diff] [blame] | 991 | // If this is a splat (repetition) of a value across the whole vector, return | 
|  | 992 | // the smallest size that splats it.  For example, "0x01010101010101..." is a | 
|  | 993 | // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and | 
|  | 994 | // SplatSize = 1 byte. | 
|  | 995 | static bool isConstantSplat(const uint64_t Bits128[2], | 
|  | 996 | const uint64_t Undef128[2], | 
|  | 997 | unsigned &SplatBits, unsigned &SplatUndef, | 
|  | 998 | unsigned &SplatSize) { | 
|  | 999 |  | 
|  | 1000 | // Don't let undefs prevent splats from matching.  See if the top 64-bits are | 
|  | 1001 | // the same as the lower 64-bits, ignoring undefs. | 
|  | 1002 | if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) | 
|  | 1003 | return false;  // Can't be a splat if two pieces don't match. | 
|  | 1004 |  | 
|  | 1005 | uint64_t Bits64  = Bits128[0] | Bits128[1]; | 
|  | 1006 | uint64_t Undef64 = Undef128[0] & Undef128[1]; | 
|  | 1007 |  | 
|  | 1008 | // Check that the top 32-bits are the same as the lower 32-bits, ignoring | 
|  | 1009 | // undefs. | 
|  | 1010 | if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) | 
|  | 1011 | return false;  // Can't be a splat if two pieces don't match. | 
|  | 1012 |  | 
|  | 1013 | uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32); | 
|  | 1014 | uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); | 
|  | 1015 |  | 
|  | 1016 | // If the top 16-bits are different than the lower 16-bits, ignoring | 
|  | 1017 | // undefs, we have an i32 splat. | 
|  | 1018 | if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { | 
|  | 1019 | SplatBits = Bits32; | 
|  | 1020 | SplatUndef = Undef32; | 
|  | 1021 | SplatSize = 4; | 
|  | 1022 | return true; | 
|  | 1023 | } | 
|  | 1024 |  | 
|  | 1025 | uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16); | 
|  | 1026 | uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); | 
|  | 1027 |  | 
|  | 1028 | // If the top 8-bits are different than the lower 8-bits, ignoring | 
|  | 1029 | // undefs, we have an i16 splat. | 
|  | 1030 | if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { | 
|  | 1031 | SplatBits = Bits16; | 
|  | 1032 | SplatUndef = Undef16; | 
|  | 1033 | SplatSize = 2; | 
|  | 1034 | return true; | 
|  | 1035 | } | 
|  | 1036 |  | 
|  | 1037 | // Otherwise, we have an 8-bit splat. | 
|  | 1038 | SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8); | 
|  | 1039 | SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); | 
|  | 1040 | SplatSize = 1; | 
|  | 1041 | return true; | 
|  | 1042 | } | 
|  | 1043 |  | 
| Chris Lattner | 4a998b9 | 2006-04-17 06:00:21 +0000 | [diff] [blame] | 1044 | /// BuildSplatI - Build a canonical splati of Val with an element size of | 
|  | 1045 | /// SplatSize.  Cast the result to VT. | 
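|  |  | /// For example (illustrative), BuildSplatI(3, 2, MVT::v16i8, DAG) builds a | 
|  |  | /// v8i16 BUILD_VECTOR of the i16 constant 3 (which the selector can match to | 
|  |  | /// vspltish 3) and bitcasts the result to v16i8. | 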
|  | 1046 | static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, | 
|  | 1047 | SelectionDAG &DAG) { | 
|  | 1048 | assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); | 
| Chris Lattner | 6876e66 | 2006-04-17 06:58:41 +0000 | [diff] [blame] | 1049 |  | 
|  | 1050 | // Force vspltis[hw] -1 to vspltisb -1. | 
|  | 1051 | if (Val == -1) SplatSize = 1; | 
|  | 1052 |  | 
| Chris Lattner | 4a998b9 | 2006-04-17 06:00:21 +0000 | [diff] [blame] | 1053 | static const MVT::ValueType VTys[] = { // canonical VT to use for each size. | 
|  | 1054 | MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 | 
|  | 1055 | }; | 
|  | 1056 | MVT::ValueType CanonicalVT = VTys[SplatSize-1]; | 
|  | 1057 |  | 
|  | 1058 | // Build a canonical splat for this value. | 
|  | 1059 | SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT)); | 
|  | 1060 | std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt); | 
|  | 1061 | SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops); | 
|  | 1062 | return DAG.getNode(ISD::BIT_CONVERT, VT, Res); | 
|  | 1063 | } | 
|  | 1064 |  | 
| Chris Lattner | 6876e66 | 2006-04-17 06:58:41 +0000 | [diff] [blame] | 1065 | /// BuildIntrinsicBinOp - Return a binary operator intrinsic node with the | 
|  | 1066 | /// specified intrinsic ID. | 
|  | 1067 | static SDOperand BuildIntrinsicBinOp(unsigned IID, SDOperand LHS, SDOperand RHS, | 
|  | 1068 | SelectionDAG &DAG) { | 
|  | 1069 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, LHS.getValueType(), | 
|  | 1070 | DAG.getConstant(IID, MVT::i32), LHS, RHS); | 
|  | 1071 | } | 
|  | 1072 |  | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1073 | // If this is a case we can't handle, return null and let the default | 
|  | 1074 | // expansion code take care of it.  If we CAN select this case, and if it | 
|  | 1075 | // selects to a single instruction, return Op.  Otherwise, if we can codegen | 
|  | 1076 | // this case more efficiently than a constant pool load, lower it to the | 
|  | 1077 | // sequence of ops that should be used. | 
|  | 1078 | static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { | 
|  | 1079 | // If this is a vector of constants or undefs, get the bits.  A bit in | 
|  | 1080 | // UndefBits is set if the corresponding element of the vector is an | 
|  | 1081 | // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are | 
|  | 1082 | // zero. | 
|  | 1083 | uint64_t VectorBits[2]; | 
|  | 1084 | uint64_t UndefBits[2]; | 
|  | 1085 | if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) | 
|  | 1086 | return SDOperand();   // Not a constant vector. | 
|  | 1087 |  | 
| Chris Lattner | b17f167 | 2006-04-16 01:01:29 +0000 | [diff] [blame] | 1088 | // If this is a splat (repetition) of a value across the whole vector, return | 
|  | 1089 | // the smallest size that splats it.  For example, "0x01010101010101..." is a | 
|  | 1090 | // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and | 
|  | 1091 | // SplatSize = 1 byte. | 
|  | 1092 | unsigned SplatBits, SplatUndef, SplatSize; | 
|  | 1093 | if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ | 
|  | 1094 | bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; | 
|  | 1095 |  | 
|  | 1096 | // First, handle single instruction cases. | 
|  | 1097 |  | 
|  | 1098 | // All zeros? | 
|  | 1099 | if (SplatBits == 0) { | 
|  | 1100 | // Canonicalize all zero vectors to be v4i32. | 
|  | 1101 | if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { | 
|  | 1102 | SDOperand Z = DAG.getConstant(0, MVT::i32); | 
|  | 1103 | Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); | 
|  | 1104 | Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); | 
|  | 1105 | } | 
|  | 1106 | return Op; | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1107 | } | 
| Chris Lattner | b17f167 | 2006-04-16 01:01:29 +0000 | [diff] [blame] | 1108 |  | 
|  | 1109 | // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. | 
|  | 1110 | int32_t SextVal = int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize); | 
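|  |  | // (The shift pair sign-extends the low 8*SplatSize bits: e.g. for SplatSize | 
|  |  | // == 1 and SplatBits == 0xF0, int32_t(0xF0000000) >> 24 is -16.) | 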
| Chris Lattner | 4a998b9 | 2006-04-17 06:00:21 +0000 | [diff] [blame] | 1111 | if (SextVal >= -16 && SextVal <= 15) | 
|  | 1112 | return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG); | 
| Chris Lattner | b17f167 | 2006-04-16 01:01:29 +0000 | [diff] [blame] | 1113 |  | 
| Chris Lattner | 4a998b9 | 2006-04-17 06:00:21 +0000 | [diff] [blame] | 1114 | // If this value is in the range [-32,30] and is even, use: | 
|  | 1115 | //    tmp = VSPLTI[bhw], result = add tmp, tmp | 
|  | 1116 | if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { | 
|  | 1117 | Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG); | 
|  | 1118 | return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op); | 
|  | 1119 | } | 
| Chris Lattner | 6876e66 | 2006-04-17 06:58:41 +0000 | [diff] [blame] | 1120 |  | 
|  | 1121 | // If this is 0x7FFF_FFFF x 4, turn it into not(0x8000_0000) with | 
|  | 1122 | // vspltisw -1 + vslw + xor.  (0x8000_0000 x 4 itself is handled by the | 
|  | 1123 | // vsplti + shl self case below.)  This is important for fneg/fabs. | 
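|  |  | // Sketch of the sequence used below: vspltisw -1 puts 0xFFFFFFFF in every | 
|  |  | // word; vslw shifts each word left by the low 5 bits of the corresponding | 
|  |  | // word of the shift operand (31 here), giving 0x80000000; the final xor with | 
|  |  | // the all-ones vector flips that to 0x7FFFFFFF. | 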
|  | 1124 | if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { | 
|  | 1125 | // Make an all-ones vector (vsplti -1): | 
|  | 1126 | SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); | 
|  | 1127 |  | 
|  | 1128 | // Make the VSLW intrinsic, computing 0x8000_0000. | 
|  | 1129 | SDOperand Res = BuildIntrinsicBinOp(Intrinsic::ppc_altivec_vslw, OnesV, | 
|  | 1130 | OnesV, DAG); | 
|  | 1131 |  | 
|  | 1132 | // xor by OnesV to invert it. | 
|  | 1133 | Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); | 
|  | 1134 | return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); | 
|  | 1135 | } | 
|  | 1136 |  | 
|  | 1137 | // Check to see if this is a wide variety of vsplti*, binop self cases. | 
|  | 1138 | unsigned SplatBitSize = SplatSize*8; | 
|  | 1139 | static const signed char SplatCsts[] = { | 
|  | 1140 | -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, | 
|  | 1141 | -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, -14, 14, -15, 15 | 
|  | 1142 | }; | 
|  | 1143 | for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){ | 
|  | 1144 | // Indirect through the SplatCsts array so that we favor 'vsplti -1' for | 
|  | 1145 | // cases which are ambiguous (e.g. formation of 0x8000_0000). | 
|  | 1146 | int i = SplatCsts[idx]; | 
|  | 1147 |  | 
|  | 1148 | // Figure out what shift amount will be used by altivec if shifted by i in | 
|  | 1149 | // this splat size. | 
|  | 1150 | unsigned TypeShiftAmt = i & (SplatBitSize-1); | 
|  | 1151 |  | 
|  | 1152 | // vsplti + shl self. | 
|  | 1153 | if (SextVal == (i << (int)TypeShiftAmt)) { | 
|  | 1154 | Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); | 
|  | 1155 | static const unsigned IIDs[] = { // Intrinsic to use for each size. | 
|  | 1156 | Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, | 
|  | 1157 | Intrinsic::ppc_altivec_vslw | 
|  | 1158 | }; | 
|  | 1159 | return BuildIntrinsicBinOp(IIDs[SplatSize-1], Op, Op, DAG); | 
|  | 1160 | } | 
|  | 1161 |  | 
|  | 1162 | // vsplti + srl self. | 
|  | 1163 | if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { | 
|  | 1164 | Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); | 
|  | 1165 | static const unsigned IIDs[] = { // Intrinsic to use for each size. | 
|  | 1166 | Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, | 
|  | 1167 | Intrinsic::ppc_altivec_vsrw | 
|  | 1168 | }; | 
|  | 1169 | return BuildIntrinsicBinOp(IIDs[SplatSize-1], Op, Op, DAG); | 
|  | 1170 | } | 
|  | 1171 |  | 
|  | 1172 | // vsplti + sra self. | 
|  | 1173 | if (SextVal == (i >> (int)TypeShiftAmt)) { | 
|  | 1174 | Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); | 
|  | 1175 | static const unsigned IIDs[] = { // Intrinsic to use for each size. | 
|  | 1176 | Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, | 
|  | 1177 | Intrinsic::ppc_altivec_vsraw | 
|  | 1178 | }; | 
|  | 1179 | return BuildIntrinsicBinOp(IIDs[SplatSize-1], Op, Op, DAG); | 
|  | 1180 | } | 
|  | 1181 |  | 
|  | 1182 | // TODO: ROL. | 
|  | 1183 | } | 
|  | 1184 |  | 
|  | 1185 |  | 
|  | 1186 |  | 
|  | 1187 | // Three instruction sequences. | 
|  | 1188 |  | 
| Chris Lattner | c408382 | 2006-04-17 06:07:44 +0000 | [diff] [blame] | 1189 | // Otherwise, in range [17,29]:  (vsplti 15) + (vsplti C). | 
|  | 1190 | if (SextVal >= 0 && SextVal <= 29) { | 
|  | 1191 | SDOperand LHS = BuildSplatI(15, SplatSize, Op.getValueType(), DAG); | 
|  | 1192 | SDOperand RHS = BuildSplatI(SextVal-15, SplatSize, Op.getValueType(),DAG); | 
|  | 1193 | return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS); | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1194 | } | 
|  | 1195 | } | 
| Chris Lattner | b17f167 | 2006-04-16 01:01:29 +0000 | [diff] [blame] | 1196 |  | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1197 | return SDOperand(); | 
|  | 1198 | } | 
|  | 1199 |  | 
| Chris Lattner | 5913810 | 2006-04-17 05:28:54 +0000 | [diff] [blame] | 1200 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit | 
|  | 1201 | /// the specified operations to build the shuffle. | 
|  | 1202 | static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, | 
|  | 1203 | SDOperand RHS, SelectionDAG &DAG) { | 
|  | 1204 | unsigned OpNum = (PFEntry >> 26) & 0x0F; | 
|  | 1205 | unsigned LHSID  = (PFEntry >> 13) & ((1 << 13)-1); | 
|  | 1206 | unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1); | 
|  | 1207 |  | 
|  | 1208 | enum { | 
|  | 1209 | OP_COPY = 0,   // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> | 
|  | 1210 | OP_VMRGHW, | 
|  | 1211 | OP_VMRGLW, | 
|  | 1212 | OP_VSPLTISW0, | 
|  | 1213 | OP_VSPLTISW1, | 
|  | 1214 | OP_VSPLTISW2, | 
|  | 1215 | OP_VSPLTISW3, | 
|  | 1216 | OP_VSLDOI4, | 
|  | 1217 | OP_VSLDOI8, | 
|  | 1218 | OP_VSLDOI12, | 
|  | 1219 | }; | 
|  | 1220 |  | 
|  | 1221 | if (OpNum == OP_COPY) { | 
|  | 1222 | if (LHSID == (1*9+2)*9+3) return LHS; | 
|  | 1223 | assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); | 
|  | 1224 | return RHS; | 
|  | 1225 | } | 
|  | 1226 |  | 
|  | 1227 | unsigned ShufIdxs[16]; | 
|  | 1228 | switch (OpNum) { | 
|  | 1229 | default: assert(0 && "Unknown i32 permute!"); | 
|  | 1230 | case OP_VMRGHW: | 
|  | 1231 | ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3; | 
|  | 1232 | ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; | 
|  | 1233 | ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7; | 
|  | 1234 | ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; | 
|  | 1235 | break; | 
|  | 1236 | case OP_VMRGLW: | 
|  | 1237 | ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; | 
|  | 1238 | ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; | 
|  | 1239 | ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; | 
|  | 1240 | ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; | 
|  | 1241 | break; | 
|  | 1242 | case OP_VSPLTISW0: | 
|  | 1243 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1244 | ShufIdxs[i] = (i&3)+0; | 
|  | 1245 | break; | 
|  | 1246 | case OP_VSPLTISW1: | 
|  | 1247 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1248 | ShufIdxs[i] = (i&3)+4; | 
|  | 1249 | break; | 
|  | 1250 | case OP_VSPLTISW2: | 
|  | 1251 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1252 | ShufIdxs[i] = (i&3)+8; | 
|  | 1253 | break; | 
|  | 1254 | case OP_VSPLTISW3: | 
|  | 1255 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1256 | ShufIdxs[i] = (i&3)+12; | 
|  | 1257 | break; | 
|  | 1258 | case OP_VSLDOI4: | 
|  | 1259 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1260 | ShufIdxs[i] = i+4; | 
|  | 1261 | break; | 
|  | 1262 | case OP_VSLDOI8: | 
|  | 1263 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1264 | ShufIdxs[i] = i+8; | 
|  | 1265 | break; | 
|  | 1266 | case OP_VSLDOI12: | 
|  | 1267 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1268 | ShufIdxs[i] = i+12; | 
|  | 1269 | break; | 
|  | 1270 | } | 
|  | 1271 | std::vector<SDOperand> Ops; | 
|  | 1272 | for (unsigned i = 0; i != 16; ++i) | 
|  | 1273 | Ops.push_back(DAG.getConstant(ShufIdxs[i], MVT::i32)); | 
|  | 1274 | SDOperand OpLHS, OpRHS; | 
|  | 1275 | OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); | 
|  | 1276 | OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); | 
|  | 1277 |  | 
|  | 1278 | return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS, | 
|  | 1279 | DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops)); | 
|  | 1280 | } | 
|  | 1281 |  | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1282 | /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this | 
|  | 1283 | /// is a shuffle we can handle in a single instruction, return it.  Otherwise, | 
|  | 1284 | /// return the code it can be lowered into.  Worst case, it can always be | 
|  | 1285 | /// lowered into a vperm. | 
|  | 1286 | static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { | 
|  | 1287 | SDOperand V1 = Op.getOperand(0); | 
|  | 1288 | SDOperand V2 = Op.getOperand(1); | 
|  | 1289 | SDOperand PermMask = Op.getOperand(2); | 
|  | 1290 |  | 
|  | 1291 | // Cases that are handled by instructions that take permute immediates | 
|  | 1292 | // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be | 
|  | 1293 | // selected by the instruction selector. | 
|  | 1294 | if (V2.getOpcode() == ISD::UNDEF) { | 
|  | 1295 | if (PPC::isSplatShuffleMask(PermMask.Val, 1) || | 
|  | 1296 | PPC::isSplatShuffleMask(PermMask.Val, 2) || | 
|  | 1297 | PPC::isSplatShuffleMask(PermMask.Val, 4) || | 
|  | 1298 | PPC::isVPKUWUMShuffleMask(PermMask.Val, true) || | 
|  | 1299 | PPC::isVPKUHUMShuffleMask(PermMask.Val, true) || | 
|  | 1300 | PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 || | 
|  | 1301 | PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) || | 
|  | 1302 | PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) || | 
|  | 1303 | PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) || | 
|  | 1304 | PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) || | 
|  | 1305 | PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) || | 
|  | 1306 | PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) { | 
|  | 1307 | return Op; | 
|  | 1308 | } | 
|  | 1309 | } | 
|  | 1310 |  | 
|  | 1311 | // Altivec has a variety of "shuffle immediates" that take two vector inputs | 
|  | 1312 | // and produce a fixed permutation.  If any of these match, do not lower to | 
|  | 1313 | // VPERM. | 
|  | 1314 | if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) || | 
|  | 1315 | PPC::isVPKUHUMShuffleMask(PermMask.Val, false) || | 
|  | 1316 | PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 || | 
|  | 1317 | PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) || | 
|  | 1318 | PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) || | 
|  | 1319 | PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) || | 
|  | 1320 | PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) || | 
|  | 1321 | PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) || | 
|  | 1322 | PPC::isVMRGHShuffleMask(PermMask.Val, 4, false)) | 
|  | 1323 | return Op; | 
|  | 1324 |  | 
| Chris Lattner | 5913810 | 2006-04-17 05:28:54 +0000 | [diff] [blame] | 1325 | // Check to see if this is a shuffle of 4-byte values.  If so, we can use our | 
|  | 1326 | // perfect shuffle table to emit an optimal matching sequence. | 
|  | 1327 | unsigned PFIndexes[4]; | 
|  | 1328 | bool isFourElementShuffle = true; | 
|  | 1329 | for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number | 
|  | 1330 | unsigned EltNo = 8;   // Start out undef. | 
|  | 1331 | for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte. | 
|  | 1332 | if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF) | 
|  | 1333 | continue;   // Undef, ignore it. | 
|  | 1334 |  | 
|  | 1335 | unsigned ByteSource = | 
|  | 1336 | cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue(); | 
|  | 1337 | if ((ByteSource & 3) != j) { | 
|  | 1338 | isFourElementShuffle = false; | 
|  | 1339 | break; | 
|  | 1340 | } | 
|  | 1341 |  | 
|  | 1342 | if (EltNo == 8) { | 
|  | 1343 | EltNo = ByteSource/4; | 
|  | 1344 | } else if (EltNo != ByteSource/4) { | 
|  | 1345 | isFourElementShuffle = false; | 
|  | 1346 | break; | 
|  | 1347 | } | 
|  | 1348 | } | 
|  | 1349 | PFIndexes[i] = EltNo; | 
|  | 1350 | } | 
|  | 1351 |  | 
|  | 1352 | // If this shuffle can be expressed as a shuffle of 4-byte elements, use the | 
|  | 1353 | // perfect shuffle vector to determine if it is cost effective to do this as | 
|  | 1354 | // discrete instructions, or whether we should use a vperm. | 
|  | 1355 | if (isFourElementShuffle) { | 
|  | 1356 | // Compute the index in the perfect shuffle table. | 
|  | 1357 | unsigned PFTableIndex = | 
|  | 1358 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; | 
|  | 1359 |  | 
|  | 1360 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; | 
|  | 1361 | unsigned Cost  = (PFEntry >> 30); | 
|  | 1362 |  | 
|  | 1363 | // Determining when to avoid vperm is tricky.  Many things affect the cost | 
|  | 1364 | // of vperm, particularly how many times the perm mask needs to be computed. | 
|  | 1365 | // For example, if the perm mask can be hoisted out of a loop or is already | 
|  | 1366 | // used (perhaps because there are multiple permutes with the same shuffle | 
|  | 1367 | // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of | 
|  | 1368 | // the loop requires an extra register. | 
|  | 1369 | // | 
|  | 1370 | // As a compromise, we only emit discrete instructions if the shuffle can be | 
|  | 1371 | // generated in 3 or fewer operations.  When we have loop information | 
|  | 1372 | // available, if this block is within a loop, we should avoid using vperm | 
|  | 1373 | // for 3-operation perms and use a constant pool load instead. | 
|  | 1374 | if (Cost < 3) | 
|  | 1375 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG); | 
|  | 1376 | } | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1377 |  | 
|  | 1378 | // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant | 
|  | 1379 | // vector that will get spilled to the constant pool. | 
|  | 1380 | if (V2.getOpcode() == ISD::UNDEF) V2 = V1; | 
|  | 1381 |  | 
|  | 1382 | // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except | 
|  | 1383 | // that it is in input element units, not in bytes.  Convert now. | 
|  | 1384 | MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType()); | 
|  | 1385 | unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; | 
|  | 1386 |  | 
|  | 1387 | std::vector<SDOperand> ResultMask; | 
|  | 1388 | for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { | 
| Chris Lattner | 730b456 | 2006-04-15 23:48:05 +0000 | [diff] [blame] | 1389 | unsigned SrcElt; | 
|  | 1390 | if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) | 
|  | 1391 | SrcElt = 0; | 
|  | 1392 | else | 
|  | 1393 | SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); | 
| Chris Lattner | f1b4708 | 2006-04-14 05:19:18 +0000 | [diff] [blame] | 1394 |  | 
|  | 1395 | for (unsigned j = 0; j != BytesPerElement; ++j) | 
|  | 1396 | ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, | 
|  | 1397 | MVT::i8)); | 
|  | 1398 | } | 
|  | 1399 |  | 
|  | 1400 | SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask); | 
|  | 1401 | return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); | 
|  | 1402 | } | 
|  | 1403 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1404 | /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom | 
|  | 1405 | /// lower, do it, otherwise return null. | 
|  | 1406 | static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { | 
|  | 1407 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); | 
|  | 1408 |  | 
|  | 1409 | // If this is a lowered altivec predicate compare, CompareOpc is set to the | 
|  | 1410 | // opcode number of the comparison. | 
|  | 1411 | int CompareOpc = -1; | 
|  | 1412 | bool isDot = false; | 
|  | 1413 | switch (IntNo) { | 
|  | 1414 | default: return SDOperand();    // Don't custom lower most intrinsics. | 
|  | 1415 | // Comparison predicates. | 
|  | 1416 | case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break; | 
|  | 1417 | case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; | 
|  | 1418 | case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break; | 
|  | 1419 | case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break; | 
|  | 1420 | case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; | 
|  | 1421 | case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; | 
|  | 1422 | case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; | 
|  | 1423 | case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; | 
|  | 1424 | case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; | 
|  | 1425 | case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; | 
|  | 1426 | case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; | 
|  | 1427 | case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; | 
|  | 1428 | case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; | 
|  | 1429 |  | 
|  | 1430 | // Normal Comparisons. | 
|  | 1431 | case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break; | 
|  | 1432 | case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break; | 
|  | 1433 | case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break; | 
|  | 1434 | case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break; | 
|  | 1435 | case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break; | 
|  | 1436 | case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break; | 
|  | 1437 | case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break; | 
|  | 1438 | case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break; | 
|  | 1439 | case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break; | 
|  | 1440 | case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break; | 
|  | 1441 | case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break; | 
|  | 1442 | case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break; | 
|  | 1443 | case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break; | 
|  | 1444 | } | 
|  | 1445 |  | 
|  | 1446 | assert(CompareOpc>0 && "We only lower altivec predicate compares so far!"); | 
|  | 1447 |  | 
|  | 1448 | // If this is a non-dot comparison, make the VCMP node. | 
|  | 1449 | if (!isDot) { | 
|  | 1450 | SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), | 
|  | 1451 | Op.getOperand(1), Op.getOperand(2), | 
|  | 1452 | DAG.getConstant(CompareOpc, MVT::i32)); | 
|  | 1453 | return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); | 
|  | 1454 | } | 
|  | 1455 |  | 
|  | 1456 | // Create the PPCISD altivec 'dot' comparison node. | 
|  | 1457 | std::vector<SDOperand> Ops; | 
|  | 1458 | std::vector<MVT::ValueType> VTs; | 
|  | 1459 | Ops.push_back(Op.getOperand(2));  // LHS | 
|  | 1460 | Ops.push_back(Op.getOperand(3));  // RHS | 
|  | 1461 | Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32)); | 
|  | 1462 | VTs.push_back(Op.getOperand(2).getValueType()); | 
|  | 1463 | VTs.push_back(MVT::Flag); | 
|  | 1464 | SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops); | 
|  | 1465 |  | 
|  | 1466 | // Now that we have the comparison, emit a copy from the CR to a GPR. | 
|  | 1467 | // This is flagged to the above dot comparison. | 
|  | 1468 | SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, | 
|  | 1469 | DAG.getRegister(PPC::CR6, MVT::i32), | 
|  | 1470 | CompNode.getValue(1)); | 
|  | 1471 |  | 
|  | 1472 | // Unpack the result based on how the target uses it. | 
|  | 1473 | unsigned BitNo;   // Bit # of CR6. | 
|  | 1474 | bool InvertBit;   // Invert result? | 
|  | 1475 | switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { | 
|  | 1476 | default:  // Can't happen, don't crash on invalid number though. | 
|  | 1477 | case 0:   // Return the value of the EQ bit of CR6. | 
|  | 1478 | BitNo = 0; InvertBit = false; | 
|  | 1479 | break; | 
|  | 1480 | case 1:   // Return the inverted value of the EQ bit of CR6. | 
|  | 1481 | BitNo = 0; InvertBit = true; | 
|  | 1482 | break; | 
|  | 1483 | case 2:   // Return the value of the LT bit of CR6. | 
|  | 1484 | BitNo = 2; InvertBit = false; | 
|  | 1485 | break; | 
|  | 1486 | case 3:   // Return the inverted value of the LT bit of CR6. | 
|  | 1487 | BitNo = 2; InvertBit = true; | 
|  | 1488 | break; | 
|  | 1489 | } | 
|  | 1490 |  | 
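|  |  | // Note on the shift amount below: mfcr places CR field 6 in bits 4-7 of the | 
|  |  | // result (counting from the LSB), with SO, EQ, GT, LT at positions 4, 5, 6 | 
|  |  | // and 7 respectively, so the bit selected above sits at position 5+BitNo, | 
|  |  | // which is what 8-(3-BitNo) evaluates to. | 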
|  | 1491 | // Shift the bit into the low position. | 
|  | 1492 | Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, | 
|  | 1493 | DAG.getConstant(8-(3-BitNo), MVT::i32)); | 
|  | 1494 | // Isolate the bit. | 
|  | 1495 | Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, | 
|  | 1496 | DAG.getConstant(1, MVT::i32)); | 
|  | 1497 |  | 
|  | 1498 | // If we are supposed to, toggle the bit. | 
|  | 1499 | if (InvertBit) | 
|  | 1500 | Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, | 
|  | 1501 | DAG.getConstant(1, MVT::i32)); | 
|  | 1502 | return Flags; | 
|  | 1503 | } | 
|  | 1504 |  | 
|  | 1505 | static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { | 
|  | 1506 | // Create a stack slot that is 16-byte aligned. | 
|  | 1507 | MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); | 
|  | 1508 | int FrameIdx = FrameInfo->CreateStackObject(16, 16); | 
|  | 1509 | SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32); | 
|  | 1510 |  | 
|  | 1511 | // Store the input value into Value#0 of the stack slot. | 
|  | 1512 | SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(), | 
|  | 1513 | Op.getOperand(0), FIdx,DAG.getSrcValue(NULL)); | 
|  | 1514 | // Load it out. | 
|  | 1515 | return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL)); | 
|  | 1516 | } | 
|  | 1517 |  | 
| Chris Lattner | e4bc9ea | 2005-08-26 00:52:45 +0000 | [diff] [blame] | 1518 | /// LowerOperation - Provide custom lowering hooks for some operations. | 
|  | 1519 | /// | 
| Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1520 | SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { | 
| Chris Lattner | e4bc9ea | 2005-08-26 00:52:45 +0000 | [diff] [blame] | 1521 | switch (Op.getOpcode()) { | 
|  | 1522 | default: assert(0 && "Wasn't expecting to be able to lower this!"); | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1523 | case ISD::ConstantPool:       return LowerConstantPool(Op, DAG); | 
|  | 1524 | case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG); | 
|  | 1525 | case ISD::SETCC:              return LowerSETCC(Op, DAG); | 
|  | 1526 | case ISD::VASTART:            return LowerVASTART(Op, DAG, VarArgsFrameIndex); | 
|  | 1527 | case ISD::RET:                return LowerRET(Op, DAG); | 
| Chris Lattner | 7c0d664 | 2005-10-02 06:37:13 +0000 | [diff] [blame] | 1528 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1529 | case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG); | 
|  | 1530 | case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG); | 
|  | 1531 | case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG); | 
| Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1532 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1533 | // Lower 64-bit shifts. | 
|  | 1534 | case ISD::SHL:                return LowerSHL(Op, DAG); | 
|  | 1535 | case ISD::SRL:                return LowerSRL(Op, DAG); | 
|  | 1536 | case ISD::SRA:                return LowerSRA(Op, DAG); | 
| Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 1537 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1538 | // Vector-related lowering. | 
|  | 1539 | case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG); | 
|  | 1540 | case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG); | 
|  | 1541 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); | 
|  | 1542 | case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG); | 
| Chris Lattner | bc11c34 | 2005-08-31 20:23:54 +0000 | [diff] [blame] | 1543 | } | 
| Chris Lattner | e4bc9ea | 2005-08-26 00:52:45 +0000 | [diff] [blame] | 1544 | return SDOperand(); | 
|  | 1545 | } | 
|  | 1546 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1547 | //===----------------------------------------------------------------------===// | 
|  | 1548 | //  Other Lowering Code | 
|  | 1549 | //===----------------------------------------------------------------------===// | 
|  | 1550 |  | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1551 | std::vector<SDOperand> | 
| Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1552 | PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1553 | // | 
|  | 1554 | // add beautiful description of PPC stack frame format, or at least some docs | 
|  | 1555 | // | 
|  | 1556 | MachineFunction &MF = DAG.getMachineFunction(); | 
|  | 1557 | MachineFrameInfo *MFI = MF.getFrameInfo(); | 
|  | 1558 | MachineBasicBlock& BB = MF.front(); | 
| Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1559 | SSARegMap *RegMap = MF.getSSARegMap(); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1560 | std::vector<SDOperand> ArgValues; | 
|  | 1561 |  | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1562 | unsigned ArgOffset = 24; | 
|  | 1563 | unsigned GPR_remaining = 8; | 
|  | 1564 | unsigned FPR_remaining = 13; | 
|  | 1565 | unsigned GPR_idx = 0, FPR_idx = 0; | 
|  | 1566 | static const unsigned GPR[] = { | 
|  | 1567 | PPC::R3, PPC::R4, PPC::R5, PPC::R6, | 
|  | 1568 | PPC::R7, PPC::R8, PPC::R9, PPC::R10, | 
|  | 1569 | }; | 
|  | 1570 | static const unsigned FPR[] = { | 
|  | 1571 | PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, | 
|  | 1572 | PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 | 
|  | 1573 | }; | 
|  | 1574 |  | 
|  | 1575 | // Add DAG nodes to load the arguments...  On entry to a function on PPC, | 
|  | 1576 | // the arguments start at offset 24, although they are likely to be passed | 
|  | 1577 | // in registers. | 
|  | 1578 | for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { | 
|  | 1579 | SDOperand newroot, argt; | 
|  | 1580 | unsigned ObjSize; | 
|  | 1581 | bool needsLoad = false; | 
|  | 1582 | bool ArgLive = !I->use_empty(); | 
|  | 1583 | MVT::ValueType ObjectVT = getValueType(I->getType()); | 
|  | 1584 |  | 
|  | 1585 | switch (ObjectVT) { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1586 | default: assert(0 && "Unhandled argument type!"); | 
|  | 1587 | case MVT::i1: | 
|  | 1588 | case MVT::i8: | 
|  | 1589 | case MVT::i16: | 
|  | 1590 | case MVT::i32: | 
|  | 1591 | ObjSize = 4; | 
|  | 1592 | if (!ArgLive) break; | 
|  | 1593 | if (GPR_remaining > 0) { | 
| Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1594 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); | 
| Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1595 | MF.addLiveIn(GPR[GPR_idx], VReg); | 
|  | 1596 | argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); | 
| Nate Begeman | 49296f1 | 2005-08-31 01:58:39 +0000 | [diff] [blame] | 1597 | if (ObjectVT != MVT::i32) { | 
|  | 1598 | unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext | 
|  | 1599 | : ISD::AssertZext; | 
|  | 1600 | argt = DAG.getNode(AssertOp, MVT::i32, argt, | 
|  | 1601 | DAG.getValueType(ObjectVT)); | 
|  | 1602 | argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt); | 
|  | 1603 | } | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1604 | } else { | 
|  | 1605 | needsLoad = true; | 
|  | 1606 | } | 
|  | 1607 | break; | 
| Chris Lattner | 80720a9 | 2005-11-30 20:40:54 +0000 | [diff] [blame] | 1608 | case MVT::i64: | 
|  | 1609 | ObjSize = 8; | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1610 | if (!ArgLive) break; | 
|  | 1611 | if (GPR_remaining > 0) { | 
|  | 1612 | SDOperand argHi, argLo; | 
| Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1613 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); | 
| Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1614 | MF.addLiveIn(GPR[GPR_idx], VReg); | 
|  | 1615 | argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1616 | // If we have two or more remaining argument registers, then both halves | 
|  | 1617 | // of the i64 can be sourced from there.  Otherwise, the lower half will | 
|  | 1618 | // have to come off the stack.  This can happen when an i64 is preceded | 
|  | 1619 | // by 28 bytes of arguments. | 
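|  |  | // (Concretely: seven i32 arguments use R3-R9 and advance ArgOffset to 52, | 
|  |  | // so a following i64 gets its high word in R10 and its low word loaded from | 
|  |  | // the fixed stack slot at offset 56.) | 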
|  | 1620 | if (GPR_remaining > 1) { | 
| Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1621 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); | 
| Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1622 | MF.addLiveIn(GPR[GPR_idx+1], VReg); | 
|  | 1623 | argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32); | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1624 | } else { | 
|  | 1625 | int FI = MFI->CreateFixedObject(4, ArgOffset+4); | 
|  | 1626 | SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32); | 
|  | 1627 | argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN, | 
|  | 1628 | DAG.getSrcValue(NULL)); | 
|  | 1629 | } | 
|  | 1630 | // Build the full i64 argument value from the two 32-bit halves. | 
|  | 1631 | argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi); | 
|  | 1632 | newroot = argLo; | 
|  | 1633 | } else { | 
|  | 1634 | needsLoad = true; | 
|  | 1635 | } | 
|  | 1636 | break; | 
|  | 1637 | case MVT::f32: | 
|  | 1638 | case MVT::f64: | 
|  | 1639 | ObjSize = (ObjectVT == MVT::f64) ? 8 : 4; | 
| Chris Lattner | 413b979 | 2006-01-11 18:21:25 +0000 | [diff] [blame] | 1640 | if (!ArgLive) { | 
|  | 1641 | if (FPR_remaining > 0) { | 
|  | 1642 | --FPR_remaining; | 
|  | 1643 | ++FPR_idx; | 
|  | 1644 | } | 
|  | 1645 | break; | 
|  | 1646 | } | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1647 | if (FPR_remaining > 0) { | 
| Chris Lattner | 919c032 | 2005-10-01 01:35:02 +0000 | [diff] [blame] | 1648 | unsigned VReg; | 
|  | 1649 | if (ObjectVT == MVT::f32) | 
| Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1650 | VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass); | 
| Chris Lattner | 919c032 | 2005-10-01 01:35:02 +0000 | [diff] [blame] | 1651 | else | 
| Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1652 | VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass); | 
| Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1653 | MF.addLiveIn(FPR[FPR_idx], VReg); | 
|  | 1654 | argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT); | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1655 | --FPR_remaining; | 
|  | 1656 | ++FPR_idx; | 
|  | 1657 | } else { | 
|  | 1658 | needsLoad = true; | 
|  | 1659 | } | 
|  | 1660 | break; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1661 | } | 
|  | 1662 |  | 
|  | 1663 | // We need to load the argument from the stack if we determined above that | 
|  | 1664 | // we ran out of physical registers of the appropriate type. | 
|  | 1665 | if (needsLoad) { | 
|  | 1666 | unsigned SubregOffset = 0; | 
|  | 1667 | if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3; | 
|  | 1668 | if (ObjectVT == MVT::i16) SubregOffset = 2; | 
|  | 1669 | int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); | 
|  | 1670 | SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32); | 
|  | 1671 | FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, | 
|  | 1672 | DAG.getConstant(SubregOffset, MVT::i32)); | 
|  | 1673 | argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN, | 
|  | 1674 | DAG.getSrcValue(NULL)); | 
|  | 1675 | } | 
|  | 1676 |  | 
|  | 1677 | // Every 4 bytes of argument space consumes one of the GPRs available for | 
|  | 1678 | // argument passing. | 
|  | 1679 | if (GPR_remaining > 0) { | 
|  | 1680 | unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1; | 
|  | 1681 | GPR_remaining -= delta; | 
|  | 1682 | GPR_idx += delta; | 
|  | 1683 | } | 
|  | 1684 | ArgOffset += ObjSize; | 
|  | 1685 | if (newroot.Val) | 
|  | 1686 | DAG.setRoot(newroot.getValue(1)); | 
|  | 1687 |  | 
|  | 1688 | ArgValues.push_back(argt); | 
|  | 1689 | } | 
|  | 1690 |  | 
|  | 1691 | // If the function takes a variable number of arguments, make a frame index for | 
|  | 1692 | // the start of the first vararg value... for expansion of llvm.va_start. | 
|  | 1693 | if (F.isVarArg()) { | 
|  | 1694 | VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset); | 
|  | 1695 | SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32); | 
|  | 1696 | // If this function is vararg, store any remaining integer argument regs | 
|  | 1697 | // to their spots on the stack so that they may be loaded by dereferencing the | 
|  | 1698 | // result of va_next. | 
|  | 1699 | std::vector<SDOperand> MemOps; | 
|  | 1700 | for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) { | 
| Nate Begeman | 1d9d742 | 2005-10-18 00:28:58 +0000 | [diff] [blame] | 1701 | unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); | 
| Chris Lattner | 7b73834 | 2005-09-13 19:33:40 +0000 | [diff] [blame] | 1702 | MF.addLiveIn(GPR[GPR_idx], VReg); | 
|  | 1703 | SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1704 | SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1), | 
|  | 1705 | Val, FIN, DAG.getSrcValue(NULL)); | 
|  | 1706 | MemOps.push_back(Store); | 
|  | 1707 | // Increment the address by four for the next argument to store | 
|  | 1708 | SDOperand PtrOff = DAG.getConstant(4, getPointerTy()); | 
|  | 1709 | FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff); | 
|  | 1710 | } | 
| Chris Lattner | 80720a9 | 2005-11-30 20:40:54 +0000 | [diff] [blame] | 1711 | if (!MemOps.empty()) { | 
|  | 1712 | MemOps.push_back(DAG.getRoot()); | 
|  | 1713 | DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps)); | 
|  | 1714 | } | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1715 | } | 
|  | 1716 |  | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1717 | return ArgValues; | 
|  | 1718 | } | 
|  | 1719 |  | 
|  | 1720 | std::pair<SDOperand, SDOperand> | 
| Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1721 | PPCTargetLowering::LowerCallTo(SDOperand Chain, | 
|  | 1722 | const Type *RetTy, bool isVarArg, | 
|  | 1723 | unsigned CallingConv, bool isTailCall, | 
|  | 1724 | SDOperand Callee, ArgListTy &Args, | 
|  | 1725 | SelectionDAG &DAG) { | 
| Chris Lattner | 281b55e | 2006-01-27 23:34:02 +0000 | [diff] [blame] | 1726 | // args_to_use accumulates the outgoing arguments for the PPCISD::CALL case | 
|  | 1727 | // in SelectExpr, which uses it to put the arguments in the appropriate registers. | 
|  | 1728 | std::vector<SDOperand> args_to_use; | 
|  | 1729 |  | 
|  | 1730 | // Count how many bytes are to be pushed on the stack, including the linkage | 
|  | 1731 | // area, and parameter passing area. | 
|  | 1732 | unsigned NumBytes = 24; | 
|  | 1733 |  | 
|  | 1734 | if (Args.empty()) { | 
| Chris Lattner | 45b3976 | 2006-02-13 08:55:29 +0000 | [diff] [blame] | 1735 | Chain = DAG.getCALLSEQ_START(Chain, | 
|  | 1736 | DAG.getConstant(NumBytes, getPointerTy())); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1737 | } else { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1738 | for (unsigned i = 0, e = Args.size(); i != e; ++i) { | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1739 | switch (getValueType(Args[i].second)) { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1740 | default: assert(0 && "Unknown value type!"); | 
|  | 1741 | case MVT::i1: | 
|  | 1742 | case MVT::i8: | 
|  | 1743 | case MVT::i16: | 
|  | 1744 | case MVT::i32: | 
|  | 1745 | case MVT::f32: | 
|  | 1746 | NumBytes += 4; | 
|  | 1747 | break; | 
|  | 1748 | case MVT::i64: | 
|  | 1749 | case MVT::f64: | 
|  | 1750 | NumBytes += 8; | 
|  | 1751 | break; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1752 | } | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1753 | } | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1754 |  | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1755 | // Just to be safe, we'll always reserve the full 24 bytes of linkage area | 
|  | 1756 | // plus 32 bytes of argument space in case any called code gets funky on us. | 
|  | 1757 | // (This is required by the ABI to support varargs.) | 
|  | 1758 | if (NumBytes < 56) NumBytes = 56; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1759 |  | 
|  | 1760 | // Adjust the stack pointer for the new arguments... | 
|  | 1761 | // These operations are automatically eliminated by the prolog/epilog pass | 
| Chris Lattner | 45b3976 | 2006-02-13 08:55:29 +0000 | [diff] [blame] | 1762 | Chain = DAG.getCALLSEQ_START(Chain, | 
|  | 1763 | DAG.getConstant(NumBytes, getPointerTy())); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1764 |  | 
|  | 1765 | // Set up a copy of the stack pointer for use loading and storing any | 
|  | 1766 | // arguments that may not fit in the registers available for argument | 
|  | 1767 | // passing. | 
| Chris Lattner | a243db8 | 2006-01-11 19:55:07 +0000 | [diff] [blame] | 1768 | SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1769 |  | 
|  | 1770 | // Figure out which arguments are going to go in registers, and which in | 
|  | 1771 | // memory.  Also, if this is a vararg function, floating point operations | 
|  | 1772 | // must be stored to our stack, and loaded into integer regs as well, if | 
|  | 1773 | // any integer regs are available for argument passing. | 
|  | 1774 | unsigned ArgOffset = 24; | 
|  | 1775 | unsigned GPR_remaining = 8; | 
|  | 1776 | unsigned FPR_remaining = 13; | 
|  | 1777 |  | 
|  | 1778 | std::vector<SDOperand> MemOps; | 
|  | 1779 | for (unsigned i = 0, e = Args.size(); i != e; ++i) { | 
|  | 1780 | // PtrOff will be used to store the current argument to the stack if a | 
|  | 1781 | // register cannot be found for it. | 
|  | 1782 | SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); | 
|  | 1783 | PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); | 
|  | 1784 | MVT::ValueType ArgVT = getValueType(Args[i].second); | 
|  | 1785 |  | 
|  | 1786 | switch (ArgVT) { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1787 | default: assert(0 && "Unexpected ValueType for argument!"); | 
|  | 1788 | case MVT::i1: | 
|  | 1789 | case MVT::i8: | 
|  | 1790 | case MVT::i16: | 
|  | 1791 | // Promote the integer to 32 bits.  If the input type is signed use a | 
|  | 1792 | // sign extend, otherwise use a zero extend. | 
|  | 1793 | if (Args[i].second->isSigned()) | 
|  | 1794 | Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first); | 
|  | 1795 | else | 
|  | 1796 | Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first); | 
|  | 1797 | // FALL THROUGH | 
|  | 1798 | case MVT::i32: | 
|  | 1799 | if (GPR_remaining > 0) { | 
|  | 1800 | args_to_use.push_back(Args[i].first); | 
|  | 1801 | --GPR_remaining; | 
|  | 1802 | } else { | 
|  | 1803 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, | 
|  | 1804 | Args[i].first, PtrOff, | 
|  | 1805 | DAG.getSrcValue(NULL))); | 
|  | 1806 | } | 
|  | 1807 | ArgOffset += 4; | 
|  | 1808 | break; | 
|  | 1809 | case MVT::i64: | 
|  | 1810 | // If we have one free GPR left, we can place the upper half of the i64 | 
|  | 1811 | // in it, and store the other half to the stack.  If we have two or more | 
|  | 1812 | // free GPRs, then we can pass both halves of the i64 in registers. | 
|  | 1813 | if (GPR_remaining > 0) { | 
|  | 1814 | SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, | 
|  | 1815 | Args[i].first, DAG.getConstant(1, MVT::i32)); | 
|  | 1816 | SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, | 
|  | 1817 | Args[i].first, DAG.getConstant(0, MVT::i32)); | 
|  | 1818 | args_to_use.push_back(Hi); | 
|  | 1819 | --GPR_remaining; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1820 | if (GPR_remaining > 0) { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1821 | args_to_use.push_back(Lo); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1822 | --GPR_remaining; | 
|  | 1823 | } else { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1824 | SDOperand ConstFour = DAG.getConstant(4, getPointerTy()); | 
|  | 1825 | PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1826 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1827 | Lo, PtrOff, DAG.getSrcValue(NULL))); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1828 | } | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1829 | } else { | 
|  | 1830 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, | 
|  | 1831 | Args[i].first, PtrOff, | 
|  | 1832 | DAG.getSrcValue(NULL))); | 
|  | 1833 | } | 
|  | 1834 | ArgOffset += 8; | 
|  | 1835 | break; | 
|  | 1836 | case MVT::f32: | 
|  | 1837 | case MVT::f64: | 
|  | 1838 | if (FPR_remaining > 0) { | 
|  | 1839 | args_to_use.push_back(Args[i].first); | 
|  | 1840 | --FPR_remaining; | 
|  | 1841 | if (isVarArg) { | 
|  | 1842 | SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain, | 
|  | 1843 | Args[i].first, PtrOff, | 
|  | 1844 | DAG.getSrcValue(NULL)); | 
|  | 1845 | MemOps.push_back(Store); | 
|  | 1846 | // Float varargs are always shadowed in available integer registers | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1847 | if (GPR_remaining > 0) { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1848 | SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff, | 
|  | 1849 | DAG.getSrcValue(NULL)); | 
| Chris Lattner | 1df7478 | 2005-11-17 18:30:17 +0000 | [diff] [blame] | 1850 | MemOps.push_back(Load.getValue(1)); | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1851 | args_to_use.push_back(Load); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1852 | --GPR_remaining; | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1853 | } | 
|  | 1854 | if (GPR_remaining > 0 && MVT::f64 == ArgVT) { | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1855 | SDOperand ConstFour = DAG.getConstant(4, getPointerTy()); | 
|  | 1856 | PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour); | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1857 | SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff, | 
|  | 1858 | DAG.getSrcValue(NULL)); | 
| Chris Lattner | 1df7478 | 2005-11-17 18:30:17 +0000 | [diff] [blame] | 1859 | MemOps.push_back(Load.getValue(1)); | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1860 | args_to_use.push_back(Load); | 
|  | 1861 | --GPR_remaining; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1862 | } | 
|  | 1863 | } else { | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1864 | // If we have any FPRs remaining, we may also have GPRs remaining. | 
|  | 1865 | // Args passed in FPRs consume either 1 (f32) or 2 (f64) available | 
|  | 1866 | // GPRs. | 
|  | 1867 | if (GPR_remaining > 0) { | 
|  | 1868 | args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); | 
|  | 1869 | --GPR_remaining; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1870 | } | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1871 | if (GPR_remaining > 0 && MVT::f64 == ArgVT) { | 
|  | 1872 | args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); | 
|  | 1873 | --GPR_remaining; | 
|  | 1874 | } | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1875 | } | 
| Chris Lattner | 915fb30 | 2005-08-30 00:19:00 +0000 | [diff] [blame] | 1876 | } else { | 
|  | 1877 | MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, | 
|  | 1878 | Args[i].first, PtrOff, | 
|  | 1879 | DAG.getSrcValue(NULL))); | 
|  | 1880 | } | 
|  | 1881 | ArgOffset += (ArgVT == MVT::f32) ? 4 : 8; | 
|  | 1882 | break; | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1883 | } | 
|  | 1884 | } | 
|  | 1885 | if (!MemOps.empty()) | 
|  | 1886 | Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps); | 
|  | 1887 | } | 
|  | 1888 |  | 
|  | 1889 | std::vector<MVT::ValueType> RetVals; | 
|  | 1890 | MVT::ValueType RetTyVT = getValueType(RetTy); | 
| Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1891 | MVT::ValueType ActualRetTyVT = RetTyVT; | 
|  | 1892 | if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16) | 
|  | 1893 | ActualRetTyVT = MVT::i32;   // Promote result to i32. | 
|  | 1894 |  | 
| Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1895 | if (RetTyVT == MVT::i64) { | 
|  | 1896 | RetVals.push_back(MVT::i32); | 
|  | 1897 | RetVals.push_back(MVT::i32); | 
|  | 1898 | } else if (RetTyVT != MVT::isVoid) { | 
| Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1899 | RetVals.push_back(ActualRetTyVT); | 
| Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1900 | } | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1901 | RetVals.push_back(MVT::Other); | 
|  | 1902 |  | 
| Chris Lattner | 2823b3e | 2005-11-17 05:56:14 +0000 | [diff] [blame] | 1903 | // If the callee is a GlobalAddress node (quite common; every direct call is), | 
|  | 1904 | // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. | 
|  | 1905 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) | 
|  | 1906 | Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32); | 
|  | 1907 |  | 
| Chris Lattner | 281b55e | 2006-01-27 23:34:02 +0000 | [diff] [blame] | 1908 | std::vector<SDOperand> Ops; | 
|  | 1909 | Ops.push_back(Chain); | 
|  | 1910 | Ops.push_back(Callee); | 
|  | 1911 | Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end()); | 
|  | 1912 | SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops); | 
| Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1913 | Chain = TheCall.getValue(TheCall.Val->getNumValues()-1); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1914 | Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain, | 
|  | 1915 | DAG.getConstant(NumBytes, getPointerTy())); | 
| Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1916 | SDOperand RetVal = TheCall; | 
|  | 1917 |  | 
|  | 1918 | // If the result is a small value, add a note so that we keep track of whether | 
|  | 1919 | // it is sign- or zero-extended. | 
|  | 1920 | if (RetTyVT != ActualRetTyVT) { | 
|  | 1921 | RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext, | 
|  | 1922 | MVT::i32, RetVal, DAG.getValueType(RetTyVT)); | 
|  | 1923 | RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal); | 
| Chris Lattner | e00ebf0 | 2006-01-28 07:33:03 +0000 | [diff] [blame] | 1924 | } else if (RetTyVT == MVT::i64) { | 
|  | 1925 | RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1)); | 
| Chris Lattner | f505949 | 2005-09-02 01:24:55 +0000 | [diff] [blame] | 1926 | } | 
|  | 1927 |  | 
|  | 1928 | return std::make_pair(RetVal, Chain); | 
| Chris Lattner | 7c5a3d3 | 2005-08-16 17:14:42 +0000 | [diff] [blame] | 1929 | } | 
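The sizing logic above reduces to a small arithmetic rule: 24 bytes of linkage area, one 4-byte slot per promoted i32/f32 argument, two slots per i64/f64, and a 56-byte floor so a varargs callee always has 32 bytes of spill space. The standalone sketch below restates that rule; the names and the 24-byte starting value are assumptions for illustration only and are not part of the lowering code.

#include <cstdio>
#include <vector>

enum class ArgKind { Int32, Float32, Int64, Float64 };

// Mirror of the NumBytes computation in the call lowering above (sketch only).
static unsigned computeOutgoingArgBytes(const std::vector<ArgKind> &Args) {
  unsigned NumBytes = 24;                                    // linkage area
  for (ArgKind K : Args)
    NumBytes += (K == ArgKind::Int64 || K == ArgKind::Float64) ? 8 : 4;
  if (NumBytes < 56) NumBytes = 56;    // 24 bytes linkage + 32 bytes minimum
  return NumBytes;
}

int main() {
  std::vector<ArgKind> Args(9, ArgKind::Int32);              // nine i32 args
  std::printf("%u\n", computeOutgoingArgBytes(Args));        // prints 60
  return 0;
}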
|  | 1930 |  | 
| Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1931 | MachineBasicBlock * | 
| Nate Begeman | 21e463b | 2005-10-16 05:39:50 +0000 | [diff] [blame] | 1932 | PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, | 
|  | 1933 | MachineBasicBlock *BB) { | 
| Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1934 | assert((MI->getOpcode() == PPC::SELECT_CC_Int || | 
| Chris Lattner | 919c032 | 2005-10-01 01:35:02 +0000 | [diff] [blame] | 1935 | MI->getOpcode() == PPC::SELECT_CC_F4 || | 
| Chris Lattner | 710ff32 | 2006-04-08 22:45:08 +0000 | [diff] [blame] | 1936 | MI->getOpcode() == PPC::SELECT_CC_F8 || | 
|  | 1937 | MI->getOpcode() == PPC::SELECT_CC_VRRC) && | 
| Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1938 | "Unexpected instr type to insert"); | 
|  | 1939 |  | 
|  | 1940 | // To "insert" a SELECT_CC instruction, we actually have to insert the diamond | 
|  | 1941 | // control-flow pattern.  The incoming instruction knows the destination vreg | 
|  | 1942 | // to set, the condition code register to branch on, the true/false values to | 
|  | 1943 | // select between, and a branch opcode to use. | 
|  | 1944 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | 
|  | 1945 | ilist<MachineBasicBlock>::iterator It = BB; | 
|  | 1946 | ++It; | 
|  | 1947 |  | 
|  | 1948 | //  thisMBB: | 
|  | 1949 | //  ... | 
|  | 1950 | //   TrueVal = ... | 
|  | 1951 | //   cmpTY ccX, r1, r2 | 
|  | 1952 | //   bCC copy1MBB | 
|  | 1953 | //   fallthrough --> copy0MBB | 
|  | 1954 | MachineBasicBlock *thisMBB = BB; | 
|  | 1955 | MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); | 
|  | 1956 | MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); | 
|  | 1957 | BuildMI(BB, MI->getOperand(4).getImmedValue(), 2) | 
|  | 1958 | .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); | 
|  | 1959 | MachineFunction *F = BB->getParent(); | 
|  | 1960 | F->getBasicBlockList().insert(It, copy0MBB); | 
|  | 1961 | F->getBasicBlockList().insert(It, sinkMBB); | 
| Nate Begeman | f15485a | 2006-03-27 01:32:24 +0000 | [diff] [blame] | 1962 | // Update machine-CFG edges by first adding all successors of the current | 
|  | 1963 | // block to the new block, which will contain the PHI node for the select. | 
|  | 1964 | for (MachineBasicBlock::succ_iterator i = BB->succ_begin(), | 
|  | 1965 | e = BB->succ_end(); i != e; ++i) | 
|  | 1966 | sinkMBB->addSuccessor(*i); | 
|  | 1967 | // Next, remove all successors of the current block, and add the true | 
|  | 1968 | // and fallthrough blocks as its successors. | 
|  | 1969 | while (!BB->succ_empty()) | 
|  | 1970 | BB->removeSuccessor(BB->succ_begin()); | 
| Chris Lattner | 8a2d3ca | 2005-08-26 21:23:58 +0000 | [diff] [blame] | 1971 | BB->addSuccessor(copy0MBB); | 
|  | 1972 | BB->addSuccessor(sinkMBB); | 
|  | 1973 |  | 
|  | 1974 | //  copy0MBB: | 
|  | 1975 | //   %FalseValue = ... | 
|  | 1976 | //   # fallthrough to sinkMBB | 
|  | 1977 | BB = copy0MBB; | 
|  | 1978 |  | 
|  | 1979 | // Update machine-CFG edges | 
|  | 1980 | BB->addSuccessor(sinkMBB); | 
|  | 1981 |  | 
|  | 1982 | //  sinkMBB: | 
|  | 1983 | //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] | 
|  | 1984 | //  ... | 
|  | 1985 | BB = sinkMBB; | 
|  | 1986 | BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg()) | 
|  | 1987 | .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) | 
|  | 1988 | .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); | 
|  | 1989 |  | 
|  | 1990 | delete MI;   // The pseudo instruction is gone now. | 
|  | 1991 | return BB; | 
|  | 1992 | } | 
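As a reading aid, the diamond that InsertAtEndOfBasicBlock builds has the same meaning as the plain conditional sketched below; the block names in the comments correspond to the machine basic blocks created above. This is an illustration of the pattern, not code used by the backend.

static int selectCC(bool Cond, int TrueVal, int FalseVal) {
  int Result;
  if (Cond) {          // thisMBB: cmp + bCC, branch taken -> sinkMBB
    Result = TrueVal;
  } else {             // copy0MBB: %FalseValue = ..., fall through
    Result = FalseVal;
  }
  return Result;       // sinkMBB: %Result = phi [FalseValue], [TrueValue]
}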
|  | 1993 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 1994 | //===----------------------------------------------------------------------===// | 
|  | 1995 | // Target Optimization Hooks | 
|  | 1996 | //===----------------------------------------------------------------------===// | 
|  | 1997 |  | 
| Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 1998 | SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, | 
|  | 1999 | DAGCombinerInfo &DCI) const { | 
|  | 2000 | TargetMachine &TM = getTargetMachine(); | 
|  | 2001 | SelectionDAG &DAG = DCI.DAG; | 
|  | 2002 | switch (N->getOpcode()) { | 
|  | 2003 | default: break; | 
|  | 2004 | case ISD::SINT_TO_FP: | 
|  | 2005 | if (TM.getSubtarget<PPCSubtarget>().is64Bit()) { | 
| Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 2006 | if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { | 
|  | 2007 | // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. | 
|  | 2008 | // We allow the src/dst to be either f32/f64, but the intermediate | 
|  | 2009 | // type must be i64. | 
|  | 2010 | if (N->getOperand(0).getValueType() == MVT::i64) { | 
|  | 2011 | SDOperand Val = N->getOperand(0).getOperand(0); | 
|  | 2012 | if (Val.getValueType() == MVT::f32) { | 
|  | 2013 | Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); | 
|  | 2014 | DCI.AddToWorklist(Val.Val); | 
|  | 2015 | } | 
|  | 2016 |  | 
|  | 2017 | Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val); | 
| Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 2018 | DCI.AddToWorklist(Val.Val); | 
| Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 2019 | Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val); | 
| Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 2020 | DCI.AddToWorklist(Val.Val); | 
| Chris Lattner | ecfe55e | 2006-03-22 05:30:33 +0000 | [diff] [blame] | 2021 | if (N->getValueType(0) == MVT::f32) { | 
|  | 2022 | Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val); | 
|  | 2023 | DCI.AddToWorklist(Val.Val); | 
|  | 2024 | } | 
|  | 2025 | return Val; | 
|  | 2026 | } else if (N->getOperand(0).getValueType() == MVT::i32) { | 
|  | 2027 | // If the intermediate type is i32, we can avoid the load/store here | 
|  | 2028 | // too. | 
| Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 2029 | } | 
| Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 2030 | } | 
|  | 2031 | } | 
|  | 2032 | break; | 
| Chris Lattner | 5126984 | 2006-03-01 05:50:56 +0000 | [diff] [blame] | 2033 | case ISD::STORE: | 
|  | 2034 | // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). | 
|  | 2035 | if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && | 
|  | 2036 | N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && | 
|  | 2037 | N->getOperand(1).getValueType() == MVT::i32) { | 
|  | 2038 | SDOperand Val = N->getOperand(1).getOperand(0); | 
|  | 2039 | if (Val.getValueType() == MVT::f32) { | 
|  | 2040 | Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); | 
|  | 2041 | DCI.AddToWorklist(Val.Val); | 
|  | 2042 | } | 
|  | 2043 | Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val); | 
|  | 2044 | DCI.AddToWorklist(Val.Val); | 
|  | 2045 |  | 
|  | 2046 | Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val, | 
|  | 2047 | N->getOperand(2), N->getOperand(3)); | 
|  | 2048 | DCI.AddToWorklist(Val.Val); | 
|  | 2049 | return Val; | 
|  | 2050 | } | 
|  | 2051 | break; | 
| Chris Lattner | 4468c22 | 2006-03-31 06:02:07 +0000 | [diff] [blame] | 2052 | case PPCISD::VCMP: { | 
|  | 2053 | // If a VCMPo node already exists with exactly the same operands as this | 
|  | 2054 | // node, use its result instead of this node (VCMPo computes both a CR6 and | 
|  | 2055 | // a normal output). | 
|  | 2056 | // | 
|  | 2057 | if (!N->getOperand(0).hasOneUse() && | 
|  | 2058 | !N->getOperand(1).hasOneUse() && | 
|  | 2059 | !N->getOperand(2).hasOneUse()) { | 
|  | 2060 |  | 
|  | 2061 | // Scan all of the users of the LHS, looking for VCMPo's that match. | 
|  | 2062 | SDNode *VCMPoNode = 0; | 
|  | 2063 |  | 
|  | 2064 | SDNode *LHSN = N->getOperand(0).Val; | 
|  | 2065 | for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); | 
|  | 2066 | UI != E; ++UI) | 
|  | 2067 | if ((*UI)->getOpcode() == PPCISD::VCMPo && | 
|  | 2068 | (*UI)->getOperand(1) == N->getOperand(1) && | 
|  | 2069 | (*UI)->getOperand(2) == N->getOperand(2) && | 
|  | 2070 | (*UI)->getOperand(0) == N->getOperand(0)) { | 
|  | 2071 | VCMPoNode = *UI; | 
|  | 2072 | break; | 
|  | 2073 | } | 
|  | 2074 |  | 
|  | 2075 | // If there are non-zero uses of the flag value, use the VCMPo node! | 
| Chris Lattner | 33497cc | 2006-03-31 06:04:53 +0000 | [diff] [blame] | 2076 | if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1)) | 
| Chris Lattner | 4468c22 | 2006-03-31 06:02:07 +0000 | [diff] [blame] | 2077 | return SDOperand(VCMPoNode, 0); | 
|  | 2078 | } | 
|  | 2079 | break; | 
|  | 2080 | } | 
| Chris Lattner | 8c13d0a | 2006-03-01 04:57:39 +0000 | [diff] [blame] | 2081 | } | 
|  | 2082 |  | 
|  | 2083 | return SDOperand(); | 
|  | 2084 | } | 
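For orientation, the two DAG combines above correspond to source-level patterns like the sketches below (assuming a 64-bit-capable subtarget for the first and STFIWX support for the second); the function names are invented for illustration.

// (sint_to_fp (fp_to_sint X)) with an i64 intermediate: lowered to
// fctidz + fcfid with no load/store round trip.
static double truncateTowardZero(double X) {
  return static_cast<double>(static_cast<long long>(X));
}

// STORE (fp_to_sint F) with an i32 result: lowered to fctiwz + stfiwx,
// again without bouncing the value through a GPR.
static void storeTruncated(double F, int *Dest) {
  *Dest = static_cast<int>(F);
}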
|  | 2085 |  | 
| Chris Lattner | 1a635d6 | 2006-04-14 06:01:58 +0000 | [diff] [blame] | 2086 | //===----------------------------------------------------------------------===// | 
|  | 2087 | // Inline Assembly Support | 
|  | 2088 | //===----------------------------------------------------------------------===// | 
|  | 2089 |  | 
| Chris Lattner | bbe77de | 2006-04-02 06:26:07 +0000 | [diff] [blame] | 2090 | void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, | 
|  | 2091 | uint64_t Mask, | 
|  | 2092 | uint64_t &KnownZero, | 
|  | 2093 | uint64_t &KnownOne, | 
|  | 2094 | unsigned Depth) const { | 
|  | 2095 | KnownZero = 0; | 
|  | 2096 | KnownOne = 0; | 
|  | 2097 | switch (Op.getOpcode()) { | 
|  | 2098 | default: break; | 
|  | 2099 | case ISD::INTRINSIC_WO_CHAIN: { | 
|  | 2100 | switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) { | 
|  | 2101 | default: break; | 
|  | 2102 | case Intrinsic::ppc_altivec_vcmpbfp_p: | 
|  | 2103 | case Intrinsic::ppc_altivec_vcmpeqfp_p: | 
|  | 2104 | case Intrinsic::ppc_altivec_vcmpequb_p: | 
|  | 2105 | case Intrinsic::ppc_altivec_vcmpequh_p: | 
|  | 2106 | case Intrinsic::ppc_altivec_vcmpequw_p: | 
|  | 2107 | case Intrinsic::ppc_altivec_vcmpgefp_p: | 
|  | 2108 | case Intrinsic::ppc_altivec_vcmpgtfp_p: | 
|  | 2109 | case Intrinsic::ppc_altivec_vcmpgtsb_p: | 
|  | 2110 | case Intrinsic::ppc_altivec_vcmpgtsh_p: | 
|  | 2111 | case Intrinsic::ppc_altivec_vcmpgtsw_p: | 
|  | 2112 | case Intrinsic::ppc_altivec_vcmpgtub_p: | 
|  | 2113 | case Intrinsic::ppc_altivec_vcmpgtuh_p: | 
|  | 2114 | case Intrinsic::ppc_altivec_vcmpgtuw_p: | 
|  | 2115 | KnownZero = ~1U;  // All bits but the low one are known to be zero. | 
|  | 2116 | break; | 
|  | 2117 | } | 
|  | 2118 | } | 
|  | 2119 | } | 
|  | 2120 | } | 
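The AltiVec comparison-predicate intrinsics listed above produce either 0 or 1, so reporting KnownZero = ~1 tells the DAG combiner that every bit except the low one is zero. A minimal sketch of the kind of code this lets it simplify is shown below; the helper name is hypothetical.

static bool anyElementMatched(int PredicateResult) {
  // With KnownZero = ~1 the '& 1' below is provably a no-op and can be
  // folded away, leaving a direct test of the intrinsic's result.
  return (PredicateResult & 1) != 0;
}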
|  | 2121 |  | 
|  | 2122 |  | 
| Chris Lattner | ad3bc8d | 2006-02-07 20:16:30 +0000 | [diff] [blame] | 2123 | /// getConstraintType - Given a constraint letter, return the type of | 
|  | 2124 | /// constraint it is for this target. | 
|  | 2125 | PPCTargetLowering::ConstraintType | 
|  | 2126 | PPCTargetLowering::getConstraintType(char ConstraintLetter) const { | 
|  | 2127 | switch (ConstraintLetter) { | 
|  | 2128 | default: break; | 
|  | 2129 | case 'b': | 
|  | 2130 | case 'r': | 
|  | 2131 | case 'f': | 
|  | 2132 | case 'v': | 
|  | 2133 | case 'y': | 
|  | 2134 | return C_RegisterClass; | 
|  | 2135 | } | 
|  | 2136 | return TargetLowering::getConstraintType(ConstraintLetter); | 
|  | 2137 | } | 
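These are the GCC RS6000 constraint letters, so they show up in user inline asm such as the sketch below. The asm string assumes a 32-bit PowerPC target and exists only to show 'r' (any GPR) in use; 'b', 'f', 'v' and 'y' select base registers (no r0), FPRs, vector registers and condition registers in the same way.

static int addWithInlineAsm(int A, int B) {
  int Sum;
  __asm__("add %0, %1, %2" : "=r"(Sum) : "r"(A), "r"(B));   // 'r': any GPR
  return Sum;
}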
|  | 2138 |  | 
|  | 2139 |  | 
| Chris Lattner | ddc787d | 2006-01-31 19:20:21 +0000 | [diff] [blame] | 2140 | std::vector<unsigned> PPCTargetLowering:: | 
| Chris Lattner | 1efa40f | 2006-02-22 00:56:39 +0000 | [diff] [blame] | 2141 | getRegClassForInlineAsmConstraint(const std::string &Constraint, | 
|  | 2142 | MVT::ValueType VT) const { | 
| Chris Lattner | ddc787d | 2006-01-31 19:20:21 +0000 | [diff] [blame] | 2143 | if (Constraint.size() == 1) { | 
|  | 2144 | switch (Constraint[0]) {      // GCC RS6000 Constraint Letters | 
|  | 2145 | default: break;  // Unknown constraint letter | 
|  | 2146 | case 'b': | 
|  | 2147 | return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 , | 
|  | 2148 | PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 , | 
|  | 2149 | PPC::R8 , PPC::R9 , PPC::R10, PPC::R11, | 
|  | 2150 | PPC::R12, PPC::R13, PPC::R14, PPC::R15, | 
|  | 2151 | PPC::R16, PPC::R17, PPC::R18, PPC::R19, | 
|  | 2152 | PPC::R20, PPC::R21, PPC::R22, PPC::R23, | 
|  | 2153 | PPC::R24, PPC::R25, PPC::R26, PPC::R27, | 
|  | 2154 | PPC::R28, PPC::R29, PPC::R30, PPC::R31, | 
|  | 2155 | 0); | 
|  | 2156 | case 'r': | 
|  | 2157 | return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 , | 
|  | 2158 | PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 , | 
|  | 2159 | PPC::R8 , PPC::R9 , PPC::R10, PPC::R11, | 
|  | 2160 | PPC::R12, PPC::R13, PPC::R14, PPC::R15, | 
|  | 2161 | PPC::R16, PPC::R17, PPC::R18, PPC::R19, | 
|  | 2162 | PPC::R20, PPC::R21, PPC::R22, PPC::R23, | 
|  | 2163 | PPC::R24, PPC::R25, PPC::R26, PPC::R27, | 
|  | 2164 | PPC::R28, PPC::R29, PPC::R30, PPC::R31, | 
|  | 2165 | 0); | 
|  | 2166 | case 'f': | 
|  | 2167 | return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 , | 
|  | 2168 | PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 , | 
|  | 2169 | PPC::F8 , PPC::F9 , PPC::F10, PPC::F11, | 
|  | 2170 | PPC::F12, PPC::F13, PPC::F14, PPC::F15, | 
|  | 2171 | PPC::F16, PPC::F17, PPC::F18, PPC::F19, | 
|  | 2172 | PPC::F20, PPC::F21, PPC::F22, PPC::F23, | 
|  | 2173 | PPC::F24, PPC::F25, PPC::F26, PPC::F27, | 
|  | 2174 | PPC::F28, PPC::F29, PPC::F30, PPC::F31, | 
|  | 2175 | 0); | 
|  | 2176 | case 'v': | 
|  | 2177 | return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , | 
|  | 2178 | PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 , | 
|  | 2179 | PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, | 
|  | 2180 | PPC::V12, PPC::V13, PPC::V14, PPC::V15, | 
|  | 2181 | PPC::V16, PPC::V17, PPC::V18, PPC::V19, | 
|  | 2182 | PPC::V20, PPC::V21, PPC::V22, PPC::V23, | 
|  | 2183 | PPC::V24, PPC::V25, PPC::V26, PPC::V27, | 
|  | 2184 | PPC::V28, PPC::V29, PPC::V30, PPC::V31, | 
|  | 2185 | 0); | 
|  | 2186 | case 'y': | 
|  | 2187 | return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3, | 
|  | 2188 | PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7, | 
|  | 2189 | 0); | 
|  | 2190 | } | 
|  | 2191 | } | 
|  | 2192 |  | 
| Chris Lattner | 1efa40f | 2006-02-22 00:56:39 +0000 | [diff] [blame] | 2193 | return std::vector<unsigned>(); | 
| Chris Lattner | ddc787d | 2006-01-31 19:20:21 +0000 | [diff] [blame] | 2194 | } | 
| Chris Lattner | 763317d | 2006-02-07 00:47:13 +0000 | [diff] [blame] | 2195 |  | 
|  | 2196 | /// isOperandValidForConstraint - Return true if Op is valid for the constraint. | 
|  | 2197 | bool PPCTargetLowering:: | 
|  | 2198 | isOperandValidForConstraint(SDOperand Op, char Letter) { | 
|  | 2199 | switch (Letter) { | 
|  | 2200 | default: break; | 
|  | 2201 | case 'I': | 
|  | 2202 | case 'J': | 
|  | 2203 | case 'K': | 
|  | 2204 | case 'L': | 
|  | 2205 | case 'M': | 
|  | 2206 | case 'N': | 
|  | 2207 | case 'O': | 
|  | 2208 | case 'P': { | 
|  | 2209 | if (!isa<ConstantSDNode>(Op)) return false;  // Must be an immediate. | 
|  | 2210 | unsigned Value = cast<ConstantSDNode>(Op)->getValue(); | 
|  | 2211 | switch (Letter) { | 
|  | 2212 | default: assert(0 && "Unknown constraint letter!"); | 
|  | 2213 | case 'I':  // "I" is a signed 16-bit constant. | 
|  | 2214 | return (short)Value == (int)Value; | 
|  | 2215 | case 'J':  // "J" is a constant with only the high-order 16 bits nonzero. | 
|  | 2216 | case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits. | 
|  | 2217 | return (short)Value == 0; | 
|  | 2218 | case 'K':  // "K" is a constant with only the low-order 16 bits nonzero. | 
|  | 2219 | return (Value >> 16) == 0; | 
|  | 2220 | case 'M':  // "M" is a constant that is greater than 31. | 
|  | 2221 | return Value > 31; | 
|  | 2222 | case 'N':  // "N" is a positive constant that is an exact power of two. | 
|  | 2223 | return (int)Value > 0 && isPowerOf2_32(Value); | 
|  | 2224 | case 'O':  // "O" is the constant zero. | 
|  | 2225 | return Value == 0; | 
|  | 2226 | case 'P':  // "P" is a constant whose negation is a signed 16-bit constant. | 
|  | 2227 | return (short)-Value == (int)-Value; | 
|  | 2228 | } | 
|  | 2229 | break; | 
|  | 2230 | } | 
|  | 2231 | } | 
|  | 2232 |  | 
|  | 2233 | // Handle standard constraint letters. | 
|  | 2234 | return TargetLowering::isOperandValidForConstraint(Op, Letter); | 
|  | 2235 | } | 
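Restated against plain integers, the immediate constraints above reduce to the small predicates below; only a representative subset is shown, and the helper names are invented for illustration.

static bool isConstraintI(unsigned Value) {    // signed 16-bit constant
  return (short)Value == (int)Value;
}
static bool isConstraintK(unsigned Value) {    // only low 16 bits may be nonzero
  return (Value >> 16) == 0;
}
static bool isConstraintL(unsigned Value) {    // signed 16-bit constant << 16
  return (short)Value == 0;
}
static bool isConstraintO(unsigned Value) {    // the constant zero
  return Value == 0;
}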
| Evan Cheng | c4c6257 | 2006-03-13 23:20:37 +0000 | [diff] [blame] | 2236 |  | 
|  | 2237 | /// isLegalAddressImmediate - Return true if the integer value can be used | 
|  | 2238 | /// as the offset of the target addressing mode. | 
|  | 2239 | bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const { | 
|  | 2240 | // PPC allows a sign-extended 16-bit immediate field. | 
|  | 2241 | return (V > -(1 << 16) && V < (1 << 16)-1); | 
|  | 2242 | } |