//===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64ISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "IA64ISelLowering.h"
#include "IA64MachineFunctionInfo.h"
#include "IA64TargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
using namespace llvm;

IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // register class for general registers
  addRegisterClass(MVT::i64, IA64::GRRegisterClass);

  // register class for FP registers
  addRegisterClass(MVT::f64, IA64::FPRegisterClass);

  // register class for predicate registers
  addRegisterClass(MVT::i1, IA64::PRRegisterClass);

  setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);

  setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Expand);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i16, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i32, Expand);

  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);

  // ia64 uses SELECT not SELECT_CC
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // We need to handle ISD::RET for void functions ourselves,
  // so we get a chance to restore ar.pfs before adding a
  // br.ret insn
  setOperationAction(ISD::RET, MVT::Other, Custom);

  setSetCCResultType(MVT::i1);
  setShiftAmountType(MVT::i64);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  // FIXME: ISD::UREM is an integer operation, so these two settings look like
  // leftovers from the FREM lines above.
  setOperationAction(ISD::UREM, MVT::f32, Expand);
  setOperationAction(ISD::UREM, MVT::f64, Expand);

  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);

  // We don't support sin/cos/sqrt
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  // FIXME: IA64 supports fcopysign natively!
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  setOperationAction(ISD::LABEL, MVT::Other, Expand);

  // IA64 has these, but they are not implemented
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand); // mux @rev

  setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  setStackPointerRegisterToSaveRestore(IA64::r12);

  setJumpBufSize(704);     // on ia64-linux, jmp_bufs are 704 bytes..
  setJumpBufAlignment(16); // ...and must be 16-byte aligned

  computeRegisterProperties();

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  addLegalFPImmediate(APFloat(+0.0));
  addLegalFPImmediate(APFloat(+1.0));
}

const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case IA64ISD::GETFD:    return "IA64ISD::GETFD";
  case IA64ISD::BRCALL:   return "IA64ISD::BRCALL";
  case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
  }
}


std::vector<SDOperand>
IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;
  //
  // add beautiful description of IA64 stack frame format
  // here (from intel 24535803.pdf most likely)
  //
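  // In the meantime, a short summary of what the code below implements: the
  // first eight arguments arrive in the stacked registers r32-r39, with FP
  // arguments taking f8-f15 (consumed in order) instead; anything past the
  // eighth argument is read from the caller's frame at offset
  // 16 + 8*(argnum - 8) off the incoming sp, one 8-byte slot per argument.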
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));

  MachineBasicBlock& BB = MF.front();

  unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
                         IA64::r36, IA64::r37, IA64::r38, IA64::r39};

  unsigned args_FP[] = {IA64::F8,  IA64::F9,  IA64::F10, IA64::F11,
                        IA64::F12, IA64::F13, IA64::F14, IA64::F15};

  unsigned argVreg[8];
  unsigned argPreg[8];
  unsigned argOpc[8];

  unsigned used_FPArgs = 0; // how many FP args have been used so far?

  unsigned ArgOffset = 0;
  int count = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
    {
      SDOperand newroot, argt;
      if(count < 8) { // need to fix this logic? maybe.

        switch (getValueType(I->getType())) {
          default:
            assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
          case MVT::f32:
            // fixme? (well, will need to for weird FP structy stuff,
            // see intel ABI docs)
          case MVT::f64:
            //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
            MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
            // floating point args go into f8..f15 as needed (hence the
            // used_FPArgs++ below):
            argVreg[count] =
              MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
            argPreg[count] = args_FP[used_FPArgs++];
            argOpc[count] = IA64::FMOV;
            argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
                                                MVT::f64);
            if (I->getType() == Type::FloatTy)
              argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt);
            break;
          case MVT::i1: // NOTE: as far as C abi stuff goes,
                        // bools are just boring old ints
          case MVT::i8:
          case MVT::i16:
          case MVT::i32:
          case MVT::i64:
            //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
            MF.addLiveIn(args_int[count]); // mark this register as liveIn
            argVreg[count] =
              MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
            argPreg[count] = args_int[count];
            argOpc[count] = IA64::MOV;
            argt = newroot =
              DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
            if (getValueType(I->getType()) != MVT::i64)
              argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
                                 newroot);
            break;
        }
      } else { // more than 8 args go into the frame
        // Create the frame index object for this incoming parameter...
        ArgOffset = 16 + 8 * (count - 8);
        int FI = MFI->CreateFixedObject(8, ArgOffset);

        // Create the SelectionDAG nodes corresponding to a load
        // from this parameter.
        SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
        argt = newroot = DAG.getLoad(getValueType(I->getType()),
                                     DAG.getEntryNode(), FIN, NULL, 0);
      }
      ++count;
      DAG.setRoot(newroot.getValue(1));
      ArgValues.push_back(argt);
    }


  // Create a vreg to hold the output of (what will become)
  // the "alloc" instruction
  VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
  BuildMI(&BB, TII->get(IA64::PSEUDO_ALLOC), VirtGPR);
  // we create a PSEUDO_ALLOC (pseudo)instruction for now
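  // (The "alloc" this eventually becomes establishes the register stack frame;
  // its result, the previous ar.pfs, is kept in VirtGPR so the ISD::RET
  // lowering below can copy it back into ar.pfs before the br.ret.)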
  /*
  BuildMI(&BB, IA64::IDEF, 0, IA64::r1);

  // hmm:
  BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
  BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
  // ..hmm.

  BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);

  // hmm:
  BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
  BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
  // ..hmm.
  */

  unsigned tempOffset = 0;

  // if this is a varargs function, we simply lower llvm.va_start by
  // pointing to the first entry
  if(F.isVarArg()) {
    tempOffset = 0;
    VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
  }

  // here we actually do the moving of args, and store them to the stack
  // too if this is a varargs function:
  for (int i = 0; i < count && i < 8; ++i) {
    BuildMI(&BB, TII->get(argOpc[i]), argVreg[i]).addReg(argPreg[i]);
    if(F.isVarArg()) {
      // if this is a varargs function, we copy the input registers to the stack
      int FI = MFI->CreateFixedObject(8, tempOffset);
      tempOffset += 8; //XXX: is it safe to use r22 like this?
      BuildMI(&BB, TII->get(IA64::MOV), IA64::r22).addFrameIndex(FI);
      // FIXME: we should use st8.spill here, one day
      BuildMI(&BB, TII->get(IA64::ST8), IA64::r22).addReg(argPreg[i]);
    }
  }

  // Finally, inform the code generator which regs we return values in.
  // (see the ISD::RET: case in the instruction selector)
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "i have no idea where to return this type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    MF.addLiveOut(IA64::r8);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(IA64::F8);
    break;
  }

  return ArgValues;
}

std::pair<SDOperand, SDOperand>
IA64TargetLowering::LowerCallTo(SDOperand Chain,
                                const Type *RetTy, bool RetTyIsSigned,
                                bool isVarArg, unsigned CallingConv,
                                bool isTailCall, SDOperand Callee,
                                ArgListTy &Args, SelectionDAG &DAG) {

  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytes = 16;
  unsigned outRegsUsed = 0;

  if (Args.size() > 8) {
    NumBytes += (Args.size() - 8) * 8;
    outRegsUsed = 8;
  } else {
    outRegsUsed = Args.size();
  }

  // FIXME? this WILL fail if we ever try to pass around an arg that
  // consumes more than a single output slot (a 'real' double, an int128,
  // some sort of aggregate, etc.), as we'll underestimate how many 'outX'
  // registers we use. Hopefully, the assembler will notice.
  MF.getInfo<IA64FunctionInfo>()->outRegsUsed =
    std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);

  // keep stack frame 16-byte aligned
  // assert(NumBytes==((NumBytes+15) & ~15) &&
  //        "stack frame not 16-byte aligned!");
  NumBytes = (NumBytes+15) & ~15;

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy()));

  SDOperand StackPtr;
  std::vector<SDOperand> Stores;
  std::vector<SDOperand> Converts;
  std::vector<SDOperand> RegValuesToPass;
  unsigned ArgOffset = 16;

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    {
      SDOperand Val = Args[i].Node;
      MVT::ValueType ObjectVT = Val.getValueType();
      SDOperand ValToStore(0, 0), ValToConvert(0, 0);
      unsigned ObjSize = 8;
      switch (ObjectVT) {
      default: assert(0 && "unexpected argument type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32: {
        // promote to 64 bits, sign/zero extending based on the type
        // of the argument
        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
        if (Args[i].isSExt)
          ExtendKind = ISD::SIGN_EXTEND;
        else if (Args[i].isZExt)
          ExtendKind = ISD::ZERO_EXTEND;
        Val = DAG.getNode(ExtendKind, MVT::i64, Val);
        // XXX: fall through
      }
      case MVT::i64:
        //ObjSize = 8;
        if(RegValuesToPass.size() >= 8) {
          ValToStore = Val;
        } else {
          RegValuesToPass.push_back(Val);
        }
        break;
      case MVT::f32:
        // promote to 64 bits
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        // XXX: fall through
      case MVT::f64:
        if(RegValuesToPass.size() >= 8) {
          ValToStore = Val;
        } else {
          RegValuesToPass.push_back(Val);
          if(1 /* TODO: if(calling external or variadic function) */) {
            ValToConvert = Val; // additionally pass this FP value as an int
          }
        }
        break;
      }

      if(ValToStore.Val) {
        if(!StackPtr.Val) {
          StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
        }
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, MVT::i64, StackPtr, PtrOff);
        Stores.push_back(DAG.getStore(Chain, ValToStore, PtrOff, NULL, 0));
        ArgOffset += ObjSize;
      }

      if(ValToConvert.Val) {
        Converts.push_back(DAG.getNode(IA64ISD::GETFD, MVT::i64, ValToConvert));
      }
    }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!Stores.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0], Stores.size());

  static const unsigned IntArgRegs[] = {
    IA64::out0, IA64::out1, IA64::out2, IA64::out3,
    IA64::out4, IA64::out5, IA64::out6, IA64::out7
  };
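  // (After the callee's "alloc", the register stack engine presents these
  // out0-out7 registers to the callee as its r32-r39, matching args_int[]
  // in LowerArguments above.)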

  static const unsigned FPArgRegs[] = {
    IA64::F8,  IA64::F9,  IA64::F10, IA64::F11,
    IA64::F12, IA64::F13, IA64::F14, IA64::F15
  };

  SDOperand InFlag;

  // save the current GP, SP and RP. FIXME: do we need to do all 3 always?
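  // (Calls go through function descriptors, so the callee may install its own
  // gp, and br.call itself overwrites rp; saving sp as well looks purely
  // conservative.)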
  SDOperand GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag);
  Chain = GPBeforeCall.getValue(1);
  InFlag = Chain.getValue(2);
  SDOperand SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag);
  Chain = SPBeforeCall.getValue(1);
  InFlag = Chain.getValue(2);
  SDOperand RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag);
  Chain = RPBeforeCall.getValue(1);
  InFlag = Chain.getValue(2);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing integer args into regs out[0-7]
  // mapped 1:1 and the FP args into regs F8-F15 "lazily"
  // TODO: for performance, we should only copy FP args into int regs when we
  // know this is required (i.e. for variadic or external (unknown) functions)

  // first do the FP->(integer representation) conversions, these are
  // flagged for now, but shouldn't have to be (TODO)
  unsigned seenConverts = 0;
  for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
    if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) {
      Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++],
                               InFlag);
      InFlag = Chain.getValue(1);
    }
  }

  // next copy args into the usual places, these are flagged
  unsigned usedFPArgs = 0;
  for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain,
      MVT::isInteger(RegValuesToPass[i].getValueType()) ?
        IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  /*
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
  }
  */

  std::vector<MVT::ValueType> NodeTys;
  std::vector<SDOperand> CallOperands;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  CallOperands.push_back(Chain);
  CallOperands.push_back(Callee);

  // emit the call itself
  if (InFlag.Val)
    CallOperands.push_back(InFlag);
  else
    assert(0 && "this should never happen!\n");

  // to make way for a hack:
  Chain = DAG.getNode(IA64ISD::BRCALL, NodeTys,
                      &CallOperands[0], CallOperands.size());
  InFlag = Chain.getValue(1);

  // restore the GP, SP and RP after the call
  Chain = DAG.getCopyToReg(Chain, IA64::r1, GPBeforeCall, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, IA64::r12, SPBeforeCall, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag);
  InFlag = Chain.getValue(1);

  std::vector<MVT::ValueType> RetVals;
  RetVals.push_back(MVT::Other);
  RetVals.push_back(MVT::Flag);

  MVT::ValueType RetTyVT = getValueType(RetTy);
  SDOperand RetVal;
  if (RetTyVT != MVT::isVoid) {
    switch (RetTyVT) {
    default: assert(0 && "Unknown value type to return!");
    case MVT::i1: { // bools are just like other integers (returned in r8)
      // we *could* fall through to the truncate below, but this saves a
      // few redundant predicate ops
      SDOperand boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
      InFlag = boolInR8.getValue(2);
      Chain = boolInR8.getValue(1);
      SDOperand zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag);
      InFlag = zeroReg.getValue(2);
      Chain = zeroReg.getValue(1);

      RetVal = DAG.getSetCC(MVT::i1, boolInR8, zeroReg, ISD::SETNE);
      break;
    }
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
      Chain = RetVal.getValue(1);

      // keep track of whether it is sign or zero extended (todo: bools?)
      /* XXX
      RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
                           MVT::i64, RetVal, DAG.getValueType(RetTyVT));
      */
      RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
      break;
    case MVT::i64:
      RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
      Chain = RetVal.getValue(1);
      InFlag = RetVal.getValue(2); // XXX dead
      break;
    case MVT::f32:
      RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
      Chain = RetVal.getValue(1);
      // FIXME: this should probably be ISD::FP_ROUND (f64 -> f32) rather than
      // a TRUNCATE of an FP value.
      RetVal = DAG.getNode(ISD::TRUNCATE, MVT::f32, RetVal);
      break;
    case MVT::f64:
      RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
      Chain = RetVal.getValue(1);
      InFlag = RetVal.getValue(2); // XXX dead
      break;
    }
  }

  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));

  return std::make_pair(RetVal, Chain);
}

SDOperand IA64TargetLowering::
LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::GlobalTLSAddress:
    assert(0 && "TLS not implemented for IA64.");
  case ISD::RET: {
    SDOperand AR_PFSVal, Copy;

    switch(Op.getNumOperands()) {
    default:
      assert(0 && "Do not know how to return this many arguments!");
      abort();
    case 1:
      AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
      AR_PFSVal = DAG.getCopyToReg(AR_PFSVal.getValue(1), IA64::AR_PFS,
                                   AR_PFSVal);
      return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal);
    case 3: {
      // Copy the result into the output register & restore ar.pfs
      MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
      unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8;

      AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
      Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1),
                              SDOperand());
      AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), IA64::AR_PFS, AR_PFSVal,
                                   Copy.getValue(1));
      return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other,
                         AR_PFSVal, AR_PFSVal.getValue(1));
    }
    }
    return SDOperand();
  }
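  // va_list on this port is a bare pointer into the 8-byte argument slots
  // spilled in LowerArguments, so va_arg below is a load of the current slot
  // followed by a pointer-sized (8-byte) bump of the list pointer.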
  case ISD::VAARG: {
    MVT::ValueType VT = getPointerTy();
    SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
    SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1),
                                   SV->getValue(), SV->getOffset());
    // Increment the pointer, VAList, to the next vaarg
    SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList,
                                   DAG.getConstant(MVT::getSizeInBits(VT)/8,
                                                   VT));
    // Store the incremented VAList to the legalized pointer
    VAIncr = DAG.getStore(VAList.getValue(1), VAIncr,
                          Op.getOperand(1), SV->getValue(), SV->getOffset());
    // Load the actual argument out of the pointer VAList
    return DAG.getLoad(Op.getValueType(), VAIncr, VAList, NULL, 0);
  }
  case ISD::VASTART: {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
    SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
    return DAG.getStore(Op.getOperand(0), FR,
                        Op.getOperand(1), SV->getValue(), SV->getOffset());
  }
  // Frame & Return address. Currently unimplemented.
  case ISD::RETURNADDR: break;
  case ISD::FRAMEADDR: break;
  }
  return SDOperand();
}