//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;
31
32static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
33 const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
34
35 if (Subtarget->isTargetLinux())
36 return new AArch64LinuxTargetObjectFile();
37 if (Subtarget->isTargetELF())
38 return new TargetLoweringObjectFileELF();
39 llvm_unreachable("unknown subtarget type");
40}
41
42
43AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
44 : TargetLowering(TM, createTLOF(TM)),
45 Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
46 RegInfo(TM.getRegisterInfo()),
47 Itins(TM.getInstrItineraryData()) {
48
49 // SIMD compares set the entire lane's bits to 1
50 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51
52 // Scalar register <-> type mapping
53 addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
54 addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
55 addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
56 addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
57 addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
58 addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
59
  computeRegisterProperties();
61
  // We combine OR nodes for bitfield and NEON BSL operations.
63 setTargetDAGCombine(ISD::OR);
64
65 setTargetDAGCombine(ISD::AND);
66 setTargetDAGCombine(ISD::SRA);
67
68 // AArch64 does not have i1 loads, or much of anything for i1 really.
69 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
70 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
71 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
72
73 setStackPointerRegisterToSaveRestore(AArch64::XSP);
74 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
75 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
76 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
77
78 // We'll lower globals to wrappers for selection.
79 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
80 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
81
82 // A64 instructions have the comparison predicate attached to the user of the
83 // result, but having a separate comparison is valuable for matching.
84 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
85 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
86 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
87 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
88
89 setOperationAction(ISD::SELECT, MVT::i32, Custom);
90 setOperationAction(ISD::SELECT, MVT::i64, Custom);
91 setOperationAction(ISD::SELECT, MVT::f32, Custom);
92 setOperationAction(ISD::SELECT, MVT::f64, Custom);
93
94 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
95 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
96 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
97 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
98
99 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
100
101 setOperationAction(ISD::SETCC, MVT::i32, Custom);
102 setOperationAction(ISD::SETCC, MVT::i64, Custom);
103 setOperationAction(ISD::SETCC, MVT::f32, Custom);
104 setOperationAction(ISD::SETCC, MVT::f64, Custom);
105
106 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
107 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
108 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
109
110 setOperationAction(ISD::VASTART, MVT::Other, Custom);
111 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
112 setOperationAction(ISD::VAEND, MVT::Other, Expand);
113 setOperationAction(ISD::VAARG, MVT::Other, Expand);
114
115 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
116
117 setOperationAction(ISD::ROTL, MVT::i32, Expand);
118 setOperationAction(ISD::ROTL, MVT::i64, Expand);
119
120 setOperationAction(ISD::UREM, MVT::i32, Expand);
121 setOperationAction(ISD::UREM, MVT::i64, Expand);
122 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
123 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
124
125 setOperationAction(ISD::SREM, MVT::i32, Expand);
126 setOperationAction(ISD::SREM, MVT::i64, Expand);
127 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
128 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
129
130 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
131 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
132
133 // Legal floating-point operations.
134 setOperationAction(ISD::FABS, MVT::f32, Legal);
135 setOperationAction(ISD::FABS, MVT::f64, Legal);
136
137 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
138 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
139
140 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
141 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
142
143 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
144 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
145
146 setOperationAction(ISD::FNEG, MVT::f32, Legal);
147 setOperationAction(ISD::FNEG, MVT::f64, Legal);
148
149 setOperationAction(ISD::FRINT, MVT::f32, Legal);
150 setOperationAction(ISD::FRINT, MVT::f64, Legal);
151
152 setOperationAction(ISD::FSQRT, MVT::f32, Legal);
153 setOperationAction(ISD::FSQRT, MVT::f64, Legal);
154
155 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
156 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
157
158 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
159 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
160 setOperationAction(ISD::ConstantFP, MVT::f128, Legal);
161
162 // Illegal floating-point operations.
163 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
164 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
165
166 setOperationAction(ISD::FCOS, MVT::f32, Expand);
167 setOperationAction(ISD::FCOS, MVT::f64, Expand);
168
169 setOperationAction(ISD::FEXP, MVT::f32, Expand);
170 setOperationAction(ISD::FEXP, MVT::f64, Expand);
171
172 setOperationAction(ISD::FEXP2, MVT::f32, Expand);
173 setOperationAction(ISD::FEXP2, MVT::f64, Expand);
174
175 setOperationAction(ISD::FLOG, MVT::f32, Expand);
176 setOperationAction(ISD::FLOG, MVT::f64, Expand);
177
178 setOperationAction(ISD::FLOG2, MVT::f32, Expand);
179 setOperationAction(ISD::FLOG2, MVT::f64, Expand);
180
181 setOperationAction(ISD::FLOG10, MVT::f32, Expand);
182 setOperationAction(ISD::FLOG10, MVT::f64, Expand);
183
184 setOperationAction(ISD::FPOW, MVT::f32, Expand);
185 setOperationAction(ISD::FPOW, MVT::f64, Expand);
186
187 setOperationAction(ISD::FPOWI, MVT::f32, Expand);
188 setOperationAction(ISD::FPOWI, MVT::f64, Expand);
189
190 setOperationAction(ISD::FREM, MVT::f32, Expand);
191 setOperationAction(ISD::FREM, MVT::f64, Expand);
192
193 setOperationAction(ISD::FSIN, MVT::f32, Expand);
194 setOperationAction(ISD::FSIN, MVT::f64, Expand);
195
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);

199 // Virtually no operation on f128 is legal, but LLVM can't expand them when
200 // there's a valid register class, so we need custom operations in most cases.
201 setOperationAction(ISD::FABS, MVT::f128, Expand);
202 setOperationAction(ISD::FADD, MVT::f128, Custom);
203 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
204 setOperationAction(ISD::FCOS, MVT::f128, Expand);
205 setOperationAction(ISD::FDIV, MVT::f128, Custom);
206 setOperationAction(ISD::FMA, MVT::f128, Expand);
207 setOperationAction(ISD::FMUL, MVT::f128, Custom);
208 setOperationAction(ISD::FNEG, MVT::f128, Expand);
209 setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
210 setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
211 setOperationAction(ISD::FPOW, MVT::f128, Expand);
212 setOperationAction(ISD::FREM, MVT::f128, Expand);
213 setOperationAction(ISD::FRINT, MVT::f128, Expand);
214 setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
217 setOperationAction(ISD::FSUB, MVT::f128, Custom);
218 setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
219 setOperationAction(ISD::SETCC, MVT::f128, Custom);
220 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
221 setOperationAction(ISD::SELECT, MVT::f128, Expand);
222 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
223 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
224
225 // Lowering for many of the conversions is actually specified by the non-f128
226 // type. The LowerXXX function will be trivial when f128 isn't involved.
227 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
228 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
229 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
230 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
231 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
232 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
233 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
234 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
235 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
236 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
237 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
238 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
239 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
240 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
241
242 // This prevents LLVM trying to compress double constants into a floating
243 // constant-pool entry and trying to load from there. It's of doubtful benefit
244 // for A64: we'd need LDR followed by FCVT, I believe.
245 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
246 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
247 setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
248
249 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
250 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
251 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
252 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
253 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
254 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
255
256 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
257 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
258
259 setExceptionPointerRegister(AArch64::X0);
260 setExceptionSelectorRegister(AArch64::X1);
261}
262
263EVT AArch64TargetLowering::getSetCCResultType(EVT VT) const {
264 // It's reasonably important that this value matches the "natural" legal
265 // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
266 // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
267 if (!VT.isVector()) return MVT::i32;
268 return VT.changeVectorElementTypeToInteger();
269}
270
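// Pick the load-exclusive / store-exclusive opcode pair for an atomic access
// of the given size (1, 2, 4 or 8 bytes), selecting the acquire (LDAXR) and
// release (STLXR) variants when the requested AtomicOrdering needs them.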
static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc,
                                  unsigned &StrOpc) {
274 static unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
275 AArch64::LDXR_word, AArch64::LDXR_dword};
276 static unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
277 AArch64::LDAXR_word, AArch64::LDAXR_dword};
278 static unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
279 AArch64::STXR_word, AArch64::STXR_dword};
280 static unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
281 AArch64::STLXR_word, AArch64::STLXR_dword};
282
283 unsigned *LoadOps, *StoreOps;
284 if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
285 LoadOps = LoadAcqs;
286 else
287 LoadOps = LoadBares;
288
289 if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
290 StoreOps = StoreRels;
291 else
292 StoreOps = StoreBares;
293
294 assert(isPowerOf2_32(Size) && Size <= 8 &&
295 "unsupported size for atomic binary op!");
296
297 LdrOpc = LoadOps[Log2_32(Size)];
298 StrOpc = StoreOps[Log2_32(Size)];
}
300
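// Expand an ATOMIC_LOAD_* pseudo-instruction (or ATOMIC_SWAP, when BinOpcode
// is 0) into a load-exclusive / operate / store-exclusive loop that retries
// until the store-exclusive succeeds.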
301MachineBasicBlock *
302AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
303 unsigned Size,
304 unsigned BinOpcode) const {
305 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
306 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
307
308 const BasicBlock *LLVM_BB = BB->getBasicBlock();
309 MachineFunction *MF = BB->getParent();
310 MachineFunction::iterator It = BB;
311 ++It;
312
313 unsigned dest = MI->getOperand(0).getReg();
314 unsigned ptr = MI->getOperand(1).getReg();
315 unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();
318
319 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
320
321 unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

324 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
325 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
326 MF->insert(It, loopMBB);
327 MF->insert(It, exitMBB);
328
329 // Transfer the remainder of BB and its successor edges to exitMBB.
330 exitMBB->splice(exitMBB->begin(), BB,
331 llvm::next(MachineBasicBlock::iterator(MI)),
332 BB->end());
333 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
334
335 const TargetRegisterClass *TRC
336 = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
337 unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
338
339 // thisMBB:
340 // ...
341 // fallthrough --> loopMBB
342 BB->addSuccessor(loopMBB);
343
344 // loopMBB:
345 // ldxr dest, ptr
346 // <binop> scratch, dest, incr
347 // stxr stxr_status, scratch, ptr
  // cbnz stxr_status, loopMBB
  // fallthrough --> exitMBB
350 BB = loopMBB;
351 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
352 if (BinOpcode) {
353 // All arithmetic operations we'll be creating are designed to take an extra
354 // shift or extend operand, which we can conveniently set to zero.
355
356 // Operand order needs to go the other way for NAND.
357 if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
358 BuildMI(BB, dl, TII->get(BinOpcode), scratch)
359 .addReg(incr).addReg(dest).addImm(0);
360 else
361 BuildMI(BB, dl, TII->get(BinOpcode), scratch)
362 .addReg(dest).addReg(incr).addImm(0);
363 }
364
365 // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
366 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
367 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
368
369 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

373 BB->addSuccessor(loopMBB);
374 BB->addSuccessor(exitMBB);
375
376 // exitMBB:
377 // ...
378 BB = exitMBB;
379
380 MI->eraseFromParent(); // The instruction is gone now.
381
382 return BB;
383}
384
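// Like emitAtomicBinary, but for the min/max operations: the value to store
// is chosen with a compare (CmpOp) and a CSEL on the given condition rather
// than computed arithmetically.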
385MachineBasicBlock *
386AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
387 MachineBasicBlock *BB,
388 unsigned Size,
389 unsigned CmpOp,
390 A64CC::CondCodes Cond) const {
391 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
392
393 const BasicBlock *LLVM_BB = BB->getBasicBlock();
394 MachineFunction *MF = BB->getParent();
395 MachineFunction::iterator It = BB;
396 ++It;
397
398 unsigned dest = MI->getOperand(0).getReg();
399 unsigned ptr = MI->getOperand(1).getReg();
400 unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
404 DebugLoc dl = MI->getDebugLoc();
405
406 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
407 const TargetRegisterClass *TRC, *TRCsp;
408 if (Size == 8) {
409 TRC = &AArch64::GPR64RegClass;
410 TRCsp = &AArch64::GPR64xspRegClass;
411 } else {
412 TRC = &AArch64::GPR32RegClass;
413 TRCsp = &AArch64::GPR32wspRegClass;
414 }
415
416 unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

419 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
420 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
421 MF->insert(It, loopMBB);
422 MF->insert(It, exitMBB);
423
424 // Transfer the remainder of BB and its successor edges to exitMBB.
425 exitMBB->splice(exitMBB->begin(), BB,
426 llvm::next(MachineBasicBlock::iterator(MI)),
427 BB->end());
428 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
429
430 unsigned scratch = MRI.createVirtualRegister(TRC);
431 MRI.constrainRegClass(scratch, TRCsp);
432
433 // thisMBB:
434 // ...
435 // fallthrough --> loopMBB
436 BB->addSuccessor(loopMBB);
437
438 // loopMBB:
439 // ldxr dest, ptr
440 // cmp incr, dest (, sign extend if necessary)
441 // csel scratch, dest, incr, cond
442 // stxr stxr_status, scratch, ptr
  // cbnz stxr_status, loopMBB
  // fallthrough --> exitMBB
445 BB = loopMBB;
446 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
447
448 // Build compare and cmov instructions.
449 MRI.constrainRegClass(incr, TRCsp);
450 BuildMI(BB, dl, TII->get(CmpOp))
451 .addReg(incr).addReg(oldval).addImm(0);
452
453 BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
454 scratch)
455 .addReg(oldval).addReg(incr).addImm(Cond);
456
457 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
458 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
459
460 BuildMI(BB, dl, TII->get(strOpc), stxr_status)
461 .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

465 BB->addSuccessor(loopMBB);
466 BB->addSuccessor(exitMBB);
467
468 // exitMBB:
469 // ...
470 BB = exitMBB;
471
472 MI->eraseFromParent(); // The instruction is gone now.
473
474 return BB;
475}
476
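// Expand ATOMIC_CMP_SWAP into two blocks: loop1 does the load-exclusive and
// compare (exiting early on mismatch), loop2 attempts the store-exclusive and
// branches back to loop1 if it fails.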
477MachineBasicBlock *
478AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
479 MachineBasicBlock *BB,
480 unsigned Size) const {
481 unsigned dest = MI->getOperand(0).getReg();
482 unsigned ptr = MI->getOperand(1).getReg();
483 unsigned oldval = MI->getOperand(2).getReg();
484 unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
487 DebugLoc dl = MI->getDebugLoc();
488
489 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
490 const TargetRegisterClass *TRCsp;
491 TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;
492
493 unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

496 MachineFunction *MF = BB->getParent();
497 const BasicBlock *LLVM_BB = BB->getBasicBlock();
498 MachineFunction::iterator It = BB;
499 ++It; // insert the new blocks after the current block
500
501 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
502 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
503 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
504 MF->insert(It, loop1MBB);
505 MF->insert(It, loop2MBB);
506 MF->insert(It, exitMBB);
507
508 // Transfer the remainder of BB and its successor edges to exitMBB.
509 exitMBB->splice(exitMBB->begin(), BB,
510 llvm::next(MachineBasicBlock::iterator(MI)),
511 BB->end());
512 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
513
514 // thisMBB:
515 // ...
516 // fallthrough --> loop1MBB
517 BB->addSuccessor(loop1MBB);
518
519 // loop1MBB:
520 // ldxr dest, [ptr]
521 // cmp dest, oldval
522 // b.ne exitMBB
523 BB = loop1MBB;
524 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
525
526 unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
527 MRI.constrainRegClass(dest, TRCsp);
528 BuildMI(BB, dl, TII->get(CmpOp))
529 .addReg(dest).addReg(oldval).addImm(0);
530 BuildMI(BB, dl, TII->get(AArch64::Bcc))
531 .addImm(A64CC::NE).addMBB(exitMBB);
532 BB->addSuccessor(loop2MBB);
533 BB->addSuccessor(exitMBB);
534
535 // loop2MBB:
536 // strex stxr_status, newval, [ptr]
  // cbnz stxr_status, loop1MBB
  BB = loop2MBB;
539 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
540 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
541
542 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
546 BB->addSuccessor(exitMBB);
547
548 // exitMBB:
549 // ...
550 BB = exitMBB;
551
552 MI->eraseFromParent(); // The instruction is gone now.
553
554 return BB;
555}
556
557MachineBasicBlock *
558AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
559 MachineBasicBlock *MBB) const {
560 // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
562 // str q0, [sp]
563 // b.ne IfTrue
564 // b Finish
565 // IfTrue:
566 // str q1, [sp]
567 // Finish:
568 // ldr q0, [sp]
569 //
570 // Using virtual registers would probably not be beneficial since COPY
571 // instructions are expensive for f128 (there's no actual instruction to
572 // implement them).
573 //
574 // An alternative would be to do an integer-CSEL on some address. E.g.:
575 // mov x0, sp
576 // add x1, sp, #16
577 // str q0, [x0]
578 // str q1, [x1]
579 // csel x0, x0, x1, ne
580 // ldr q0, [x0]
581 //
582 // It's unclear which approach is actually optimal.
583 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
584 MachineFunction *MF = MBB->getParent();
585 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
586 DebugLoc DL = MI->getDebugLoc();
587 MachineFunction::iterator It = MBB;
588 ++It;
589
590 unsigned DestReg = MI->getOperand(0).getReg();
591 unsigned IfTrueReg = MI->getOperand(1).getReg();
592 unsigned IfFalseReg = MI->getOperand(2).getReg();
593 unsigned CondCode = MI->getOperand(3).getImm();
594 bool NZCVKilled = MI->getOperand(4).isKill();
595
596 MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
597 MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
598 MF->insert(It, TrueBB);
599 MF->insert(It, EndBB);
600
601 // Transfer rest of current basic-block to EndBB
602 EndBB->splice(EndBB->begin(), MBB,
603 llvm::next(MachineBasicBlock::iterator(MI)),
604 MBB->end());
605 EndBB->transferSuccessorsAndUpdatePHIs(MBB);
606
607 // We need somewhere to store the f128 value needed.
608 int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);
609
610 // [... start of incoming MBB ...]
611 // str qIFFALSE, [sp]
612 // b.cc IfTrue
613 // b Done
614 BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
615 .addReg(IfFalseReg)
616 .addFrameIndex(ScratchFI)
617 .addImm(0);
618 BuildMI(MBB, DL, TII->get(AArch64::Bcc))
619 .addImm(CondCode)
620 .addMBB(TrueBB);
621 BuildMI(MBB, DL, TII->get(AArch64::Bimm))
622 .addMBB(EndBB);
623 MBB->addSuccessor(TrueBB);
624 MBB->addSuccessor(EndBB);
625
626 // IfTrue:
627 // str qIFTRUE, [sp]
628 BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
629 .addReg(IfTrueReg)
630 .addFrameIndex(ScratchFI)
631 .addImm(0);
632
633 // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
634 // blocks.
635 TrueBB->addSuccessor(EndBB);
636
637 // Done:
638 // ldr qDEST, [sp]
639 // [... rest of incoming MBB ...]
640 if (!NZCVKilled)
641 EndBB->addLiveIn(AArch64::NZCV);
642 MachineInstr *StartOfEnd = EndBB->begin();
643 BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
644 .addFrameIndex(ScratchFI)
645 .addImm(0);
646
647 MI->eraseFromParent();
648 return EndBB;
649}
650
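// Dispatch the pseudo-instructions that were marked for custom insertion
// (F128CSEL and the ATOMIC_* pseudos) to the expansion helpers above.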
651MachineBasicBlock *
652AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
653 MachineBasicBlock *MBB) const {
654 switch (MI->getOpcode()) {
655 default: llvm_unreachable("Unhandled instruction with custom inserter");
656 case AArch64::F128CSEL:
657 return EmitF128CSEL(MI, MBB);
658 case AArch64::ATOMIC_LOAD_ADD_I8:
659 return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
660 case AArch64::ATOMIC_LOAD_ADD_I16:
661 return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
662 case AArch64::ATOMIC_LOAD_ADD_I32:
663 return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
664 case AArch64::ATOMIC_LOAD_ADD_I64:
665 return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);
666
667 case AArch64::ATOMIC_LOAD_SUB_I8:
668 return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
669 case AArch64::ATOMIC_LOAD_SUB_I16:
670 return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
671 case AArch64::ATOMIC_LOAD_SUB_I32:
672 return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
673 case AArch64::ATOMIC_LOAD_SUB_I64:
674 return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);
675
676 case AArch64::ATOMIC_LOAD_AND_I8:
677 return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
678 case AArch64::ATOMIC_LOAD_AND_I16:
679 return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
680 case AArch64::ATOMIC_LOAD_AND_I32:
681 return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
682 case AArch64::ATOMIC_LOAD_AND_I64:
683 return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);
684
685 case AArch64::ATOMIC_LOAD_OR_I8:
686 return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
687 case AArch64::ATOMIC_LOAD_OR_I16:
688 return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
689 case AArch64::ATOMIC_LOAD_OR_I32:
690 return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
691 case AArch64::ATOMIC_LOAD_OR_I64:
692 return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);
693
694 case AArch64::ATOMIC_LOAD_XOR_I8:
695 return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
696 case AArch64::ATOMIC_LOAD_XOR_I16:
697 return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
698 case AArch64::ATOMIC_LOAD_XOR_I32:
699 return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
700 case AArch64::ATOMIC_LOAD_XOR_I64:
701 return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);
702
703 case AArch64::ATOMIC_LOAD_NAND_I8:
704 return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
705 case AArch64::ATOMIC_LOAD_NAND_I16:
706 return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
707 case AArch64::ATOMIC_LOAD_NAND_I32:
708 return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
709 case AArch64::ATOMIC_LOAD_NAND_I64:
710 return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);
711
712 case AArch64::ATOMIC_LOAD_MIN_I8:
713 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
714 case AArch64::ATOMIC_LOAD_MIN_I16:
715 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
716 case AArch64::ATOMIC_LOAD_MIN_I32:
717 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
718 case AArch64::ATOMIC_LOAD_MIN_I64:
719 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);
720
721 case AArch64::ATOMIC_LOAD_MAX_I8:
722 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
723 case AArch64::ATOMIC_LOAD_MAX_I16:
724 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
725 case AArch64::ATOMIC_LOAD_MAX_I32:
726 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
727 case AArch64::ATOMIC_LOAD_MAX_I64:
728 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);
729
730 case AArch64::ATOMIC_LOAD_UMIN_I8:
731 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
732 case AArch64::ATOMIC_LOAD_UMIN_I16:
733 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
734 case AArch64::ATOMIC_LOAD_UMIN_I32:
735 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
736 case AArch64::ATOMIC_LOAD_UMIN_I64:
737 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);
738
739 case AArch64::ATOMIC_LOAD_UMAX_I8:
740 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
741 case AArch64::ATOMIC_LOAD_UMAX_I16:
742 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
743 case AArch64::ATOMIC_LOAD_UMAX_I32:
744 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
745 case AArch64::ATOMIC_LOAD_UMAX_I64:
746 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);
747
748 case AArch64::ATOMIC_SWAP_I8:
749 return emitAtomicBinary(MI, MBB, 1, 0);
750 case AArch64::ATOMIC_SWAP_I16:
751 return emitAtomicBinary(MI, MBB, 2, 0);
752 case AArch64::ATOMIC_SWAP_I32:
753 return emitAtomicBinary(MI, MBB, 4, 0);
754 case AArch64::ATOMIC_SWAP_I64:
755 return emitAtomicBinary(MI, MBB, 8, 0);
756
757 case AArch64::ATOMIC_CMP_SWAP_I8:
758 return emitAtomicCmpSwap(MI, MBB, 1);
759 case AArch64::ATOMIC_CMP_SWAP_I16:
760 return emitAtomicCmpSwap(MI, MBB, 2);
761 case AArch64::ATOMIC_CMP_SWAP_I32:
762 return emitAtomicCmpSwap(MI, MBB, 4);
763 case AArch64::ATOMIC_CMP_SWAP_I64:
764 return emitAtomicCmpSwap(MI, MBB, 8);
765 }
766}
767
768
769const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
770 switch (Opcode) {
771 case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
772 case AArch64ISD::Call: return "AArch64ISD::Call";
773 case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
774 case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
775 case AArch64ISD::BFI: return "AArch64ISD::BFI";
776 case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
777 case AArch64ISD::Ret: return "AArch64ISD::Ret";
778 case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
779 case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
780 case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
781 case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
782 case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
783 case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";
786
787 default: return NULL;
788 }
789}
790
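// Registers the AArch64 PCS uses for parameter passing: up to eight
// general-purpose registers (x0-x7) and eight FP/SIMD registers (q0-q7).
// These are also the registers SaveVarArgRegisters has to spill for
// variadic functions.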
791static const uint16_t AArch64FPRArgRegs[] = {
792 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
793 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
794};
795static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);
796
797static const uint16_t AArch64ArgRegs[] = {
798 AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
799 AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
800};
801static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);
802
803static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
804 CCValAssign::LocInfo LocInfo,
805 ISD::ArgFlagsTy ArgFlags, CCState &State) {
806 // Mark all remaining general purpose registers as allocated. We don't
807 // backtrack: if (for example) an i128 gets put on the stack, no subsequent
808 // i64 will go in registers (C.11).
809 for (unsigned i = 0; i < NumArgRegs; ++i)
810 State.AllocateReg(AArch64ArgRegs[i]);
811
812 return false;
813}
814
815#include "AArch64GenCallingConv.inc"
816
817CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
818
819 switch(CC) {
820 default: llvm_unreachable("Unsupported calling convention");
821 case CallingConv::Fast:
822 case CallingConv::C:
823 return CC_A64_APCS;
824 }
825}
826
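// Spill any argument registers left unused by the fixed arguments so the
// variadic-argument code can get at them later: x0-x7 go into an 8-byte
// aligned save area and q0-q7 into a 16-byte aligned one, with the frame
// indices recorded in AArch64MachineFunctionInfo.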
827void
828AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
829 DebugLoc DL, SDValue &Chain) const {
830 MachineFunction &MF = DAG.getMachineFunction();
831 MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

835 SmallVector<SDValue, 8> MemOps;
836
837 unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
838 NumArgRegs);
839 unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
840 NumFPRArgRegs);
841
842 unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
843 int GPRIdx = 0;
844 if (GPRSaveSize != 0) {
845 GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);
846
847 SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());
848
849 for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
850 unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
851 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
852 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
853 MachinePointerInfo::getStack(i * 8),
854 false, false, 0);
855 MemOps.push_back(Store);
856 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
857 DAG.getConstant(8, getPointerTy()));
858 }
859 }
860
861 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
862 int FPRIdx = 0;
863 if (FPRSaveSize != 0) {
864 FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
865
866 SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
867
868 for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
869 unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
870 &AArch64::FPR128RegClass);
871 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
872 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
873 MachinePointerInfo::getStack(i * 16),
874 false, false, 0);
875 MemOps.push_back(Store);
876 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
877 DAG.getConstant(16, getPointerTy()));
878 }
879 }
880
881 int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);
882
883 FuncInfo->setVariadicStackIdx(StackIdx);
884 FuncInfo->setVariadicGPRIdx(GPRIdx);
885 FuncInfo->setVariadicGPRSize(GPRSaveSize);
886 FuncInfo->setVariadicFPRIdx(FPRIdx);
887 FuncInfo->setVariadicFPRSize(FPRSaveSize);
888
889 if (!MemOps.empty()) {
890 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
891 MemOps.size());
892 }
893}
894
895
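// Lower the incoming formal arguments described by Ins: byval arguments
// become a fixed frame index, register arguments a CopyFromReg, stack
// arguments a load from a fixed stack object, and promoted values are
// narrowed back to their original type with EXTRACT_SUBREG.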
896SDValue
897AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
898 CallingConv::ID CallConv, bool isVarArg,
899 const SmallVectorImpl<ISD::InputArg> &Ins,
900 DebugLoc dl, SelectionDAG &DAG,
901 SmallVectorImpl<SDValue> &InVals) const {
902 MachineFunction &MF = DAG.getMachineFunction();
903 AArch64MachineFunctionInfo *FuncInfo
904 = MF.getInfo<AArch64MachineFunctionInfo>();
905 MachineFrameInfo *MFI = MF.getFrameInfo();
906 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
907
908 SmallVector<CCValAssign, 16> ArgLocs;
909 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
910 getTargetMachine(), ArgLocs, *DAG.getContext());
911 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
912
913 SmallVector<SDValue, 16> ArgValues;
914
915 SDValue ArgValue;
916 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
917 CCValAssign &VA = ArgLocs[i];
918 ISD::ArgFlagsTy Flags = Ins[i].Flags;
919
920 if (Flags.isByVal()) {
921 // Byval is used for small structs and HFAs in the PCS, but the system
922 // should work in a non-compliant manner for larger structs.
923 EVT PtrTy = getPointerTy();
924 int Size = Flags.getByValSize();
925 unsigned NumRegs = (Size + 7) / 8;
926
927 unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
928 VA.getLocMemOffset(),
929 false);
930 SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
931 InVals.push_back(FrameIdxN);
932
933 continue;
934 } else if (VA.isRegLoc()) {
935 MVT RegVT = VA.getLocVT();
936 const TargetRegisterClass *RC = getRegClassFor(RegVT);
937 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
938
939 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
940 } else { // VA.isRegLoc()
941 assert(VA.isMemLoc());
942
943 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
944 VA.getLocMemOffset(), true);
945
946 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
947 ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
948 MachinePointerInfo::getFixedStack(FI),
949 false, false, false, 0);
950
951
952 }
953
954 switch (VA.getLocInfo()) {
955 default: llvm_unreachable("Unknown loc info!");
956 case CCValAssign::Full: break;
957 case CCValAssign::BCvt:
958 ArgValue = DAG.getNode(ISD::BITCAST,dl, VA.getValVT(), ArgValue);
959 break;
960 case CCValAssign::SExt:
961 case CCValAssign::ZExt:
962 case CCValAssign::AExt: {
963 unsigned DestSize = VA.getValVT().getSizeInBits();
964 unsigned DestSubReg;
965
966 switch (DestSize) {
967 case 8: DestSubReg = AArch64::sub_8; break;
968 case 16: DestSubReg = AArch64::sub_16; break;
969 case 32: DestSubReg = AArch64::sub_32; break;
970 case 64: DestSubReg = AArch64::sub_64; break;
971 default: llvm_unreachable("Unexpected argument promotion");
972 }
973
974 ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
975 VA.getValVT(), ArgValue,
976 DAG.getTargetConstant(DestSubReg, MVT::i32)),
977 0);
978 break;
979 }
980 }
981
982 InVals.push_back(ArgValue);
983 }
984
985 if (isVarArg)
986 SaveVarArgRegisters(CCInfo, DAG, dl, Chain);
987
988 unsigned StackArgSize = CCInfo.getNextStackOffset();
989 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
990 // This is a non-standard ABI so by fiat I say we're allowed to make full
991 // use of the stack area to be popped, which must be aligned to 16 bytes in
992 // any case:
993 StackArgSize = RoundUpToAlignment(StackArgSize, 16);
994
995 // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
996 // a multiple of 16.
997 FuncInfo->setArgumentStackToRestore(StackArgSize);
998
999 // This realignment carries over to the available bytes below. Our own
1000 // callers will guarantee the space is free by giving an aligned value to
1001 // CALLSEQ_START.
1002 }
1003 // Even if we're not expected to free up the space, it's useful to know how
1004 // much is there while considering tail calls (because we can reuse it).
1005 FuncInfo->setBytesInStackArgArea(StackArgSize);
1006
1007 return Chain;
1008}
1009
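// Lower the return: every value the calling convention assigns to a register
// is copied into that register and glued, together with the register
// operands, onto the final AArch64ISD::Ret node.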
1010SDValue
1011AArch64TargetLowering::LowerReturn(SDValue Chain,
1012 CallingConv::ID CallConv, bool isVarArg,
1013 const SmallVectorImpl<ISD::OutputArg> &Outs,
1014 const SmallVectorImpl<SDValue> &OutVals,
1015 DebugLoc dl, SelectionDAG &DAG) const {
1016 // CCValAssign - represent the assignment of the return value to a location.
1017 SmallVector<CCValAssign, 16> RVLocs;
1018
1019 // CCState - Info about the registers and stack slots.
1020 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1021 getTargetMachine(), RVLocs, *DAG.getContext());
1022
1023 // Analyze outgoing return values.
1024 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));
1025
  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

1029 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument.
    //
    // Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
1043 CCValAssign &VA = RVLocs[i];
1044 assert(VA.isRegLoc() && "Only register-returns should be created by PCS");
1045
1046
1047 SDValue Arg = OutVals[i];
1048
1049 // There's no convenient note in the ABI about this as there is for normal
1050 // arguments, but it says return values are passed in the same registers as
1051 // an argument would be. I believe that includes the comments about
1052 // unspecified higher bits, putting the burden of widening on the *caller*
1053 // for return values.
1054 switch (VA.getLocInfo()) {
1055 default: llvm_unreachable("Unknown loc info");
1056 case CCValAssign::Full: break;
1057 case CCValAssign::SExt:
1058 case CCValAssign::ZExt:
1059 case CCValAssign::AExt:
1060 // Floating-point values should only be extended when they're going into
1061 // memory, which can't happen here so an integer extend is acceptable.
1062 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1063 break;
1064 case CCValAssign::BCvt:
1065 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1066 break;
1067 }
1068
1069 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1070 Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
1073
  RetOps[0] = Chain;  // Update chain.
1075
1076 // Add the flag if we have it.
1077 if (Flag.getNode())
1078 RetOps.push_back(Flag);
1079
1080 return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
1081 &RetOps[0], RetOps.size());
}
1083
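// Lower an outgoing call: decide whether it may be a tail call, marshal the
// arguments into registers and the outgoing stack area (applying FPDiff for
// ABI-changing tail calls), then emit either a TC_RETURN node or a Call node
// followed by CALLSEQ_END and the copies out of the result registers.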
1084SDValue
1085AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
1086 SmallVectorImpl<SDValue> &InVals) const {
1087 SelectionDAG &DAG = CLI.DAG;
1088 DebugLoc &dl = CLI.DL;
1089 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
1090 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
1091 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
1092 SDValue Chain = CLI.Chain;
1093 SDValue Callee = CLI.Callee;
1094 bool &IsTailCall = CLI.IsTailCall;
1095 CallingConv::ID CallConv = CLI.CallConv;
1096 bool IsVarArg = CLI.IsVarArg;
1097
1098 MachineFunction &MF = DAG.getMachineFunction();
1099 AArch64MachineFunctionInfo *FuncInfo
1100 = MF.getInfo<AArch64MachineFunctionInfo>();
1101 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
1102 bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
1103 bool IsSibCall = false;
1104
1105 if (IsTailCall) {
1106 IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1107 IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
1108 Outs, OutVals, Ins, DAG);
1109
1110 // A sibling call is one where we're under the usual C ABI and not planning
1111 // to change that but can still do a tail call:
1112 if (!TailCallOpt && IsTailCall)
1113 IsSibCall = true;
1114 }
1115
1116 SmallVector<CCValAssign, 16> ArgLocs;
1117 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1118 getTargetMachine(), ArgLocs, *DAG.getContext());
1119 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
1120
1121 // On AArch64 (and all other architectures I'm aware of) the most this has to
1122 // do is adjust the stack pointer.
1123 unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
1124 if (IsSibCall) {
1125 // Since we're not changing the ABI to make this a tail call, the memory
1126 // operands are already available in the caller's incoming argument space.
1127 NumBytes = 0;
1128 }
1129
1130 // FPDiff is the byte offset of the call's argument area from the callee's.
1131 // Stores to callee stack arguments will be placed in FixedStackSlots offset
1132 // by this amount for a tail call. In a sibling call it must be 0 because the
1133 // caller will deallocate the entire stack and the callee still expects its
1134 // arguments to begin at SP+0. Completely unused for non-tail calls.
1135 int FPDiff = 0;
1136
1137 if (IsTailCall && !IsSibCall) {
1138 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1139
1140 // FPDiff will be negative if this tail call requires more space than we
1141 // would automatically have in our incoming argument space. Positive if we
1142 // can actually shrink the stack.
1143 FPDiff = NumReusableBytes - NumBytes;
1144
1145 // The stack pointer must be 16-byte aligned at all times it's used for a
1146 // memory operation, which in practice means at *all* times and in
1147 // particular across call boundaries. Therefore our own arguments started at
1148 // a 16-byte aligned SP and the delta applied for the tail call should
1149 // satisfy the same constraint.
1150 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
1151 }
1152
1153 if (!IsSibCall)
1154 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
1155
  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

1159 SmallVector<SDValue, 8> MemOpChains;
1160 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1161
1162 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1163 CCValAssign &VA = ArgLocs[i];
1164 ISD::ArgFlagsTy Flags = Outs[i].Flags;
1165 SDValue Arg = OutVals[i];
1166
1167 // Callee does the actual widening, so all extensions just use an implicit
1168 // definition of the rest of the Loc. Aesthetically, this would be nicer as
1169 // an ANY_EXTEND, but that isn't valid for floating-point types and this
1170 // alternative works on integer types too.
1171 switch (VA.getLocInfo()) {
1172 default: llvm_unreachable("Unknown loc info!");
1173 case CCValAssign::Full: break;
1174 case CCValAssign::SExt:
1175 case CCValAssign::ZExt:
1176 case CCValAssign::AExt: {
1177 unsigned SrcSize = VA.getValVT().getSizeInBits();
1178 unsigned SrcSubReg;
1179
1180 switch (SrcSize) {
1181 case 8: SrcSubReg = AArch64::sub_8; break;
1182 case 16: SrcSubReg = AArch64::sub_16; break;
1183 case 32: SrcSubReg = AArch64::sub_32; break;
1184 case 64: SrcSubReg = AArch64::sub_64; break;
1185 default: llvm_unreachable("Unexpected argument promotion");
1186 }
1187
1188 Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
1189 VA.getLocVT(),
1190 DAG.getUNDEF(VA.getLocVT()),
1191 Arg,
1192 DAG.getTargetConstant(SrcSubReg, MVT::i32)),
1193 0);
1194
1195 break;
1196 }
1197 case CCValAssign::BCvt:
1198 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1199 break;
1200 }
1201
1202 if (VA.isRegLoc()) {
1203 // A normal register (sub-) argument. For now we just note it down because
1204 // we want to copy things into registers as late as possible to avoid
1205 // register-pressure (and possibly worse).
1206 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1207 continue;
1208 }
1209
1210 assert(VA.isMemLoc() && "unexpected argument location");
1211
1212 SDValue DstAddr;
1213 MachinePointerInfo DstInfo;
1214 if (IsTailCall) {
1215 uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
1216 VA.getLocVT().getSizeInBits();
1217 OpSize = (OpSize + 7) / 8;
1218 int32_t Offset = VA.getLocMemOffset() + FPDiff;
1219 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
1220
1221 DstAddr = DAG.getFrameIndex(FI, getPointerTy());
1222 DstInfo = MachinePointerInfo::getFixedStack(FI);
1223
1224 // Make sure any stack arguments overlapping with where we're storing are
1225 // loaded before this eventual operation. Otherwise they'll be clobbered.
1226 Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
1227 } else {
1228 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());
1229
1230 DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
1231 DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
1232 }
1233
1234 if (Flags.isByVal()) {
1235 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
1236 SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
1237 Flags.getByValAlign(),
1238 /*isVolatile = */ false,
1239 /*alwaysInline = */ false,
1240 DstInfo, MachinePointerInfo(0));
1241 MemOpChains.push_back(Cpy);
1242 } else {
1243 // Normal stack argument, put it where it's needed.
1244 SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
1245 false, false, 0);
1246 MemOpChains.push_back(Store);
1247 }
1248 }
1249
1250 // The loads and stores generated above shouldn't clash with each
1251 // other. Combining them with this TokenFactor notes that fact for the rest of
1252 // the backend.
1253 if (!MemOpChains.empty())
1254 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1255 &MemOpChains[0], MemOpChains.size());
1256
1257 // Most of the rest of the instructions need to be glued together; we don't
1258 // want assignments to actual registers used by a call to be rearranged by a
1259 // well-meaning scheduler.
1260 SDValue InFlag;
1261
1262 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1263 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1264 RegsToPass[i].second, InFlag);
1265 InFlag = Chain.getValue(1);
1266 }
1267
1268 // The linker is responsible for inserting veneers when necessary to put a
1269 // function call destination in range, so we don't need to bother with a
1270 // wrapper here.
1271 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1272 const GlobalValue *GV = G->getGlobal();
1273 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
1274 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1275 const char *Sym = S->getSymbol();
1276 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
1277 }
1278
1279 // We don't usually want to end the call-sequence here because we would tidy
1280 // the frame up *after* the call, however in the ABI-changing tail-call case
1281 // we've carefully laid out the parameters so that when sp is reset they'll be
1282 // in the correct location.
1283 if (IsTailCall && !IsSibCall) {
1284 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1285 DAG.getIntPtrConstant(0, true), InFlag);
1286 InFlag = Chain.getValue(1);
1287 }
1288
1289 // We produce the following DAG scheme for the actual call instruction:
  // (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
1291 //
1292 // Most arguments aren't going to be used and just keep the values live as
1293 // far as LLVM is concerned. It's expected to be selected as simply "bl
1294 // callee" (for a direct, non-tail call).
1295 std::vector<SDValue> Ops;
1296 Ops.push_back(Chain);
1297 Ops.push_back(Callee);
1298
1299 if (IsTailCall) {
1300 // Each tail call may have to adjust the stack by a different amount, so
1301 // this information must travel along with the operation for eventual
1302 // consumption by emitEpilogue.
1303 Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
1304 }
1305
1306 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1307 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1308 RegsToPass[i].second.getValueType()));
1309
1310
1311 // Add a register mask operand representing the call-preserved registers. This
1312 // is used later in codegen to constrain register-allocation.
1313 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1314 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
1315 assert(Mask && "Missing call preserved mask for calling convention");
1316 Ops.push_back(DAG.getRegisterMask(Mask));
1317
1318 // If we needed glue, put it in as the last argument.
1319 if (InFlag.getNode())
1320 Ops.push_back(InFlag);
1321
1322 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1323
1324 if (IsTailCall) {
1325 return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
1326 }
1327
1328 Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
1329 InFlag = Chain.getValue(1);
1330
1331 // Now we can reclaim the stack, just as well do it before working out where
1332 // our return value is.
1333 if (!IsSibCall) {
1334 uint64_t CalleePopBytes
1335 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;
1336
1337 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1338 DAG.getIntPtrConstant(CalleePopBytes, true),
1339 InFlag);
1340 InFlag = Chain.getValue(1);
1341 }
1342
1343 return LowerCallResult(Chain, InFlag, CallConv,
1344 IsVarArg, Ins, dl, DAG, InVals);
1345}
1346
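// Copy the call's results out of the physical registers assigned by
// AnalyzeCallResult into the DAG, undoing any promotion or bitcast the
// calling convention applied on the way.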
1347SDValue
1348AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1349 CallingConv::ID CallConv, bool IsVarArg,
1350 const SmallVectorImpl<ISD::InputArg> &Ins,
1351 DebugLoc dl, SelectionDAG &DAG,
1352 SmallVectorImpl<SDValue> &InVals) const {
1353 // Assign locations to each value returned by this call.
1354 SmallVector<CCValAssign, 16> RVLocs;
1355 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1356 getTargetMachine(), RVLocs, *DAG.getContext());
1357 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));
1358
1359 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1360 CCValAssign VA = RVLocs[i];
1361
1362 // Return values that are too big to fit into registers should use an sret
1363 // pointer, so this can be a lot simpler than the main argument code.
1364 assert(VA.isRegLoc() && "Memory locations not expected for call return");
1365
1366 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1367 InFlag);
1368 Chain = Val.getValue(1);
1369 InFlag = Val.getValue(2);
1370
1371 switch (VA.getLocInfo()) {
1372 default: llvm_unreachable("Unknown loc info!");
1373 case CCValAssign::Full: break;
1374 case CCValAssign::BCvt:
1375 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1376 break;
1377 case CCValAssign::ZExt:
1378 case CCValAssign::SExt:
1379 case CCValAssign::AExt:
1380 // Floating-point arguments only get extended/truncated if they're going
1381 // in memory, so using the integer operation is acceptable here.
1382 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
1383 break;
1384 }
1385
1386 InVals.push_back(Val);
1387 }
1388
1389 return Chain;
1390}
1391
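// Decide whether this call can be made as a tail call without breaking the
// ABI: the calling conventions, byval arguments, variadic memory operands and
// the way results are returned all have to be compatible between caller and
// callee.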
1392bool
1393AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1394 CallingConv::ID CalleeCC,
1395 bool IsVarArg,
1396 bool IsCalleeStructRet,
1397 bool IsCallerStructRet,
1398 const SmallVectorImpl<ISD::OutputArg> &Outs,
1399 const SmallVectorImpl<SDValue> &OutVals,
1400 const SmallVectorImpl<ISD::InputArg> &Ins,
1401 SelectionDAG& DAG) const {
1402
1403 // For CallingConv::C this function knows whether the ABI needs
1404 // changing. That's not true for other conventions so they will have to opt in
1405 // manually.
1406 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
1407 return false;
1408
1409 const MachineFunction &MF = DAG.getMachineFunction();
1410 const Function *CallerF = MF.getFunction();
1411 CallingConv::ID CallerCC = CallerF->getCallingConv();
1412 bool CCMatch = CallerCC == CalleeCC;
1413
1414 // Byval parameters hand the function a pointer directly into the stack area
1415 // we want to reuse during a tail call. Working around this *is* possible (see
1416 // X86) but less efficient and uglier in LowerCall.
1417 for (Function::const_arg_iterator i = CallerF->arg_begin(),
1418 e = CallerF->arg_end(); i != e; ++i)
1419 if (i->hasByValAttr())
1420 return false;
1421
1422 if (getTargetMachine().Options.GuaranteedTailCallOpt) {
1423 if (IsTailCallConvention(CalleeCC) && CCMatch)
1424 return true;
1425 return false;
1426 }
1427
1428 // Now we search for cases where we can use a tail call without changing the
1429 // ABI. Sibcall is used in some places (particularly gcc) to refer to this
1430 // concept.
1431
1432 // I want anyone implementing a new calling convention to think long and hard
1433 // about this assert.
1434 assert((!IsVarArg || CalleeCC == CallingConv::C)
1435 && "Unexpected variadic calling convention");
1436
1437 if (IsVarArg && !Outs.empty()) {
1438 // At least two cases here: if caller is fastcc then we can't have any
1439 // memory arguments (we'd be expected to clean up the stack afterwards). If
1440 // caller is C then we could potentially use its argument area.
1441
1442 // FIXME: for now we take the most conservative of these in both cases:
1443 // disallow all variadic memory operands.
1444 SmallVector<CCValAssign, 16> ArgLocs;
1445 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1446 getTargetMachine(), ArgLocs, *DAG.getContext());
1447
1448 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1449 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
1450 if (!ArgLocs[i].isRegLoc())
1451 return false;
1452 }
1453
1454 // If the calling conventions do not match, then we'd better make sure the
1455 // results are returned in the same way as what the caller expects.
1456 if (!CCMatch) {
1457 SmallVector<CCValAssign, 16> RVLocs1;
1458 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
1459 getTargetMachine(), RVLocs1, *DAG.getContext());
1460 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));
1461
1462 SmallVector<CCValAssign, 16> RVLocs2;
1463 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
1464 getTargetMachine(), RVLocs2, *DAG.getContext());
1465 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));
1466
1467 if (RVLocs1.size() != RVLocs2.size())
1468 return false;
1469 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1470 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1471 return false;
1472 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1473 return false;
1474 if (RVLocs1[i].isRegLoc()) {
1475 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1476 return false;
1477 } else {
1478 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1479 return false;
1480 }
1481 }
1482 }
1483
1484 // Nothing more to check if the callee is taking no arguments
1485 if (Outs.empty())
1486 return true;
1487
1488 SmallVector<CCValAssign, 16> ArgLocs;
1489 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1490 getTargetMachine(), ArgLocs, *DAG.getContext());
1491
1492 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1493
1494 const AArch64MachineFunctionInfo *FuncInfo
1495 = MF.getInfo<AArch64MachineFunctionInfo>();
1496
1497 // If the stack arguments for this call would fit into our own save area then
1498 // the call can be made tail.
1499 return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
1500}
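// Illustrative note (not in the original source): if the caller itself
// received, say, 32 bytes of stack-based arguments, getBytesInStackArgArea()
// is 32, so a tail call whose outgoing arguments need at most 32 bytes of
// stack can reuse that area; one needing more falls back to a normal call.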
1501
1502bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
1503 bool TailCallOpt) const {
1504 return CallCC == CallingConv::Fast && TailCallOpt;
1505}
1506
1507bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
1508 return CallCC == CallingConv::Fast;
1509}
1510
1511SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
1512 SelectionDAG &DAG,
1513 MachineFrameInfo *MFI,
1514 int ClobberedFI) const {
1515 SmallVector<SDValue, 8> ArgChains;
1516 int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
1517 int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;
1518
1519 // Include the original chain at the beginning of the list. When this is
1520 // used by target LowerCall hooks, this helps legalize find the
1521 // CALLSEQ_BEGIN node.
1522 ArgChains.push_back(Chain);
1523
1524 // Add a chain value for each stack argument that overlaps the clobbered area.
1525 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1526 UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
1527 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
1528 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
1529 if (FI->getIndex() < 0) {
1530 int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
1531 int64_t InLastByte = InFirstByte;
1532 InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
1533
1534 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1535 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1536 ArgChains.push_back(SDValue(L, 1));
1537 }
1538
1539 // Build a tokenfactor for all the chains.
1540 return DAG.getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
1541 &ArgChains[0], ArgChains.size());
1542}
1543
1544static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
1545 switch (CC) {
1546 case ISD::SETEQ: return A64CC::EQ;
1547 case ISD::SETGT: return A64CC::GT;
1548 case ISD::SETGE: return A64CC::GE;
1549 case ISD::SETLT: return A64CC::LT;
1550 case ISD::SETLE: return A64CC::LE;
1551 case ISD::SETNE: return A64CC::NE;
1552 case ISD::SETUGT: return A64CC::HI;
1553 case ISD::SETUGE: return A64CC::HS;
1554 case ISD::SETULT: return A64CC::LO;
1555 case ISD::SETULE: return A64CC::LS;
1556 default: llvm_unreachable("Unexpected condition code");
1557 }
1558}
1559
1560bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
1561 // icmp is implemented using adds/subs immediate, which take an unsigned
1562 // 12-bit immediate, optionally shifted left by 12 bits.
1563
1564 // Negative values are handled symmetrically by using SUBS instead of ADDS.
1565 if (Val < 0)
1566 Val = -Val;
1567
1568 return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
1569}
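// Worked examples for the check above (illustration only):
//   Val = 0xabc     -> legal   (fits in the low 12 bits)
//   Val = 0xabc000  -> legal   (a 12-bit value shifted left by 12)
//   Val = 0xabcd    -> illegal (would need bits in both halves)
//   Val = -0xabc    -> legal   (negated first, then matches the 12-bit case)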
1570
1571SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
1572 ISD::CondCode CC, SDValue &A64cc,
1573 SelectionDAG &DAG, DebugLoc &dl) const {
1574 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
1575 int64_t C = 0;
1576 EVT VT = RHSC->getValueType(0);
1577 bool knownInvalid = false;
1578
1579 // I'm not convinced the rest of LLVM handles these edge cases properly, but
1580 // we can at least get it right.
1581 if (isSignedIntSetCC(CC)) {
1582 C = RHSC->getSExtValue();
1583 } else if (RHSC->getZExtValue() > INT64_MAX) {
1584 // A 64-bit constant not representable by a signed 64-bit integer is far
1585 // too big to fit into a SUBS immediate anyway.
1586 knownInvalid = true;
1587 } else {
1588 C = RHSC->getZExtValue();
1589 }
1590
1591 if (!knownInvalid && !isLegalICmpImmediate(C)) {
1592 // The constant doesn't fit; try adjusting the comparison by one so that it does.
1593 switch (CC) {
1594 default: break;
1595 case ISD::SETLT:
1596 case ISD::SETGE:
1597 if (isLegalICmpImmediate(C-1)) {
1598 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
1599 RHS = DAG.getConstant(C-1, VT);
1600 }
1601 break;
1602 case ISD::SETULT:
1603 case ISD::SETUGE:
1604 if (isLegalICmpImmediate(C-1)) {
1605 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
1606 RHS = DAG.getConstant(C-1, VT);
1607 }
1608 break;
1609 case ISD::SETLE:
1610 case ISD::SETGT:
1611 if (isLegalICmpImmediate(C+1)) {
1612 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
1613 RHS = DAG.getConstant(C+1, VT);
1614 }
1615 break;
1616 case ISD::SETULE:
1617 case ISD::SETUGT:
1618 if (isLegalICmpImmediate(C+1)) {
1619 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1620 RHS = DAG.getConstant(C+1, VT);
1621 }
1622 break;
1623 }
1624 }
1625 }
1626
1627 A64CC::CondCodes CondCode = IntCCToA64CC(CC);
1628 A64cc = DAG.getConstant(CondCode, MVT::i32);
1629 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1630 DAG.getCondCode(CC));
1631}
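// Example of the adjustment above (illustration only): "x setlt 0x1001" has
// a constant that no ADDS/SUBS immediate can encode, but 0x1000 can be, so
// the comparison is rewritten as "x setle 0x1000" and still selects to a
// single compare.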
1632
1633static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
1634 A64CC::CondCodes &Alternative) {
1635 A64CC::CondCodes CondCode = A64CC::Invalid;
1636 Alternative = A64CC::Invalid;
1637
1638 switch (CC) {
1639 default: llvm_unreachable("Unknown FP condition!");
1640 case ISD::SETEQ:
1641 case ISD::SETOEQ: CondCode = A64CC::EQ; break;
1642 case ISD::SETGT:
1643 case ISD::SETOGT: CondCode = A64CC::GT; break;
1644 case ISD::SETGE:
1645 case ISD::SETOGE: CondCode = A64CC::GE; break;
1646 case ISD::SETOLT: CondCode = A64CC::MI; break;
1647 case ISD::SETOLE: CondCode = A64CC::LS; break;
1648 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
1649 case ISD::SETO: CondCode = A64CC::VC; break;
1650 case ISD::SETUO: CondCode = A64CC::VS; break;
1651 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
1652 case ISD::SETUGT: CondCode = A64CC::HI; break;
1653 case ISD::SETUGE: CondCode = A64CC::PL; break;
1654 case ISD::SETLT:
1655 case ISD::SETULT: CondCode = A64CC::LT; break;
1656 case ISD::SETLE:
1657 case ISD::SETULE: CondCode = A64CC::LE; break;
1658 case ISD::SETNE:
1659 case ISD::SETUNE: CondCode = A64CC::NE; break;
1660 }
1661 return CondCode;
1662}
1663
1664SDValue
1665AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1666 DebugLoc DL = Op.getDebugLoc();
1667 EVT PtrVT = getPointerTy();
1668 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1669
1670 switch (getTargetMachine().getCodeModel()) {
1671 case CodeModel::Small:
1672 // The most efficient code is PC-relative anyway for the small memory model,
1673 // so we don't need to worry about relocation model.
1674 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
1675 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1676 AArch64II::MO_NO_FLAG),
1677 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1678 AArch64II::MO_LO12),
1679 DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
1680 case CodeModel::Large:
1681 return DAG.getNode(
1682 AArch64ISD::WrapperLarge, DL, PtrVT,
1683 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
1684 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
1685 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
1686 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
1687 default:
1688 llvm_unreachable("Only small and large code models supported now");
1689 }
1690}
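// For reference (illustration, not part of the original file), the small
// code model wrapper above selects to a PC-relative pair such as
//   adrp x0, .Ltmp0
//   add  x0, x0, #:lo12:.Ltmp0
// while the large model materialises a 64-bit absolute address:
//   movz x0, #:abs_g3:.Ltmp0
//   movk x0, #:abs_g2_nc:.Ltmp0
//   movk x0, #:abs_g1_nc:.Ltmp0
//   movk x0, #:abs_g0_nc:.Ltmp0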
1691
1692
1693// (BRCOND chain, val, dest)
1694SDValue
1695AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1696 DebugLoc dl = Op.getDebugLoc();
1697 SDValue Chain = Op.getOperand(0);
1698 SDValue TheBit = Op.getOperand(1);
1699 SDValue DestBB = Op.getOperand(2);
1700
1701 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
1702 // that as the consumer we are responsible for ignoring rubbish in higher
1703 // bits.
1704 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
1705 DAG.getConstant(1, MVT::i32));
1706
1707 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
1708 DAG.getConstant(0, TheBit.getValueType()),
1709 DAG.getCondCode(ISD::SETNE));
1710
1711 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
1712 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
1713 DestBB);
1714}
1715
1716// (BR_CC chain, condcode, lhs, rhs, dest)
1717SDValue
1718AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1719 DebugLoc dl = Op.getDebugLoc();
1720 SDValue Chain = Op.getOperand(0);
1721 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1722 SDValue LHS = Op.getOperand(2);
1723 SDValue RHS = Op.getOperand(3);
1724 SDValue DestBB = Op.getOperand(4);
1725
1726 if (LHS.getValueType() == MVT::f128) {
1727 // f128 comparisons are lowered to runtime calls by a routine which sets
1728 // LHS, RHS and CC appropriately for the rest of this function to continue.
1729 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
1730
1731 // If softenSetCCOperands returned a scalar, we need to compare the result
1732 // against zero to select between true and false values.
1733 if (RHS.getNode() == 0) {
1734 RHS = DAG.getConstant(0, LHS.getValueType());
1735 CC = ISD::SETNE;
1736 }
1737 }
1738
1739 if (LHS.getValueType().isInteger()) {
1740 SDValue A64cc;
1741
1742 // Integers are handled in a separate function because the combinations of
1743 // immediates and tests can get hairy and we may want to fiddle things.
1744 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
1745
1746 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1747 Chain, CmpOp, A64cc, DestBB);
1748 }
1749
1750 // Note that some LLVM floating-point CondCodes can't be lowered to a single
1751 // conditional branch, hence FPCCToA64CC can set a second test, where either
1752 // passing is sufficient.
1753 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
1754 CondCode = FPCCToA64CC(CC, Alternative);
1755 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
1756 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1757 DAG.getCondCode(CC));
1758 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1759 Chain, SetCC, A64cc, DestBB);
1760
1761 if (Alternative != A64CC::Invalid) {
1762 A64cc = DAG.getConstant(Alternative, MVT::i32);
1763 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1764 A64BR_CC, SetCC, A64cc, DestBB);
1765
1766 }
1767
1768 return A64BR_CC;
1769}
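// Example of the two-test case above (illustration only): SETONE (ordered
// and not equal) has no single A64 condition, so FPCCToA64CC returns MI with
// GT as the alternative, and the lowering emits one comparison followed by
// two conditional branches to the same destination.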
1770
1771SDValue
1772AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
1773 RTLIB::Libcall Call) const {
1774 ArgListTy Args;
1775 ArgListEntry Entry;
1776 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
1777 EVT ArgVT = Op.getOperand(i).getValueType();
1778 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1779 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
1780 Entry.isSExt = false;
1781 Entry.isZExt = false;
1782 Args.push_back(Entry);
1783 }
1784 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
1785
1786 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
1787
1788 // By default, the input chain to this libcall is the entry node of the
1789 // function. If the libcall is going to be emitted as a tail call then
1790 // isUsedByReturnOnly will change it to the right chain if the return
1791 // node which is being folded has a non-entry input chain.
1792 SDValue InChain = DAG.getEntryNode();
1793
1794 // isTailCall may be true since the callee does not reference the caller's
1795 // stack frame. Check that the call is in the right position.
1796 SDValue TCChain = InChain;
1797 bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
1798 if (isTailCall)
1799 InChain = TCChain;
1800
1801 TargetLowering::
1802 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
1803 0, getLibcallCallingConv(Call), isTailCall,
1804 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
1805 Callee, Args, DAG, Op->getDebugLoc());
1806 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1807
1808 if (!CallInfo.second.getNode())
1809 // It's a tailcall, return the chain (which is the DAG root).
1810 return DAG.getRoot();
1811
1812 return CallInfo.first;
1813}
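// As an example (illustration only), an f128 FADD arrives here as
// RTLIB::ADD_F128 and becomes a call to the runtime helper __addtf3, with
// the quad-precision operands passed and returned according to the AAPCS.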
1814
1815SDValue
1816AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
1817 if (Op.getOperand(0).getValueType() != MVT::f128) {
1818 // It's legal except when f128 is involved
1819 return Op;
1820 }
1821
1822 RTLIB::Libcall LC;
1823 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
1824
1825 SDValue SrcVal = Op.getOperand(0);
1826 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
1827 /*isSigned*/ false, Op.getDebugLoc());
1828}
1829
1830SDValue
1831AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
1832 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
1833
1834 RTLIB::Libcall LC;
1835 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
1836
1837 return LowerF128ToCall(Op, DAG, LC);
1838}
1839
1840SDValue
1841AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1842 bool IsSigned) const {
1843 if (Op.getOperand(0).getValueType() != MVT::f128) {
1844 // It's legal except when f128 is involved
1845 return Op;
1846 }
1847
1848 RTLIB::Libcall LC;
1849 if (IsSigned)
1850 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
1851 else
1852 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
1853
1854 return LowerF128ToCall(Op, DAG, LC);
1855}
1856
1857SDValue
1858AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
1859 SelectionDAG &DAG) const {
1860 assert(getTargetMachine().getCodeModel() == CodeModel::Large);
1861 assert(getTargetMachine().getRelocationModel() == Reloc::Static);
1862
1863 EVT PtrVT = getPointerTy();
1864 DebugLoc dl = Op.getDebugLoc();
1865 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
1866 const GlobalValue *GV = GN->getGlobal();
1867
1868 SDValue GlobalAddr = DAG.getNode(
1869 AArch64ISD::WrapperLarge, dl, PtrVT,
1870 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
1871 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
1872 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
1873 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
1874
1875 if (GN->getOffset() != 0)
1876 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
1877 DAG.getConstant(GN->getOffset(), PtrVT));
1878
1879 return GlobalAddr;
1880}
1881
1882SDValue
1883AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
1884 SelectionDAG &DAG) const {
1885 assert(getTargetMachine().getCodeModel() == CodeModel::Small);
1886
1887 EVT PtrVT = getPointerTy();
1888 DebugLoc dl = Op.getDebugLoc();
1889 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
1890 const GlobalValue *GV = GN->getGlobal();
1891 unsigned Alignment = GV->getAlignment();
1892 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
1893 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
1894 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
1895 // to zero when they remain undefined. In PIC mode the GOT can take care of
1896 // this, but in absolute mode we use a constant pool load.
1897 SDValue PoolAddr;
1898 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
1899 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
1900 AArch64II::MO_NO_FLAG),
1901 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
1902 AArch64II::MO_LO12),
1903 DAG.getConstant(8, MVT::i32));
1904 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
1905 MachinePointerInfo::getConstantPool(),
1906 /*isVolatile=*/ false,
1907 /*isNonTemporal=*/ true,
1908 /*isInvariant=*/ true, 8);
1909 if (GN->getOffset() != 0)
1910 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
1911 DAG.getConstant(GN->getOffset(), PtrVT));
1912
1913 return GlobalAddr;
1914 }
1915
1916 if (Alignment == 0) {
1917 const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
1918 if (GVPtrTy->getElementType()->isSized()) {
1919 Alignment
1920 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
1921 } else {
1922 // Be conservative if we can't guess, not that it really matters:
1923 // functions and labels aren't valid for loads, and the methods used to
1924 // actually calculate an address work with any alignment.
1925 Alignment = 1;
1926 }
1927 }
1928
1929 unsigned char HiFixup, LoFixup;
1930 bool UseGOT = Subtarget->GVIsIndirectSymbol(GV, RelocM);
1931
1932 if (UseGOT) {
1933 HiFixup = AArch64II::MO_GOT;
1934 LoFixup = AArch64II::MO_GOT_LO12;
1935 Alignment = 8;
1936 } else {
1937 HiFixup = AArch64II::MO_NO_FLAG;
1938 LoFixup = AArch64II::MO_LO12;
1939 }
1940
1941 // AArch64's small model demands the following sequence:
1942 // ADRP x0, somewhere
1943 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
1944 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
1945 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1946 HiFixup),
1947 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1948 LoFixup),
1949 DAG.getConstant(Alignment, MVT::i32));
1950
1951 if (UseGOT) {
1952 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
1953 GlobalRef);
1954 }
1955
1956 if (GN->getOffset() != 0)
1957 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
1958 DAG.getConstant(GN->getOffset(), PtrVT));
1959
1960 return GlobalRef;
1961}
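// The two flavours built above correspond to sequences like this
// (illustration only):
//   direct:        adrp x0, var
//                  add  x0, x0, #:lo12:var
//   via the GOT:   adrp x0, :got:var
//                  ldr  x0, [x0, #:got_lo12:var]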
1962
1963SDValue
1964AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
1965 SelectionDAG &DAG) const {
1966 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
1967 // we make those distinctions here.
1968
1969 switch (getTargetMachine().getCodeModel()) {
1970 case CodeModel::Small:
1971 return LowerGlobalAddressELFSmall(Op, DAG);
1972 case CodeModel::Large:
1973 return LowerGlobalAddressELFLarge(Op, DAG);
1974 default:
1975 llvm_unreachable("Only small and large code models supported now");
1976 }
1977}
1978
1979SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
1980 SDValue DescAddr,
1981 DebugLoc DL,
1982 SelectionDAG &DAG) const {
1983 EVT PtrVT = getPointerTy();
1984
1985 // The function we need to call is simply the first entry in the GOT for this
1986 // descriptor, load it in preparation.
1987 SDValue Func, Chain;
1988 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
1989 DescAddr);
1990
1991 // The function takes only one argument: the address of the descriptor itself
1992 // in X0.
1993 SDValue Glue;
1994 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
1995 Glue = Chain.getValue(1);
1996
1997 // Finally, there's a special calling-convention which means that the lookup
1998 // must preserve all registers (except X0, obviously).
1999 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2000 const AArch64RegisterInfo *A64RI
2001 = static_cast<const AArch64RegisterInfo *>(TRI);
2002 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
2003
2004 // We're now ready to populate the argument list, as with a normal call:
2005 std::vector<SDValue> Ops;
2006 Ops.push_back(Chain);
2007 Ops.push_back(Func);
2008 Ops.push_back(SymAddr);
2009 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
2010 Ops.push_back(DAG.getRegisterMask(Mask));
2011 Ops.push_back(Glue);
2012
2013 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2014 Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
2015 Ops.size());
2016 Glue = Chain.getValue(1);
2017
2018 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
2019 // back to the generic handling code.
2020 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
2021}
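// Put together, a general-dynamic access ends up as a sequence along these
// lines (illustration only):
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, #:tlsdesc_lo12:var]
//   add  x0, x0, #:tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1
//   mrs  x8, tpidr_el0      // thread pointer
//   // x0 now holds var's offset from the thread pointer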
2022
2023SDValue
2024AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
2025 SelectionDAG &DAG) const {
2026 assert(Subtarget->isTargetELF() &&
2027 "TLS not implemented for non-ELF targets");
2028 assert(getTargetMachine().getCodeModel() == CodeModel::Small
2029 && "TLS only supported in small memory model");
2030 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2031
2032 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
2033
2034 SDValue TPOff;
2035 EVT PtrVT = getPointerTy();
2036 DebugLoc DL = Op.getDebugLoc();
2037 const GlobalValue *GV = GA->getGlobal();
2038
2039 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
2040
2041 if (Model == TLSModel::InitialExec) {
2042 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2043 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2044 AArch64II::MO_GOTTPREL),
2045 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2046 AArch64II::MO_GOTTPREL_LO12),
2047 DAG.getConstant(8, MVT::i32));
2048 TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
2049 TPOff);
2050 } else if (Model == TLSModel::LocalExec) {
2051 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2052 AArch64II::MO_TPREL_G1);
2053 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2054 AArch64II::MO_TPREL_G0_NC);
2055
2056 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2057 DAG.getTargetConstant(0, MVT::i32)), 0);
2058 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2059 TPOff, LoVar,
2060 DAG.getTargetConstant(0, MVT::i32)), 0);
2061 } else if (Model == TLSModel::GeneralDynamic) {
2062 // Accesses used in this sequence go via the TLS descriptor which lives in
2063 // the GOT. Prepare an address we can use to handle this.
2064 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2065 AArch64II::MO_TLSDESC);
2066 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2067 AArch64II::MO_TLSDESC_LO12);
2068 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2069 HiDesc, LoDesc,
2070 DAG.getConstant(8, MVT::i32));
2071 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
2072
2073 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2074 } else if (Model == TLSModel::LocalDynamic) {
2075 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
2076 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
2077 // the beginning of the module's TLS region, followed by a DTPREL offset
2078 // calculation.
2079
2080 // These accesses will need deduplicating if there's more than one.
2081 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
2082 .getInfo<AArch64MachineFunctionInfo>();
2083 MFI->incNumLocalDynamicTLSAccesses();
2084
2085
2086 // Get the location of _TLS_MODULE_BASE_:
2087 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2088 AArch64II::MO_TLSDESC);
2089 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2090 AArch64II::MO_TLSDESC_LO12);
2091 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2092 HiDesc, LoDesc,
2093 DAG.getConstant(8, MVT::i32));
2094 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
2095
2096 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2097
2098 // Get the variable's offset from _TLS_MODULE_BASE_
2099 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2100 AArch64II::MO_DTPREL_G1);
2101 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2102 AArch64II::MO_DTPREL_G0_NC);
2103
2104 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2105 DAG.getTargetConstant(0, MVT::i32)), 0);
2106 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
2107 TPOff, LoVar,
2108 DAG.getTargetConstant(0, MVT::i32)), 0);
2109 } else
2110 llvm_unreachable("Unsupported TLS access model");
2111
2112
2113 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
2114}
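// For the local-exec model handled above, the resulting code is roughly
// (illustration only):
//   movz x0, #:tprel_g1:var
//   movk x0, #:tprel_g0_nc:var
//   mrs  x1, tpidr_el0
//   add  x0, x1, x0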
2115
2116SDValue
2117AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2118 bool IsSigned) const {
2119 if (Op.getValueType() != MVT::f128) {
2120 // Legal for everything except f128.
2121 return Op;
2122 }
2123
2124 RTLIB::Libcall LC;
2125 if (IsSigned)
2126 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2127 else
2128 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2129
2130 return LowerF128ToCall(Op, DAG, LC);
2131}
2132
2133
2134SDValue
2135AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2136 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2137 DebugLoc dl = JT->getDebugLoc();
2138 EVT PtrVT = getPointerTy();
2139
2140 // When compiling PIC, jump tables get put in the code section so a static
2141 // relocation-style is acceptable for both cases.
2142 switch (getTargetMachine().getCodeModel()) {
2143 case CodeModel::Small:
2144 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2145 DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
2146 DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2147 AArch64II::MO_LO12),
2148 DAG.getConstant(1, MVT::i32));
2149 case CodeModel::Large:
2150 return DAG.getNode(
2151 AArch64ISD::WrapperLarge, dl, PtrVT,
2152 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
2153 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
2154 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
2155 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
2156 default:
2157 llvm_unreachable("Only small and large code models supported now");
2158 }
2159}
2160
2161// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
2162SDValue
2163AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2164 DebugLoc dl = Op.getDebugLoc();
2165 SDValue LHS = Op.getOperand(0);
2166 SDValue RHS = Op.getOperand(1);
2167 SDValue IfTrue = Op.getOperand(2);
2168 SDValue IfFalse = Op.getOperand(3);
2169 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2170
2171 if (LHS.getValueType() == MVT::f128) {
2172 // f128 comparisons are lowered to libcalls, but slot in nicely here
2173 // afterwards.
2174 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2175
2176 // If softenSetCCOperands returned a scalar, we need to compare the result
2177 // against zero to select between true and false values.
2178 if (RHS.getNode() == 0) {
2179 RHS = DAG.getConstant(0, LHS.getValueType());
2180 CC = ISD::SETNE;
2181 }
2182 }
2183
2184 if (LHS.getValueType().isInteger()) {
2185 SDValue A64cc;
2186
2187 // Integers are handled in a separate function because the combinations of
2188 // immediates and tests can get hairy and we may want to fiddle things.
2189 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2190
2191 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2192 CmpOp, IfTrue, IfFalse, A64cc);
2193 }
2194
2195 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2196 // conditional branch, hence FPCCToA64CC can set a second test, where either
2197 // passing is sufficient.
2198 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2199 CondCode = FPCCToA64CC(CC, Alternative);
2200 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2201 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2202 DAG.getCondCode(CC));
2203 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
2204 Op.getValueType(),
2205 SetCC, IfTrue, IfFalse, A64cc);
2206
2207 if (Alternative != A64CC::Invalid) {
2208 A64cc = DAG.getConstant(Alternative, MVT::i32);
2209 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2210 SetCC, IfTrue, A64SELECT_CC, A64cc);
2211
2212 }
2213
2214 return A64SELECT_CC;
2215}
2216
2217// (SELECT testbit, iftrue, iffalse)
2218SDValue
2219AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2220 DebugLoc dl = Op.getDebugLoc();
2221 SDValue TheBit = Op.getOperand(0);
2222 SDValue IfTrue = Op.getOperand(1);
2223 SDValue IfFalse = Op.getOperand(2);
2224
2225 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
2226 // that as the consumer we are responsible for ignoring rubbish in higher
2227 // bits.
2228 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2229 DAG.getConstant(1, MVT::i32));
2230 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2231 DAG.getConstant(0, TheBit.getValueType()),
2232 DAG.getCondCode(ISD::SETNE));
2233
2234 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2235 A64CMP, IfTrue, IfFalse,
2236 DAG.getConstant(A64CC::NE, MVT::i32));
2237}
2238
2239// (SETCC lhs, rhs, condcode)
2240SDValue
2241AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2242 DebugLoc dl = Op.getDebugLoc();
2243 SDValue LHS = Op.getOperand(0);
2244 SDValue RHS = Op.getOperand(1);
2245 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2246 EVT VT = Op.getValueType();
2247
2248 if (LHS.getValueType() == MVT::f128) {
2249 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
2250 // for the rest of the function (some i32 or i64 values).
2251 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2252
2253 // If softenSetCCOperands returned a scalar, use it.
2254 if (RHS.getNode() == 0) {
2255 assert(LHS.getValueType() == Op.getValueType() &&
2256 "Unexpected setcc expansion!");
2257 return LHS;
2258 }
2259 }
2260
2261 if (LHS.getValueType().isInteger()) {
2262 SDValue A64cc;
2263
2264 // Integers are handled in a separate function because the combinations of
2265 // immediates and tests can get hairy and we may want to fiddle things.
2266 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2267
2268 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2269 CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
2270 A64cc);
2271 }
2272
2273 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2274 // conditional branch, hence FPCCToA64CC can set a second test, where either
2275 // passing is sufficient.
2276 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2277 CondCode = FPCCToA64CC(CC, Alternative);
2278 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2279 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2280 DAG.getCondCode(CC));
2281 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2282 CmpOp, DAG.getConstant(1, VT),
2283 DAG.getConstant(0, VT), A64cc);
2284
2285 if (Alternative != A64CC::Invalid) {
2286 A64cc = DAG.getConstant(Alternative, MVT::i32);
2287 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
2288 DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
2289 }
2290
2291 return A64SELECT_CC;
2292}
2293
2294SDValue
2295AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2296 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2297 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2298
2299 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
2300 // rather than just 8.
2301 return DAG.getMemcpy(Op.getOperand(0), Op.getDebugLoc(),
2302 Op.getOperand(1), Op.getOperand(2),
2303 DAG.getConstant(32, MVT::i32), 8, false, false,
2304 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
2305}
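// The 32 bytes above cover the AAPCS64 va_list record that LowerVASTART
// fills in below; as a sketch (not from the original file):
//   struct va_list {
//     void *__stack;    // offset 0:  next stacked argument
//     void *__gr_top;   // offset 8:  end of the GPR save area
//     void *__vr_top;   // offset 16: end of the FPR/SIMD save area
//     int   __gr_offs;  // offset 24: negative bytes of GPRs remaining
//     int   __vr_offs;  // offset 28: negative bytes of FPRs remaining
//   };                  // 8+8+8+4+4 = 32 bytes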
2306
2307SDValue
2308AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2309 // The layout of the va_list struct is specified in the AArch64 Procedure Call
2310 // Standard, section B.3.
2311 MachineFunction &MF = DAG.getMachineFunction();
2312 AArch64MachineFunctionInfo *FuncInfo
2313 = MF.getInfo<AArch64MachineFunctionInfo>();
2314 DebugLoc DL = Op.getDebugLoc();
2315
2316 SDValue Chain = Op.getOperand(0);
2317 SDValue VAList = Op.getOperand(1);
2318 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2319 SmallVector<SDValue, 4> MemOps;
2320
2321 // void *__stack at offset 0
2322 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
2323 getPointerTy());
2324 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
2325 MachinePointerInfo(SV), false, false, 0));
2326
2327 // void *__gr_top at offset 8
2328 int GPRSize = FuncInfo->getVariadicGPRSize();
2329 if (GPRSize > 0) {
2330 SDValue GRTop, GRTopAddr;
2331
2332 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2333 DAG.getConstant(8, getPointerTy()));
2334
2335 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
2336 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
2337 DAG.getConstant(GPRSize, getPointerTy()));
2338
2339 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
2340 MachinePointerInfo(SV, 8),
2341 false, false, 0));
2342 }
2343
2344 // void *__vr_top at offset 16
2345 int FPRSize = FuncInfo->getVariadicFPRSize();
2346 if (FPRSize > 0) {
2347 SDValue VRTop, VRTopAddr;
2348 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2349 DAG.getConstant(16, getPointerTy()));
2350
2351 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
2352 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
2353 DAG.getConstant(FPRSize, getPointerTy()));
2354
2355 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
2356 MachinePointerInfo(SV, 16),
2357 false, false, 0));
2358 }
2359
2360 // int __gr_offs at offset 24
2361 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2362 DAG.getConstant(24, getPointerTy()));
2363 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
2364 GROffsAddr, MachinePointerInfo(SV, 24),
2365 false, false, 0));
2366
2367 // int __vr_offs at offset 28
2368 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2369 DAG.getConstant(28, getPointerTy()));
2370 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
2371 VROffsAddr, MachinePointerInfo(SV, 28),
2372 false, false, 0));
2373
2374 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
2375 MemOps.size());
2376}
2377
2378SDValue
2379AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2380 switch (Op.getOpcode()) {
2381 default: llvm_unreachable("Don't know how to custom lower this!");
2382 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
2383 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
2384 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
2385 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
2386 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
2387 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
2388 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
2389 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
2390 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
2391 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
2392
2393 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2394 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
2395 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
2396 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
2397 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2398 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2399 case ISD::SELECT: return LowerSELECT(Op, DAG);
2400 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
2401 case ISD::SETCC: return LowerSETCC(Op, DAG);
2402 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
2403 case ISD::VASTART: return LowerVASTART(Op, DAG);
2404 }
2405
2406 return SDValue();
2407}
2408
2409static SDValue PerformANDCombine(SDNode *N,
2410 TargetLowering::DAGCombinerInfo &DCI) {
2411
2412 SelectionDAG &DAG = DCI.DAG;
2413 DebugLoc DL = N->getDebugLoc();
2414 EVT VT = N->getValueType(0);
2415
2416 // We're looking for an AND/SRL pair that forms a UBFX.
2417
2418 if (VT != MVT::i32 && VT != MVT::i64)
2419 return SDValue();
2420
2421 if (!isa<ConstantSDNode>(N->getOperand(1)))
2422 return SDValue();
2423
2424 uint64_t TruncMask = N->getConstantOperandVal(1);
2425 if (!isMask_64(TruncMask))
2426 return SDValue();
2427
2428 uint64_t Width = CountPopulation_64(TruncMask);
2429 SDValue Shift = N->getOperand(0);
2430
2431 if (Shift.getOpcode() != ISD::SRL)
2432 return SDValue();
2433
2434 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
2435 return SDValue();
2436 uint64_t LSB = Shift->getConstantOperandVal(1);
2437
2438 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
2439 return SDValue();
2440
2441 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
2442 DAG.getConstant(LSB, MVT::i64),
2443 DAG.getConstant(LSB + Width - 1, MVT::i64));
2444}
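// Example of the combine above (illustration only): for i32
//   (and (srl x, 7), 0x1f)
// gives Width = 5 and LSB = 7, so the node becomes
//   AArch64ISD::UBFX(x, 7, 11)
// i.e. an unsigned extract of bits [11:7] of x.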
2445
2446/// For a true bitfield insert, the bits getting into that contiguous mask
2447/// should come from the low part of an existing value: they must be formed from
2448/// a compatible SHL operation (unless they're already low). This function
2449/// checks that condition and returns the least-significant bit that's
2450/// intended. If the operation is not a field preparation, -1 is returned.
2451static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT,
2452 SDValue &MaskedVal, uint64_t Mask) {
2453 if (!isShiftedMask_64(Mask))
2454 return -1;
2455
2456 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
2457 // instruction. BFI will do a left-shift by LSB before applying the mask we've
2458 // spotted, so in general we should pre-emptively "undo" that by making sure
2459 // the incoming bits have had a right-shift applied to them.
2460 //
2461 // This right shift, however, will combine with existing left/right shifts. In
2462 // the simplest case of a completely straight bitfield operation, it will be
2463 // expected to completely cancel out with an existing SHL. More complicated
2464 // cases (e.g. bitfield to bitfield copy) may still need a real shift before
2465 // the BFI.
2466
2467 uint64_t LSB = CountTrailingZeros_64(Mask);
2468 int64_t ShiftRightRequired = LSB;
2469 if (MaskedVal.getOpcode() == ISD::SHL &&
2470 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2471 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
2472 MaskedVal = MaskedVal.getOperand(0);
2473 } else if (MaskedVal.getOpcode() == ISD::SRL &&
2474 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
2475 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
2476 MaskedVal = MaskedVal.getOperand(0);
2477 }
2478
2479 if (ShiftRightRequired > 0)
2480 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
2481 DAG.getConstant(ShiftRightRequired, MVT::i64));
2482 else if (ShiftRightRequired < 0) {
2483 // We could actually end up with a residual left shift, for example with
2484 // "struc.bitfield = val << 1".
2485 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
2486 DAG.getConstant(-ShiftRightRequired, MVT::i64));
2487 }
2488
2489 return LSB;
2490}
2491
2492/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
2493/// a mask and an extension. Returns true if a BFI was found and provides
2494/// information on its surroundings.
2495static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
2496 bool &Extended) {
2497 Extended = false;
2498 if (N.getOpcode() == ISD::ZERO_EXTEND) {
2499 Extended = true;
2500 N = N.getOperand(0);
2501 }
2502
2503 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2504 Mask = N->getConstantOperandVal(1);
2505 N = N.getOperand(0);
2506 } else {
2507 // Mask is the whole width.
2508 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
2509 }
2510
2511 if (N.getOpcode() == AArch64ISD::BFI) {
2512 BFI = N;
2513 return true;
2514 }
2515
2516 return false;
2517}
2518
2519/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
2520/// is roughly equivalent to (and (BFI ...), mask). This form is used because it
2521/// can often be further combined with a larger mask. Ultimately, we want mask
2522/// to be 2^32-1 or 2^64-1 so the AND can be skipped.
2523static SDValue tryCombineToBFI(SDNode *N,
2524 TargetLowering::DAGCombinerInfo &DCI,
2525 const AArch64Subtarget *Subtarget) {
2526 SelectionDAG &DAG = DCI.DAG;
2527 DebugLoc DL = N->getDebugLoc();
2528 EVT VT = N->getValueType(0);
2529
2530 assert(N->getOpcode() == ISD::OR && "Unexpected root");
2531
2532 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
2533 // abandon the effort.
2534 SDValue LHS = N->getOperand(0);
2535 if (LHS.getOpcode() != ISD::AND)
2536 return SDValue();
2537
2538 uint64_t LHSMask;
2539 if (isa<ConstantSDNode>(LHS.getOperand(1)))
2540 LHSMask = LHS->getConstantOperandVal(1);
2541 else
2542 return SDValue();
2543
2544 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
2545 // is or abandon the effort.
2546 SDValue RHS = N->getOperand(1);
2547 if (RHS.getOpcode() != ISD::AND)
2548 return SDValue();
2549
2550 uint64_t RHSMask;
2551 if (isa<ConstantSDNode>(RHS.getOperand(1)))
2552 RHSMask = RHS->getConstantOperandVal(1);
2553 else
2554 return SDValue();
2555
2556 // Can't do anything if the masks are incompatible.
2557 if (LHSMask & RHSMask)
2558 return SDValue();
2559
2560 // Now we need one of the masks to be a contiguous field. Without loss of
2561 // generality that should be the RHS one.
2562 SDValue Bitfield = LHS.getOperand(0);
2563 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
2564 // We know that LHS is a candidate new value, and RHS isn't already a better
2565 // one.
2566 std::swap(LHS, RHS);
2567 std::swap(LHSMask, RHSMask);
2568 }
2569
2570 // We've done our best to put the right operands in the right places, all we
2571 // can do now is check whether a BFI exists.
2572 Bitfield = RHS.getOperand(0);
2573 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
2574 if (LSB == -1)
2575 return SDValue();
2576
2577 uint32_t Width = CountPopulation_64(RHSMask);
2578 assert(Width && "Expected non-zero bitfield width");
2579
2580 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
2581 LHS.getOperand(0), Bitfield,
2582 DAG.getConstant(LSB, MVT::i64),
2583 DAG.getConstant(Width, MVT::i64));
2584
2585 // Mask is trivial
2586 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
2587 return BFI;
2588
2589 return DAG.getNode(ISD::AND, DL, VT, BFI,
2590 DAG.getConstant(LHSMask | RHSMask, VT));
2591}
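// Example of the combine above (illustration only): for i32
//   (or (and a, 0xffff00ff), (and (shl b, 8), 0xff00))
// the right-hand mask is a contiguous field with LSB = 8 and Width = 8, so
// this becomes AArch64ISD::BFI(a, b, 8, 8); the two masks together cover the
// whole register, so no extra AND is emitted.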
2592
2593/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
2594/// original input. This is surprisingly common because SROA splits things up
2595/// into i8 chunks, so the originally detected MaskedBFI may actually only act
2596/// on the low (say) byte of a word. This is then orred into the rest of the
2597/// word afterwards.
2598///
2599/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
2600///
2601/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
2602/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
2603/// involved.
2604static SDValue tryCombineToLargerBFI(SDNode *N,
2605 TargetLowering::DAGCombinerInfo &DCI,
2606 const AArch64Subtarget *Subtarget) {
2607 SelectionDAG &DAG = DCI.DAG;
2608 DebugLoc DL = N->getDebugLoc();
2609 EVT VT = N->getValueType(0);
2610
2611 // First job is to hunt for a MaskedBFI on either the left or right. Swap
2612 // operands if it's actually on the right.
2613 SDValue BFI;
2614 SDValue PossExtraMask;
2615 uint64_t ExistingMask = 0;
2616 bool Extended = false;
2617 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
2618 PossExtraMask = N->getOperand(1);
2619 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
2620 PossExtraMask = N->getOperand(0);
2621 else
2622 return SDValue();
2623
2624 // We can only combine a BFI with another compatible mask.
2625 if (PossExtraMask.getOpcode() != ISD::AND ||
2626 !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
2627 return SDValue();
2628
2629 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
2630
2631 // Masks must be compatible.
2632 if (ExtraMask & ExistingMask)
2633 return SDValue();
2634
2635 SDValue OldBFIVal = BFI.getOperand(0);
2636 SDValue NewBFIVal = BFI.getOperand(1);
2637 if (Extended) {
2638 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
2639 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
2640 // need to be made compatible.
2641 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
2642 && "Invalid types for BFI");
2643 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
2644 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
2645 }
2646
2647 // We need the MaskedBFI to be combined with a mask of the *same* value.
2648 if (PossExtraMask.getOperand(0) != OldBFIVal)
2649 return SDValue();
2650
2651 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
2652 OldBFIVal, NewBFIVal,
2653 BFI.getOperand(2), BFI.getOperand(3));
2654
2655 // If the masking is trivial, we don't need to create it.
2656 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
2657 return BFI;
2658
2659 return DAG.getNode(ISD::AND, DL, VT, BFI,
2660 DAG.getConstant(ExtraMask | ExistingMask, VT));
2661}
2662
2663/// An EXTR instruction is made up of two shifts, ORed together. This helper
2664/// searches for and classifies those shifts.
2665static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
2666 bool &FromHi) {
2667 if (N.getOpcode() == ISD::SHL)
2668 FromHi = false;
2669 else if (N.getOpcode() == ISD::SRL)
2670 FromHi = true;
2671 else
2672 return false;
2673
2674 if (!isa<ConstantSDNode>(N.getOperand(1)))
2675 return false;
2676
2677 ShiftAmount = N->getConstantOperandVal(1);
2678 Src = N->getOperand(0);
2679 return true;
2680}
2681
2682/// An EXTR instruction extracts a contiguous chunk of bits from two existing
2683/// registers viewed as a high/low pair. This function looks for the pattern:
2684/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
2685/// EXTR. Can't quite be done in TableGen because the two immediates aren't
2686/// independent.
2687static SDValue tryCombineToEXTR(SDNode *N,
2688 TargetLowering::DAGCombinerInfo &DCI) {
2689 SelectionDAG &DAG = DCI.DAG;
2690 DebugLoc DL = N->getDebugLoc();
2691 EVT VT = N->getValueType(0);
2692
2693 assert(N->getOpcode() == ISD::OR && "Unexpected root");
2694
2695 if (VT != MVT::i32 && VT != MVT::i64)
2696 return SDValue();
2697
2698 SDValue LHS;
2699 uint32_t ShiftLHS = 0;
2700 bool LHSFromHi = false;
2701 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
2702 return SDValue();
2703
2704 SDValue RHS;
2705 uint32_t ShiftRHS = 0;
2706 bool RHSFromHi = false;
2707 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
2708 return SDValue();
2709
2710 // If they're both trying to come from the high part of the register, they're
2711 // not really an EXTR.
2712 if (LHSFromHi == RHSFromHi)
2713 return SDValue();
2714
2715 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
2716 return SDValue();
2717
2718 if (LHSFromHi) {
2719 std::swap(LHS, RHS);
2720 std::swap(ShiftLHS, ShiftRHS);
2721 }
2722
2723 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
2724 LHS, RHS,
2725 DAG.getConstant(ShiftRHS, MVT::i64));
2726}
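// Example of the combine above (illustration only): the i32 rotate
//   (or (shl x, 24), (srl x, 8))
// has ShiftLHS + ShiftRHS == 32 and becomes AArch64ISD::EXTR(x, x, 8), which
// selects to EXTR (a ROR, since both source registers are the same).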
2727
2728/// Target-specific dag combine xforms for ISD::OR
2729static SDValue PerformORCombine(SDNode *N,
2730 TargetLowering::DAGCombinerInfo &DCI,
2731 const AArch64Subtarget *Subtarget) {
2732
2733 SelectionDAG &DAG = DCI.DAG;
2734 EVT VT = N->getValueType(0);
2735
2736 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2737 return SDValue();
2738
2739 // Attempt to recognise bitfield-insert operations.
2740 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
2741 if (Res.getNode())
2742 return Res;
2743
2744 // Attempt to combine an existing MaskedBFI operation into one with a larger
2745 // mask.
2746 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
2747 if (Res.getNode())
2748 return Res;
2749
2750 Res = tryCombineToEXTR(N, DCI);
2751 if (Res.getNode())
2752 return Res;
2753
2754 return SDValue();
2755}
2756
2757/// Target-specific dag combine xforms for ISD::SRA
2758static SDValue PerformSRACombine(SDNode *N,
2759 TargetLowering::DAGCombinerInfo &DCI) {
2760
2761 SelectionDAG &DAG = DCI.DAG;
2762 DebugLoc DL = N->getDebugLoc();
2763 EVT VT = N->getValueType(0);
2764
2765 // We're looking for an SRA/SHL pair which form an SBFX.
2766
2767 if (VT != MVT::i32 && VT != MVT::i64)
2768 return SDValue();
2769
2770 if (!isa<ConstantSDNode>(N->getOperand(1)))
2771 return SDValue();
2772
2773 uint64_t ExtraSignBits = N->getConstantOperandVal(1);
2774 SDValue Shift = N->getOperand(0);
2775
2776 if (Shift.getOpcode() != ISD::SHL)
2777 return SDValue();
2778
2779 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
2780 return SDValue();
2781
2782 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
2783 uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
2784 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
2785
2786 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
2787 return SDValue();
2788
2789 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
2790 DAG.getConstant(LSB, MVT::i64),
2791 DAG.getConstant(LSB + Width - 1, MVT::i64));
2792}
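// Example of the combine above (illustration only): for i32
//   (sra (shl x, 24), 28)
// ExtraSignBits = 28 and BitsOnLeft = 24, so Width = 4 and LSB = 4, and the
// node becomes AArch64ISD::SBFX(x, 4, 7): a sign-extending extract of bits
// [7:4] of x.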
2793
2794
2795SDValue
2796AArch64TargetLowering::PerformDAGCombine(SDNode *N,
2797 DAGCombinerInfo &DCI) const {
2798 switch (N->getOpcode()) {
2799 default: break;
2800 case ISD::AND: return PerformANDCombine(N, DCI);
2801 case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
2802 case ISD::SRA: return PerformSRACombine(N, DCI);
2803 }
2804 return SDValue();
2805}
2806
2807AArch64TargetLowering::ConstraintType
2808AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
2809 if (Constraint.size() == 1) {
2810 switch (Constraint[0]) {
2811 default: break;
2812 case 'w': // An FP/SIMD vector register
2813 return C_RegisterClass;
2814 case 'I': // Constant that can be used with an ADD instruction
2815 case 'J': // Constant that can be used with a SUB instruction
2816 case 'K': // Constant that can be used with a 32-bit logical instruction
2817 case 'L': // Constant that can be used with a 64-bit logical instruction
2818 case 'M': // Constant that can be used as a 32-bit MOV immediate
2819 case 'N': // Constant that can be used as a 64-bit MOV immediate
2820 case 'Y': // Floating point constant zero
2821 case 'Z': // Integer constant zero
2822 return C_Other;
2823 case 'Q': // A memory reference with base register and no offset
2824 return C_Memory;
2825 case 'S': // A symbolic address
2826 return C_Other;
2827 }
2828 }
2829
2830 // FIXME: Ump, Utf, Usa, Ush
2831 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
2832 // whatever they may be
2833 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
2834 // Usa: An absolute symbolic address
2835 // Ush: The high part (bits 32:12) of a pc-relative symbolic address
2836 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
2837 && Constraint != "Ush" && "Unimplemented constraints");
2838
2839 return TargetLowering::getConstraintType(Constraint);
2840}
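// For reference (illustration only): in GCC-style inline assembly these
// letters constrain operands, e.g.
//   asm("add %0, %1, %2" : "=r"(res) : "r"(a), "I"(4095));
// where 'I' requires a legal ADD immediate, 'r' a general-purpose register,
// and 'w' would similarly request an FP/SIMD register.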
2841
2842TargetLowering::ConstraintWeight
2843AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
2844 const char *Constraint) const {
2845
2846 llvm_unreachable("Constraint weight unimplemented");
2847}
2848
2849void
2850AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2851 std::string &Constraint,
2852 std::vector<SDValue> &Ops,
2853 SelectionDAG &DAG) const {
2854 SDValue Result(0, 0);
2855
2856 // Only length 1 constraints are C_Other.
2857 if (Constraint.size() != 1) return;
2858
  // Only C_Other constraints get lowered like this. That means constants for
  // us, so return early if there's no hope the constraint can be lowered.

  switch (Constraint[0]) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'Z': {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    uint64_t CVal = C->getZExtValue();
    uint32_t Bits;
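    // 'K' and 'L' accept AArch64 "logical immediates": a rotated run of set
    // bits repeated across the register (0x00ff00ff00ff00ff, for example).
    // isLogicalImm also reports an encoding through Bits, but only the yes/no
    // answer matters here.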

    switch (Constraint[0]) {
    default:
      // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
      // is a peculiarly useless SUB constraint.
      llvm_unreachable("Unimplemented C_Other constraint");
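    // 'I': ADD-style immediates are unsigned 12-bit values; the shifted
    // "imm, LSL #12" form is not accepted here.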
    case 'I':
      if (CVal <= 0xfff)
        break;
      return;
    case 'K':
      if (A64Imms::isLogicalImm(32, CVal, Bits))
        break;
      return;
    case 'L':
      if (A64Imms::isLogicalImm(64, CVal, Bits))
        break;
      return;
    case 'Z':
      if (CVal == 0)
        break;
      return;
    }

    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }
  case 'S': {
    // An absolute symbolic address or label reference.
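    // (For instance, an operand written as "S"(&SomeGlobal) in user inline
    // asm; SomeGlobal is purely an illustrative name.)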
    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
                                          GA->getValueType(0));
    } else if (const BlockAddressSDNode *BA
                 = dyn_cast<BlockAddressSDNode>(Op)) {
      Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                         BA->getValueType(0));
    } else if (const ExternalSymbolSDNode *ES
                 = dyn_cast<ExternalSymbolSDNode>(Op)) {
      Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                           ES->getValueType(0));
    } else
      return;
    break;
  }
  case 'Y':
    if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFP->isExactlyValue(0.0)) {
        Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
        break;
      }
    }
    return;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // It's an unknown constraint for us. Let generic code have a go.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(
                                                 const std::string &Constraint,
                                                 EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT.getSizeInBits() <= 32)
        return std::make_pair(0U, &AArch64::GPR32RegClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, &AArch64::GPR64RegClass);
      break;
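    // 'w' covers scalar floating-point as well as NEON vector values; vector
    // types are matched below by total size (64 or 128 bits) rather than by a
    // specific element type.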
    case 'w':
      if (VT == MVT::f16)
        return std::make_pair(0U, &AArch64::FPR16RegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &AArch64::FPR32RegClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, &AArch64::FPR64RegClass);
      else if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::VPR64RegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &AArch64::FPR128RegClass);
      else if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::VPR128RegClass);
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}