//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
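// (Clearing the kill flag matters because the value may be marked as killed
// at its original final use; an earlier copy of the operand must not carry
// that kill marker.)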
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  addRegisterClass(MVT::i32,  &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64,  &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32,  &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64,  &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling.
  // However, LLVM's current latency-oriented scheduler can't handle physreg
  // definitions such as SystemZ's CC, so use the register-pressure
  // scheduler, which can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible and a wider multiplication otherwise.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR),
  // but they aren't really worth using.  There is no 64-bit SMUL_LOHI,
  // but there is a 64-bit UMUL_LOHI: MLGR.
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores would win over that, but the kinds of fused
  // stores generated by target-independent code don't win when the byte
  // value is variable.  E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not
  // better than "STC;MVC".  Handle the choice in target-specific code
  // instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

bool
SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}
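// SystemZ addresses are formed from a base register, an optional index
// register and a signed 20-bit displacement; there is no scaled-index form.
// For example, an address such as 4092(%r3,%r4) is representable, but one
// that would need 2*%r3 is not.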
bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the register class associated with
// "t" and Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  return CI->isTailCall();
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

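// Lower incoming arguments.  As modeled by CC_SystemZ, the first few integer
// and FP arguments arrive in registers; the rest arrive on the stack as
// right-justified 8-byte slots.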
SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

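// Return true if the given call can be emitted as a sibling call: every
// argument must be passed in a register other than the call-saved R6, and
// nothing may be passed indirectly or on the stack.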
static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6W || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
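// For example, integer SETUGT yields CCMASK_CMP_UO | CCMASK_CMP_GT,
// while integer SETGT yields just CCMASK_CMP_GT.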
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// can be converted to a comparison against zero, adjust the operands
// as necessary.
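// For example, a signed "x < 1" becomes "x <= 0"; flipping the
// CCMASK_CMP_EQ bit (LT -> LE) compensates for moving the constant to zero.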
static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
                          SDValue &CmpOp0, SDValue &CmpOp1,
                          unsigned &CCMask) {
  if (IsUnsigned)
    return;

  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
    CCMask ^= SystemZ::CCMASK_CMP_EQ;
    CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
  }
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
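// For example, an equality comparison between a sign-extending i8 load and
// the constant -1 becomes an unsigned comparison between a zero-extending
// load and 0xff, which CLI can handle directly.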
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1
// is an equality comparison that is better implemented using unsigned
// rather than signed comparison instructions.
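// For example, an i64 equality comparison against 0x90000000 fits CLGFI's
// unsigned 32-bit immediate but not CGFI's signed one.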
static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0,
                                     SDValue CmpOp1, unsigned CCMask) {
  // The test must be for equality or inequality.
  if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE)
    return false;

  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Value = cast<ConstantSDNode>(CmpOp1)->getSExtValue();

    // If we're comparing with memory, prefer unsigned comparisons for
    // values that are in the unsigned 16-bit range but not the signed
    // 16-bit range.  We want to use CLFHSI and CLGHSI.
    if (CmpOp0.hasOneUse() &&
        ISD::isNormalLoad(CmpOp0.getNode()) &&
        (Value >= 32768 && Value < 65536))
      return true;

    // Use unsigned comparisons for values that are in the CLGFI range
    // but not in the CGFI range.
    if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1)
      return true;

    return false;
  }

  // Prefer CL for zero-extended loads.
  if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND ||
      ISD::isZEXTLoad(CmpOp1.getNode()))
    return true;

  // ...and for "in-register" zero extensions.
  if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) {
    SDValue Mask = CmpOp1.getOperand(1);
    if (Mask.getOpcode() == ISD::Constant &&
        cast<ConstantSDNode>(Mask)->getZExtValue() == 0xffffffff)
      return true;
  }

  return false;
}

// Return a target node that compares CmpOp0 with CmpOp1 and stores a
// 2-bit result in CC.  Set CCValid to the CCMASK_* of all possible
// 2-bit results and CCMask to the subset of those results that are
// associated with Cond.
static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode Cond, unsigned &CCValid,
                       unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(Cond);
  if (CmpOp0.getValueType().isFloatingPoint())
    CCValid = SystemZ::CCMASK_FCMP;
  else {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCValid = SystemZ::CCMASK_ICMP;
    CCMask &= CCValid;
    adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask))
      IsUnsigned = true;
  }

  SDLoc DL(CmpOp0);
  return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
                     DL, MVT::Glue, CmpOp0, CmpOp1);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1.  Store the even register result
// in Even and the odd register result in Odd.
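// (lowerUMUL_LOHI below, for example, calls this with SystemZ::AEXT128_64
// as Extend and SystemZISD::UMUL_LOHI64 as Opcode.)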
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
  SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
  SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg0);
  SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg1);
  Even = SDValue(Reg0, 0);
  Odd = SDValue(Reg1, 0);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain    = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0   = Op.getOperand(2);
  SDValue CmpOp1   = Op.getOperand(3);
  SDValue Dest     = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCValid, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCValid, MVT::i32),
                     DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0   = Op.getOperand(0);
  SDValue CmpOp1   = Op.getOperand(1);
  SDValue TrueOp   = Op.getOperand(2);
  SDValue FalseOp  = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  SmallVector<SDValue, 5> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Make sure that the offset is aligned to a halfword.  If it isn't,
    // create an "anchor" at the previous 12-bit boundary.
    // FIXME check whether there is a better way of handling this.
    if (Offset & 1) {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                          Offset & ~uint64_t(0xfff));
      Offset &= 0xfff;
    } else {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
      Offset = 0;
    }
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GA from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy();

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

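// Lower an i32<->f32 BITCAST.  A 32-bit FP value occupies the high word of
// a 64-bit FPR, so the cast is done via an i64<->f64 bitcast combined with
// a 32-bit shift.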
SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDValue Shift32 = DAG.getConstant(32, MVT::i64);
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
    SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift);
    SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                     MVT::f32, Out64, SubReg32);
    return SDValue(Out, 0);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::f64, SDValue(U64, 0), In, SubReg32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0));
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32);
    SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
    return Out;
  }
  llvm_unreachable("Unexpected bitcast combination");
}

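// Lower VASTART by storing the four fields of the SystemZ va_list: the
// number of fixed GPR and FPR arguments, the address of the first stack
// vararg, and the address of the register save area.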
SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy();

  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset),
                             false, false, 0);
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
}

1380SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
1381 SelectionDAG &DAG) const {
1382 SDValue Chain = Op.getOperand(0);
1383 SDValue DstPtr = Op.getOperand(1);
1384 SDValue SrcPtr = Op.getOperand(2);
1385 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
1386 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
Andrew Trickef9de2a2013-05-25 02:42:55 +00001387 SDLoc DL(Op);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001388
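  // va_list is the 32-byte structure set up by lowerVASTART, so copying
  // it is just a 32-byte block copy.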
1389 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
1390 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
1391 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
1392}
1393
1394SDValue SystemZTargetLowering::
1395lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
1396 SDValue Chain = Op.getOperand(0);
1397 SDValue Size = Op.getOperand(1);
Andrew Trickef9de2a2013-05-25 02:42:55 +00001398 SDLoc DL(Op);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001399
1400 unsigned SPReg = getStackPointerRegisterToSaveRestore();
1401
1402 // Get a reference to the stack pointer.
1403 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
1404
1405 // Get the new stack pointer value.
1406 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);
1407
1408 // Copy the new stack pointer back.
1409 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
1410
1411 // The allocated data lives above the 160 bytes allocated for the standard
1412 // frame, plus any outgoing stack arguments. We don't know how much that
1413 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
1414 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
1415 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
1416
1417 SDValue Ops[2] = { Result, Chain };
1418 return DAG.getMergeValues(Ops, 2, DL);
1419}
1420
1421SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
1422 SelectionDAG &DAG) const {
1423 EVT VT = Op.getValueType();
Andrew Trickef9de2a2013-05-25 02:42:55 +00001424 SDLoc DL(Op);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001425 assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI");
1426
1427 // UMUL_LOHI64 returns the low result in the odd register and the high
1428 // result in the even register. UMUL_LOHI is defined to return the
1429 // low half first, so the results are in reverse order.
1430 SDValue Ops[2];
1431 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
1432 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1433 return DAG.getMergeValues(Ops, 2, DL);
1434}
1435
1436SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
1437 SelectionDAG &DAG) const {
1438 SDValue Op0 = Op.getOperand(0);
1439 SDValue Op1 = Op.getOperand(1);
1440 EVT VT = Op.getValueType();
Andrew Trickef9de2a2013-05-25 02:42:55 +00001441 SDLoc DL(Op);
Richard Sandiforde6e78852013-07-02 15:40:22 +00001442 unsigned Opcode;
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001443
 1444  // We use DSGF for 32-bit division.  DSGF can also handle a 64-bit
       // division whose divisor is known to fit in 32 bits.
1445 if (is32Bit(VT)) {
1446 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
Richard Sandiforde6e78852013-07-02 15:40:22 +00001447 Opcode = SystemZISD::SDIVREM32;
1448 } else if (DAG.ComputeNumSignBits(Op1) > 32) {
1449 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
1450 Opcode = SystemZISD::SDIVREM32;
1451 } else
1452 Opcode = SystemZISD::SDIVREM64;
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001453
1454 // DSG(F) takes a 64-bit dividend, so the even register in the GR128
1455 // input is "don't care". The instruction returns the remainder in
1456 // the even register and the quotient in the odd register.
1457 SDValue Ops[2];
Richard Sandiforde6e78852013-07-02 15:40:22 +00001458 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001459 Op0, Op1, Ops[1], Ops[0]);
1460 return DAG.getMergeValues(Ops, 2, DL);
1461}
1462
1463SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
1464 SelectionDAG &DAG) const {
1465 EVT VT = Op.getValueType();
Andrew Trickef9de2a2013-05-25 02:42:55 +00001466 SDLoc DL(Op);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001467
1468 // DL(G) uses a double-width dividend, so we need to clear the even
1469 // register in the GR128 input. The instruction returns the remainder
1470 // in the even register and the quotient in the odd register.
1471 SDValue Ops[2];
1472 if (is32Bit(VT))
1473 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
1474 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1475 else
1476 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
1477 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
1478 return DAG.getMergeValues(Ops, 2, DL);
1479}
1480
1481SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
1482 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
1483
1484 // Get the known-zero masks for each operand.
1485 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
1486 APInt KnownZero[2], KnownOne[2];
1487 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
1488 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
1489
1490 // See if the upper 32 bits of one operand and the lower 32 bits of the
1491 // other are known zero. They are the low and high operands respectively.
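  // For example, in (x & 0xffffffff00000000) | (y & 0xffffffff), x would
  // be the high operand and y the low operand.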
1492 uint64_t Masks[] = { KnownZero[0].getZExtValue(),
1493 KnownZero[1].getZExtValue() };
1494 unsigned High, Low;
1495 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
1496 High = 1, Low = 0;
1497 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
1498 High = 0, Low = 1;
1499 else
1500 return Op;
1501
1502 SDValue LowOp = Ops[Low];
1503 SDValue HighOp = Ops[High];
1504
1505 // If the high part is a constant, we're better off using IILH.
1506 if (HighOp.getOpcode() == ISD::Constant)
1507 return Op;
1508
1509 // If the low part is a constant that is outside the range of LHI,
1510 // then we're better off using IILF.
1511 if (LowOp.getOpcode() == ISD::Constant) {
1512 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
1513 if (!isInt<16>(Value))
1514 return Op;
1515 }
1516
1517 // Check whether the high part is an AND that doesn't change the
1518 // high 32 bits and just masks out low bits. We can skip it if so.
1519 if (HighOp.getOpcode() == ISD::AND &&
1520 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
1521 ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1));
1522 uint64_t Mask = MaskNode->getZExtValue() | Masks[High];
1523 if ((Mask >> 32) == 0xffffffff)
1524 HighOp = HighOp.getOperand(0);
1525 }
1526
1527 // Take advantage of the fact that all GR32 operations only change the
1528 // low 32 bits by truncating Low to an i32 and inserting it directly
1529 // using a subreg. The interesting cases are those where the truncation
1530 // can be folded.
Andrew Trickef9de2a2013-05-25 02:42:55 +00001531 SDLoc DL(Op);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001532 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
1533 SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
1534 SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
1535 MVT::i64, HighOp, Low32, SubReg32);
1536 return SDValue(Result, 0);
1537}
1538
 1539// Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation.  Lower the first
1540// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
1541SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
1542 SelectionDAG &DAG,
1543 unsigned Opcode) const {
1544 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
1545
1546 // 32-bit operations need no code outside the main loop.
1547 EVT NarrowVT = Node->getMemoryVT();
1548 EVT WideVT = MVT::i32;
1549 if (NarrowVT == WideVT)
1550 return Op;
1551
1552 int64_t BitSize = NarrowVT.getSizeInBits();
1553 SDValue ChainIn = Node->getChain();
1554 SDValue Addr = Node->getBasePtr();
1555 SDValue Src2 = Node->getVal();
1556 MachineMemOperand *MMO = Node->getMemOperand();
Andrew Trickef9de2a2013-05-25 02:42:55 +00001557 SDLoc DL(Node);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001558 EVT PtrVT = Addr.getValueType();
1559
1560 // Convert atomic subtracts of constants into additions.
1561 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
1562 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
1563 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
1564 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
1565 }
1566
1567 // Get the address of the containing word.
1568 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
1569 DAG.getConstant(-4, PtrVT));
1570
1571 // Get the number of bits that the word must be rotated left in order
1572 // to bring the field to the top bits of a GR32.
1573 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
1574 DAG.getConstant(3, PtrVT));
1575 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
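  // BitShift is the byte address multiplied by 8; RLL only uses the low
  // bits of its shift amount, so this effectively rotates by 8 times the
  // byte offset within the word (e.g. by 16 for a field at byte offset 2).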
1576
1577 // Get the complementing shift amount, for rotating a field in the top
1578 // bits back to its proper position.
1579 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
1580 DAG.getConstant(0, WideVT), BitShift);
1581
1582 // Extend the source operand to 32 bits and prepare it for the inner loop.
1583 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
1584 // operations require the source to be shifted in advance. (This shift
1585 // can be folded if the source is constant.) For AND and NAND, the lower
1586 // bits must be set, while for other opcodes they should be left clear.
1587 if (Opcode != SystemZISD::ATOMIC_SWAPW)
1588 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
1589 DAG.getConstant(32 - BitSize, WideVT));
1590 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
1591 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
1592 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
1593 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));
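  // Bits outside the field must pass through the loop unchanged: the 1s
  // just ORed in are the identity for AND and NAND, while the 0s left by
  // the SHL above are the identity for ADD, OR and XOR.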
1594
1595 // Construct the ATOMIC_LOADW_* node.
1596 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
1597 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
1598 DAG.getConstant(BitSize, WideVT) };
1599 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
1600 array_lengthof(Ops),
1601 NarrowVT, MMO);
1602
1603 // Rotate the result of the final CS so that the field is in the lower
1604 // bits of a GR32, then truncate it.
1605 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
1606 DAG.getConstant(BitSize, WideVT));
1607 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
1608
1609 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
1610 return DAG.getMergeValues(RetOps, 2, DL);
1611}
1612
1613// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two
1614// into a fullword ATOMIC_CMP_SWAPW operation.
1615SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
1616 SelectionDAG &DAG) const {
1617 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
1618
1619 // We have native support for 32-bit compare and swap.
1620 EVT NarrowVT = Node->getMemoryVT();
1621 EVT WideVT = MVT::i32;
1622 if (NarrowVT == WideVT)
1623 return Op;
1624
1625 int64_t BitSize = NarrowVT.getSizeInBits();
1626 SDValue ChainIn = Node->getOperand(0);
1627 SDValue Addr = Node->getOperand(1);
1628 SDValue CmpVal = Node->getOperand(2);
1629 SDValue SwapVal = Node->getOperand(3);
1630 MachineMemOperand *MMO = Node->getMemOperand();
Andrew Trickef9de2a2013-05-25 02:42:55 +00001631 SDLoc DL(Node);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001632 EVT PtrVT = Addr.getValueType();
1633
1634 // Get the address of the containing word.
1635 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
1636 DAG.getConstant(-4, PtrVT));
1637
1638 // Get the number of bits that the word must be rotated left in order
1639 // to bring the field to the top bits of a GR32.
1640 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
1641 DAG.getConstant(3, PtrVT));
1642 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
1643
1644 // Get the complementing shift amount, for rotating a field in the top
1645 // bits back to its proper position.
1646 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
1647 DAG.getConstant(0, WideVT), BitShift);
1648
1649 // Construct the ATOMIC_CMP_SWAPW node.
1650 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
1651 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
1652 NegBitShift, DAG.getConstant(BitSize, WideVT) };
1653 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
1654 VTList, Ops, array_lengthof(Ops),
1655 NarrowVT, MMO);
1656 return AtomicOp;
1657}
1658
1659SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
1660 SelectionDAG &DAG) const {
1661 MachineFunction &MF = DAG.getMachineFunction();
1662 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
Andrew Trickef9de2a2013-05-25 02:42:55 +00001663 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001664 SystemZ::R15D, Op.getValueType());
1665}
1666
1667SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
1668 SelectionDAG &DAG) const {
1669 MachineFunction &MF = DAG.getMachineFunction();
1670 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
Andrew Trickef9de2a2013-05-25 02:42:55 +00001671 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001672 SystemZ::R15D, Op.getOperand(1));
1673}
1674
1675SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
1676 SelectionDAG &DAG) const {
1677 switch (Op.getOpcode()) {
1678 case ISD::BR_CC:
1679 return lowerBR_CC(Op, DAG);
1680 case ISD::SELECT_CC:
1681 return lowerSELECT_CC(Op, DAG);
1682 case ISD::GlobalAddress:
1683 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
1684 case ISD::GlobalTLSAddress:
1685 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
1686 case ISD::BlockAddress:
1687 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
1688 case ISD::JumpTable:
1689 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
1690 case ISD::ConstantPool:
1691 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
1692 case ISD::BITCAST:
1693 return lowerBITCAST(Op, DAG);
1694 case ISD::VASTART:
1695 return lowerVASTART(Op, DAG);
1696 case ISD::VACOPY:
1697 return lowerVACOPY(Op, DAG);
1698 case ISD::DYNAMIC_STACKALLOC:
1699 return lowerDYNAMIC_STACKALLOC(Op, DAG);
1700 case ISD::UMUL_LOHI:
1701 return lowerUMUL_LOHI(Op, DAG);
1702 case ISD::SDIVREM:
1703 return lowerSDIVREM(Op, DAG);
1704 case ISD::UDIVREM:
1705 return lowerUDIVREM(Op, DAG);
1706 case ISD::OR:
1707 return lowerOR(Op, DAG);
1708 case ISD::ATOMIC_SWAP:
1709 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
1710 case ISD::ATOMIC_LOAD_ADD:
1711 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
1712 case ISD::ATOMIC_LOAD_SUB:
1713 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
1714 case ISD::ATOMIC_LOAD_AND:
1715 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
1716 case ISD::ATOMIC_LOAD_OR:
1717 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
1718 case ISD::ATOMIC_LOAD_XOR:
1719 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
1720 case ISD::ATOMIC_LOAD_NAND:
1721 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
1722 case ISD::ATOMIC_LOAD_MIN:
1723 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
1724 case ISD::ATOMIC_LOAD_MAX:
1725 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
1726 case ISD::ATOMIC_LOAD_UMIN:
1727 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
1728 case ISD::ATOMIC_LOAD_UMAX:
1729 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
1730 case ISD::ATOMIC_CMP_SWAP:
1731 return lowerATOMIC_CMP_SWAP(Op, DAG);
1732 case ISD::STACKSAVE:
1733 return lowerSTACKSAVE(Op, DAG);
1734 case ISD::STACKRESTORE:
1735 return lowerSTACKRESTORE(Op, DAG);
1736 default:
1737 llvm_unreachable("Unexpected node to lower");
1738 }
1739}
1740
1741const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
1742#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
1743 switch (Opcode) {
1744 OPCODE(RET_FLAG);
1745 OPCODE(CALL);
Richard Sandiford709bda62013-08-19 12:42:31 +00001746 OPCODE(SIBCALL);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001747 OPCODE(PCREL_WRAPPER);
1748 OPCODE(CMP);
1749 OPCODE(UCMP);
1750 OPCODE(BR_CCMASK);
1751 OPCODE(SELECT_CCMASK);
1752 OPCODE(ADJDYNALLOC);
1753 OPCODE(EXTRACT_ACCESS);
1754 OPCODE(UMUL_LOHI64);
1755 OPCODE(SDIVREM64);
1756 OPCODE(UDIVREM32);
1757 OPCODE(UDIVREM64);
Richard Sandifordd131ff82013-07-08 09:35:23 +00001758 OPCODE(MVC);
Richard Sandiford761703a2013-08-12 10:17:33 +00001759 OPCODE(CLC);
Richard Sandifordca232712013-08-16 11:21:54 +00001760 OPCODE(STRCMP);
Richard Sandifordbb83a502013-08-16 11:29:37 +00001761 OPCODE(STPCPY);
Richard Sandiford0dec06a2013-08-16 11:41:43 +00001762 OPCODE(SEARCH_STRING);
Richard Sandiford564681c2013-08-12 10:28:10 +00001763 OPCODE(IPM);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001764 OPCODE(ATOMIC_SWAPW);
1765 OPCODE(ATOMIC_LOADW_ADD);
1766 OPCODE(ATOMIC_LOADW_SUB);
1767 OPCODE(ATOMIC_LOADW_AND);
1768 OPCODE(ATOMIC_LOADW_OR);
1769 OPCODE(ATOMIC_LOADW_XOR);
1770 OPCODE(ATOMIC_LOADW_NAND);
1771 OPCODE(ATOMIC_LOADW_MIN);
1772 OPCODE(ATOMIC_LOADW_MAX);
1773 OPCODE(ATOMIC_LOADW_UMIN);
1774 OPCODE(ATOMIC_LOADW_UMAX);
1775 OPCODE(ATOMIC_CMP_SWAPW);
1776 }
1777 return NULL;
1778#undef OPCODE
1779}
1780
1781//===----------------------------------------------------------------------===//
1782// Custom insertion
1783//===----------------------------------------------------------------------===//
1784
1785// Create a new basic block after MBB.
1786static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
1787 MachineFunction &MF = *MBB->getParent();
1788 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
1789 MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
1790 return NewMBB;
1791}
1792
1793// Split MBB after MI and return the new block (the one that contains
1794// instructions after MI).
1795static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
1796 MachineBasicBlock *MBB) {
1797 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
1798 NewMBB->splice(NewMBB->begin(), MBB,
1799 llvm::next(MachineBasicBlock::iterator(MI)),
1800 MBB->end());
1801 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
1802 return NewMBB;
1803}
1804
1805// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
1806MachineBasicBlock *
1807SystemZTargetLowering::emitSelect(MachineInstr *MI,
1808 MachineBasicBlock *MBB) const {
1809 const SystemZInstrInfo *TII = TM.getInstrInfo();
1810
1811 unsigned DestReg = MI->getOperand(0).getReg();
1812 unsigned TrueReg = MI->getOperand(1).getReg();
1813 unsigned FalseReg = MI->getOperand(2).getReg();
Richard Sandiford3d768e32013-07-31 12:30:20 +00001814 unsigned CCValid = MI->getOperand(3).getImm();
1815 unsigned CCMask = MI->getOperand(4).getImm();
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001816 DebugLoc DL = MI->getDebugLoc();
1817
1818 MachineBasicBlock *StartMBB = MBB;
1819 MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
1820 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
1821
1822 // StartMBB:
Richard Sandiford0fb90ab2013-05-28 10:41:11 +00001823 // BRC CCMask, JoinMBB
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001824 // # fallthrough to FalseMBB
1825 MBB = StartMBB;
Richard Sandiford3d768e32013-07-31 12:30:20 +00001826 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
1827 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001828 MBB->addSuccessor(JoinMBB);
1829 MBB->addSuccessor(FalseMBB);
1830
1831 // FalseMBB:
1832 // # fallthrough to JoinMBB
1833 MBB = FalseMBB;
1834 MBB->addSuccessor(JoinMBB);
1835
1836 // JoinMBB:
1837 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
1838 // ...
1839 MBB = JoinMBB;
1840 BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg)
1841 .addReg(TrueReg).addMBB(StartMBB)
1842 .addReg(FalseReg).addMBB(FalseMBB);
1843
1844 MI->eraseFromParent();
1845 return JoinMBB;
1846}
1847
Richard Sandifordb86a8342013-06-27 09:27:40 +00001848// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
1849// StoreOpcode is the store to use and Invert says whether the store should
Richard Sandiforda68e6f52013-07-25 08:57:02 +00001850// happen when the condition is false rather than true. If a STORE ON
1851// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
Richard Sandifordb86a8342013-06-27 09:27:40 +00001852MachineBasicBlock *
1853SystemZTargetLowering::emitCondStore(MachineInstr *MI,
1854 MachineBasicBlock *MBB,
Richard Sandiforda68e6f52013-07-25 08:57:02 +00001855 unsigned StoreOpcode, unsigned STOCOpcode,
1856 bool Invert) const {
Richard Sandifordb86a8342013-06-27 09:27:40 +00001857 const SystemZInstrInfo *TII = TM.getInstrInfo();
1858
Richard Sandiforda68e6f52013-07-25 08:57:02 +00001859 unsigned SrcReg = MI->getOperand(0).getReg();
1860 MachineOperand Base = MI->getOperand(1);
1861 int64_t Disp = MI->getOperand(2).getImm();
1862 unsigned IndexReg = MI->getOperand(3).getReg();
Richard Sandiford3d768e32013-07-31 12:30:20 +00001863 unsigned CCValid = MI->getOperand(4).getImm();
1864 unsigned CCMask = MI->getOperand(5).getImm();
Richard Sandifordb86a8342013-06-27 09:27:40 +00001865 DebugLoc DL = MI->getDebugLoc();
1866
1867 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
1868
Richard Sandiforda68e6f52013-07-25 08:57:02 +00001869 // Use STOCOpcode if possible. We could use different store patterns in
1870 // order to avoid matching the index register, but the performance trade-offs
1871 // might be more complicated in that case.
1872 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
1873 if (Invert)
Richard Sandiford3d768e32013-07-31 12:30:20 +00001874 CCMask ^= CCValid;
Richard Sandiforda68e6f52013-07-25 08:57:02 +00001875 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
Richard Sandifordfd7f4ae2013-08-01 10:39:40 +00001876 .addReg(SrcReg).addOperand(Base).addImm(Disp)
1877 .addImm(CCValid).addImm(CCMask);
Richard Sandiforda68e6f52013-07-25 08:57:02 +00001878 MI->eraseFromParent();
1879 return MBB;
1880 }
1881
Richard Sandifordb86a8342013-06-27 09:27:40 +00001882 // Get the condition needed to branch around the store.
1883 if (!Invert)
Richard Sandiford3d768e32013-07-31 12:30:20 +00001884 CCMask ^= CCValid;
Richard Sandifordb86a8342013-06-27 09:27:40 +00001885
1886 MachineBasicBlock *StartMBB = MBB;
1887 MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
1888 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
1889
1890 // StartMBB:
1891 // BRC CCMask, JoinMBB
1892 // # fallthrough to FalseMBB
Richard Sandifordb86a8342013-06-27 09:27:40 +00001893 MBB = StartMBB;
Richard Sandiford3d768e32013-07-31 12:30:20 +00001894 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
1895 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
Richard Sandifordb86a8342013-06-27 09:27:40 +00001896 MBB->addSuccessor(JoinMBB);
1897 MBB->addSuccessor(FalseMBB);
1898
1899 // FalseMBB:
1900 // store %SrcReg, %Disp(%Index,%Base)
1901 // # fallthrough to JoinMBB
1902 MBB = FalseMBB;
1903 BuildMI(MBB, DL, TII->get(StoreOpcode))
1904 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
1905 MBB->addSuccessor(JoinMBB);
1906
1907 MI->eraseFromParent();
1908 return JoinMBB;
1909}
1910
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001911// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
1912// or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
1913// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
1914// BitSize is the width of the field in bits, or 0 if this is a partword
1915// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
1916// is one of the operands. Invert says whether the field should be
1917// inverted after performing BinOpcode (e.g. for NAND).
1918MachineBasicBlock *
1919SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
1920 MachineBasicBlock *MBB,
1921 unsigned BinOpcode,
1922 unsigned BitSize,
1923 bool Invert) const {
1924 const SystemZInstrInfo *TII = TM.getInstrInfo();
1925 MachineFunction &MF = *MBB->getParent();
1926 MachineRegisterInfo &MRI = MF.getRegInfo();
Ulrich Weigand5f613df2013-05-06 16:15:19 +00001927 bool IsSubWord = (BitSize < 32);
1928
1929 // Extract the operands. Base can be a register or a frame index.
1930 // Src2 can be a register or immediate.
1931 unsigned Dest = MI->getOperand(0).getReg();
1932 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
1933 int64_t Disp = MI->getOperand(2).getImm();
1934 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
1935 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
1936 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
1937 DebugLoc DL = MI->getDebugLoc();
1938 if (IsSubWord)
1939 BitSize = MI->getOperand(6).getImm();
1940
1941 // Subword operations use 32-bit registers.
1942 const TargetRegisterClass *RC = (BitSize <= 32 ?
1943 &SystemZ::GR32BitRegClass :
1944 &SystemZ::GR64BitRegClass);
1945 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
1946 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
1947
1948 // Get the right opcodes for the displacement.
1949 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
1950 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
1951 assert(LOpcode && CSOpcode && "Displacement out of range");
1952
1953 // Create virtual registers for temporary results.
1954 unsigned OrigVal = MRI.createVirtualRegister(RC);
1955 unsigned OldVal = MRI.createVirtualRegister(RC);
1956 unsigned NewVal = (BinOpcode || IsSubWord ?
1957 MRI.createVirtualRegister(RC) : Src2.getReg());
1958 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
1959 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
1960
1961 // Insert a basic block for the main loop.
1962 MachineBasicBlock *StartMBB = MBB;
1963 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
1964 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
1965
1966 // StartMBB:
1967 // ...
1968 // %OrigVal = L Disp(%Base)
 1969  //   # fall through to LoopMBB
1970 MBB = StartMBB;
1971 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
1972 .addOperand(Base).addImm(Disp).addReg(0);
1973 MBB->addSuccessor(LoopMBB);
1974
1975 // LoopMBB:
1976 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
1977 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
1978 // %RotatedNewVal = OP %RotatedOldVal, %Src2
1979 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
1980 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
1981 // JNE LoopMBB
 1982  //   # fall through to DoneMBB
1983 MBB = LoopMBB;
1984 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
1985 .addReg(OrigVal).addMBB(StartMBB)
1986 .addReg(Dest).addMBB(LoopMBB);
1987 if (IsSubWord)
1988 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
1989 .addReg(OldVal).addReg(BitShift).addImm(0);
1990 if (Invert) {
1991 // Perform the operation normally and then invert every bit of the field.
1992 unsigned Tmp = MRI.createVirtualRegister(RC);
1993 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
1994 .addReg(RotatedOldVal).addOperand(Src2);
1995 if (BitSize < 32)
1996 // XILF with the upper BitSize bits set.
1997 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
1998 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
1999 else if (BitSize == 32)
2000 // XILF with every bit set.
2001 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
2002 .addReg(Tmp).addImm(~uint32_t(0));
2003 else {
2004 // Use LCGR and add -1 to the result, which is more compact than
2005 // an XILF, XILH pair.
2006 unsigned Tmp2 = MRI.createVirtualRegister(RC);
2007 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
2008 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
2009 .addReg(Tmp2).addImm(-1);
2010 }
2011 } else if (BinOpcode)
 2012    // A simple binary operation.
2013 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
2014 .addReg(RotatedOldVal).addOperand(Src2);
2015 else if (IsSubWord)
2016 // Use RISBG to rotate Src2 into position and use it to replace the
2017 // field in RotatedOldVal.
2018 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
2019 .addReg(RotatedOldVal).addReg(Src2.getReg())
2020 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
2021 if (IsSubWord)
2022 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
2023 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
2024 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
2025 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
Richard Sandiford3d768e32013-07-31 12:30:20 +00002026 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2027 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002028 MBB->addSuccessor(LoopMBB);
2029 MBB->addSuccessor(DoneMBB);
2030
2031 MI->eraseFromParent();
2032 return DoneMBB;
2033}
2034
2035// Implement EmitInstrWithCustomInserter for pseudo
2036// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
2037// instruction that should be used to compare the current field with the
2038// minimum or maximum value. KeepOldMask is the BRC condition-code mask
2039// for when the current field should be kept. BitSize is the width of
2040// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
2041MachineBasicBlock *
2042SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
2043 MachineBasicBlock *MBB,
2044 unsigned CompareOpcode,
2045 unsigned KeepOldMask,
2046 unsigned BitSize) const {
2047 const SystemZInstrInfo *TII = TM.getInstrInfo();
2048 MachineFunction &MF = *MBB->getParent();
2049 MachineRegisterInfo &MRI = MF.getRegInfo();
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002050 bool IsSubWord = (BitSize < 32);
2051
2052 // Extract the operands. Base can be a register or a frame index.
2053 unsigned Dest = MI->getOperand(0).getReg();
2054 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
2055 int64_t Disp = MI->getOperand(2).getImm();
2056 unsigned Src2 = MI->getOperand(3).getReg();
2057 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
2058 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
2059 DebugLoc DL = MI->getDebugLoc();
2060 if (IsSubWord)
2061 BitSize = MI->getOperand(6).getImm();
2062
2063 // Subword operations use 32-bit registers.
2064 const TargetRegisterClass *RC = (BitSize <= 32 ?
2065 &SystemZ::GR32BitRegClass :
2066 &SystemZ::GR64BitRegClass);
2067 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
2068 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
2069
2070 // Get the right opcodes for the displacement.
2071 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
2072 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
2073 assert(LOpcode && CSOpcode && "Displacement out of range");
2074
2075 // Create virtual registers for temporary results.
2076 unsigned OrigVal = MRI.createVirtualRegister(RC);
2077 unsigned OldVal = MRI.createVirtualRegister(RC);
2078 unsigned NewVal = MRI.createVirtualRegister(RC);
2079 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
2080 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
2081 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
2082
2083 // Insert 3 basic blocks for the loop.
2084 MachineBasicBlock *StartMBB = MBB;
2085 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
2086 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2087 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
2088 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
2089
2090 // StartMBB:
2091 // ...
2092 // %OrigVal = L Disp(%Base)
 2093  //   # fall through to LoopMBB
2094 MBB = StartMBB;
2095 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
2096 .addOperand(Base).addImm(Disp).addReg(0);
2097 MBB->addSuccessor(LoopMBB);
2098
2099 // LoopMBB:
2100 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
2101 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
2102 // CompareOpcode %RotatedOldVal, %Src2
Richard Sandiford312425f2013-05-20 14:23:08 +00002103 // BRC KeepOldMask, UpdateMBB
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002104 MBB = LoopMBB;
2105 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2106 .addReg(OrigVal).addMBB(StartMBB)
2107 .addReg(Dest).addMBB(UpdateMBB);
2108 if (IsSubWord)
2109 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
2110 .addReg(OldVal).addReg(BitShift).addImm(0);
Richard Sandiford8a757bb2013-07-31 12:11:07 +00002111 BuildMI(MBB, DL, TII->get(CompareOpcode))
2112 .addReg(RotatedOldVal).addReg(Src2);
2113 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
Richard Sandiford3d768e32013-07-31 12:30:20 +00002114 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002115 MBB->addSuccessor(UpdateMBB);
2116 MBB->addSuccessor(UseAltMBB);
2117
2118 // UseAltMBB:
2119 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
 2120  //   # fall through to UpdateMBB
2121 MBB = UseAltMBB;
2122 if (IsSubWord)
2123 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
2124 .addReg(RotatedOldVal).addReg(Src2)
2125 .addImm(32).addImm(31 + BitSize).addImm(0);
2126 MBB->addSuccessor(UpdateMBB);
2127
2128 // UpdateMBB:
2129 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
2130 // [ %RotatedAltVal, UseAltMBB ]
2131 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
2132 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
2133 // JNE LoopMBB
 2134  //   # fall through to DoneMBB
2135 MBB = UpdateMBB;
2136 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
2137 .addReg(RotatedOldVal).addMBB(LoopMBB)
2138 .addReg(RotatedAltVal).addMBB(UseAltMBB);
2139 if (IsSubWord)
2140 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
2141 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
2142 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
2143 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
Richard Sandiford3d768e32013-07-31 12:30:20 +00002144 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2145 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002146 MBB->addSuccessor(LoopMBB);
2147 MBB->addSuccessor(DoneMBB);
2148
2149 MI->eraseFromParent();
2150 return DoneMBB;
2151}
2152
2153// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
2154// instruction MI.
2155MachineBasicBlock *
2156SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
2157 MachineBasicBlock *MBB) const {
2158 const SystemZInstrInfo *TII = TM.getInstrInfo();
2159 MachineFunction &MF = *MBB->getParent();
2160 MachineRegisterInfo &MRI = MF.getRegInfo();
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002161
2162 // Extract the operands. Base can be a register or a frame index.
2163 unsigned Dest = MI->getOperand(0).getReg();
2164 MachineOperand Base = earlyUseOperand(MI->getOperand(1));
2165 int64_t Disp = MI->getOperand(2).getImm();
2166 unsigned OrigCmpVal = MI->getOperand(3).getReg();
2167 unsigned OrigSwapVal = MI->getOperand(4).getReg();
2168 unsigned BitShift = MI->getOperand(5).getReg();
2169 unsigned NegBitShift = MI->getOperand(6).getReg();
2170 int64_t BitSize = MI->getOperand(7).getImm();
2171 DebugLoc DL = MI->getDebugLoc();
2172
2173 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
2174
2175 // Get the right opcodes for the displacement.
2176 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
2177 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
2178 assert(LOpcode && CSOpcode && "Displacement out of range");
2179
2180 // Create virtual registers for temporary results.
2181 unsigned OrigOldVal = MRI.createVirtualRegister(RC);
2182 unsigned OldVal = MRI.createVirtualRegister(RC);
2183 unsigned CmpVal = MRI.createVirtualRegister(RC);
2184 unsigned SwapVal = MRI.createVirtualRegister(RC);
2185 unsigned StoreVal = MRI.createVirtualRegister(RC);
2186 unsigned RetryOldVal = MRI.createVirtualRegister(RC);
2187 unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
2188 unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
2189
2190 // Insert 2 basic blocks for the loop.
2191 MachineBasicBlock *StartMBB = MBB;
2192 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
2193 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2194 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
2195
2196 // StartMBB:
2197 // ...
2198 // %OrigOldVal = L Disp(%Base)
 2199  //   # fall through to LoopMBB
2200 MBB = StartMBB;
2201 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
2202 .addOperand(Base).addImm(Disp).addReg(0);
2203 MBB->addSuccessor(LoopMBB);
2204
2205 // LoopMBB:
2206 // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
2207 // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
2208 // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
2209 // %Dest = RLL %OldVal, BitSize(%BitShift)
2210 // ^^ The low BitSize bits contain the field
2211 // of interest.
2212 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
2213 // ^^ Replace the upper 32-BitSize bits of the
2214 // comparison value with those that we loaded,
2215 // so that we can use a full word comparison.
Richard Sandiford8a757bb2013-07-31 12:11:07 +00002216 // CR %Dest, %RetryCmpVal
2217 // JNE DoneMBB
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002218 // # Fall through to SetMBB
2219 MBB = LoopMBB;
2220 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
2221 .addReg(OrigOldVal).addMBB(StartMBB)
2222 .addReg(RetryOldVal).addMBB(SetMBB);
2223 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
2224 .addReg(OrigCmpVal).addMBB(StartMBB)
2225 .addReg(RetryCmpVal).addMBB(SetMBB);
2226 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
2227 .addReg(OrigSwapVal).addMBB(StartMBB)
2228 .addReg(RetrySwapVal).addMBB(SetMBB);
2229 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
2230 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
2231 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
2232 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
Richard Sandiford8a757bb2013-07-31 12:11:07 +00002233 BuildMI(MBB, DL, TII->get(SystemZ::CR))
2234 .addReg(Dest).addReg(RetryCmpVal);
2235 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
Richard Sandiford3d768e32013-07-31 12:30:20 +00002236 .addImm(SystemZ::CCMASK_ICMP)
2237 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002238 MBB->addSuccessor(DoneMBB);
2239 MBB->addSuccessor(SetMBB);
2240
2241 // SetMBB:
2242 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
2243 // ^^ Replace the upper 32-BitSize bits of the new
2244 // value with those that we loaded.
2245 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
2246 // ^^ Rotate the new field to its proper position.
2247 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
2248 // JNE LoopMBB
 2249  //   # fall through to DoneMBB
2250 MBB = SetMBB;
2251 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
2252 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
2253 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
2254 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
2255 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
2256 .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
Richard Sandiford3d768e32013-07-31 12:30:20 +00002257 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2258 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002259 MBB->addSuccessor(LoopMBB);
2260 MBB->addSuccessor(DoneMBB);
2261
2262 MI->eraseFromParent();
2263 return DoneMBB;
2264}
2265
2266// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
2267// if the high register of the GR128 value must be cleared or false if
 2268 // it's "don't care".  SubReg is subreg_low32 when extending a GR32
 2269 // and subreg_low when extending a GR64.
2270MachineBasicBlock *
2271SystemZTargetLowering::emitExt128(MachineInstr *MI,
2272 MachineBasicBlock *MBB,
2273 bool ClearEven, unsigned SubReg) const {
2274 const SystemZInstrInfo *TII = TM.getInstrInfo();
2275 MachineFunction &MF = *MBB->getParent();
2276 MachineRegisterInfo &MRI = MF.getRegInfo();
2277 DebugLoc DL = MI->getDebugLoc();
2278
2279 unsigned Dest = MI->getOperand(0).getReg();
2280 unsigned Src = MI->getOperand(1).getReg();
2281 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
2282
2283 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
2284 if (ClearEven) {
2285 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
2286 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2287
2288 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
2289 .addImm(0);
2290 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
2291 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high);
2292 In128 = NewIn128;
2293 }
2294 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
2295 .addReg(In128).addReg(Src).addImm(SubReg);
2296
2297 MI->eraseFromParent();
2298 return MBB;
2299}
2300
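// Implement EmitInstrWithCustomInserter for pseudo MVCWrapper or CLCWrapper
// instruction MI, replacing it with the real memory-to-memory instruction
// given by Opcode (MVC or CLC).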
Richard Sandifordd131ff82013-07-08 09:35:23 +00002301MachineBasicBlock *
Richard Sandiford564681c2013-08-12 10:28:10 +00002302SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
2303 MachineBasicBlock *MBB,
2304 unsigned Opcode) const {
Richard Sandifordd131ff82013-07-08 09:35:23 +00002305 const SystemZInstrInfo *TII = TM.getInstrInfo();
2306 DebugLoc DL = MI->getDebugLoc();
2307
2308 MachineOperand DestBase = MI->getOperand(0);
2309 uint64_t DestDisp = MI->getOperand(1).getImm();
2310 MachineOperand SrcBase = MI->getOperand(2);
2311 uint64_t SrcDisp = MI->getOperand(3).getImm();
2312 uint64_t Length = MI->getOperand(4).getImm();
2313
Richard Sandiford564681c2013-08-12 10:28:10 +00002314 BuildMI(*MBB, MI, DL, TII->get(Opcode))
Richard Sandifordd131ff82013-07-08 09:35:23 +00002315 .addOperand(DestBase).addImm(DestDisp).addImm(Length)
2316 .addOperand(SrcBase).addImm(SrcDisp);
2317
2318 MI->eraseFromParent();
2319 return MBB;
2320}
2321
Richard Sandifordca232712013-08-16 11:21:54 +00002322// Decompose string pseudo-instruction MI into a loop that repeatedly performs
2323// Opcode until CC != 3.
2324MachineBasicBlock *
2325SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
2326 MachineBasicBlock *MBB,
2327 unsigned Opcode) const {
2328 const SystemZInstrInfo *TII = TM.getInstrInfo();
2329 MachineFunction &MF = *MBB->getParent();
2330 MachineRegisterInfo &MRI = MF.getRegInfo();
2331 DebugLoc DL = MI->getDebugLoc();
2332
2333 uint64_t End1Reg = MI->getOperand(0).getReg();
2334 uint64_t Start1Reg = MI->getOperand(1).getReg();
2335 uint64_t Start2Reg = MI->getOperand(2).getReg();
2336 uint64_t CharReg = MI->getOperand(3).getReg();
2337
2338 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
2339 uint64_t This1Reg = MRI.createVirtualRegister(RC);
2340 uint64_t This2Reg = MRI.createVirtualRegister(RC);
2341 uint64_t End2Reg = MRI.createVirtualRegister(RC);
2342
2343 MachineBasicBlock *StartMBB = MBB;
2344 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
2345 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
2346
2347 // StartMBB:
2348 // R0W = %CharReg
 2349  //   # fall through to LoopMBB
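  // CLST, MVST and SRST all take the test/termination character as an
  // implicit operand in R0, so CharReg must be copied into R0W first.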
2350 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0W).addReg(CharReg);
2351 MBB->addSuccessor(LoopMBB);
2352
2353 // LoopMBB:
2354 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
2355 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
 2356  //   %End1Reg, %End2Reg = Opcode %This1Reg, %This2Reg -- uses R0W
2357 // JO LoopMBB
 2358  //   # fall through to DoneMBB
2359 MBB = LoopMBB;
2360 MBB->addLiveIn(SystemZ::R0W);
2361
2362 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
2363 .addReg(Start1Reg).addMBB(StartMBB)
2364 .addReg(End1Reg).addMBB(LoopMBB);
2365 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
2366 .addReg(Start2Reg).addMBB(StartMBB)
2367 .addReg(End2Reg).addMBB(LoopMBB);
2368 BuildMI(MBB, DL, TII->get(Opcode))
2369 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
2370 .addReg(This1Reg).addReg(This2Reg);
2371 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
2372 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
2373 MBB->addSuccessor(LoopMBB);
2374 MBB->addSuccessor(DoneMBB);
2375
2376 DoneMBB->addLiveIn(SystemZ::CC);
2377
2378 MI->eraseFromParent();
2379 return DoneMBB;
2380}
2381
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002382MachineBasicBlock *SystemZTargetLowering::
2383EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
2384 switch (MI->getOpcode()) {
2385 case SystemZ::Select32:
2386 case SystemZ::SelectF32:
2387 case SystemZ::Select64:
2388 case SystemZ::SelectF64:
2389 case SystemZ::SelectF128:
2390 return emitSelect(MI, MBB);
2391
Richard Sandifordb86a8342013-06-27 09:27:40 +00002392 case SystemZ::CondStore8_32:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002393 return emitCondStore(MI, MBB, SystemZ::STC32, 0, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002394 case SystemZ::CondStore8_32Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002395 return emitCondStore(MI, MBB, SystemZ::STC32, 0, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002396 case SystemZ::CondStore16_32:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002397 return emitCondStore(MI, MBB, SystemZ::STH32, 0, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002398 case SystemZ::CondStore16_32Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002399 return emitCondStore(MI, MBB, SystemZ::STH32, 0, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002400 case SystemZ::CondStore32_32:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002401 return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002402 case SystemZ::CondStore32_32Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002403 return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002404 case SystemZ::CondStore8:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002405 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002406 case SystemZ::CondStore8Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002407 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002408 case SystemZ::CondStore16:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002409 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002410 case SystemZ::CondStore16Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002411 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002412 case SystemZ::CondStore32:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002413 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002414 case SystemZ::CondStore32Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002415 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002416 case SystemZ::CondStore64:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002417 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002418 case SystemZ::CondStore64Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002419 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002420 case SystemZ::CondStoreF32:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002421 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002422 case SystemZ::CondStoreF32Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002423 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002424 case SystemZ::CondStoreF64:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002425 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002426 case SystemZ::CondStoreF64Inv:
Richard Sandiforda68e6f52013-07-25 08:57:02 +00002427 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
Richard Sandifordb86a8342013-06-27 09:27:40 +00002428
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002429 case SystemZ::AEXT128_64:
2430 return emitExt128(MI, MBB, false, SystemZ::subreg_low);
2431 case SystemZ::ZEXT128_32:
2432 return emitExt128(MI, MBB, true, SystemZ::subreg_low32);
2433 case SystemZ::ZEXT128_64:
2434 return emitExt128(MI, MBB, true, SystemZ::subreg_low);
2435
2436 case SystemZ::ATOMIC_SWAPW:
2437 return emitAtomicLoadBinary(MI, MBB, 0, 0);
2438 case SystemZ::ATOMIC_SWAP_32:
2439 return emitAtomicLoadBinary(MI, MBB, 0, 32);
2440 case SystemZ::ATOMIC_SWAP_64:
2441 return emitAtomicLoadBinary(MI, MBB, 0, 64);
2442
2443 case SystemZ::ATOMIC_LOADW_AR:
2444 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
2445 case SystemZ::ATOMIC_LOADW_AFI:
2446 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
2447 case SystemZ::ATOMIC_LOAD_AR:
2448 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
2449 case SystemZ::ATOMIC_LOAD_AHI:
2450 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
2451 case SystemZ::ATOMIC_LOAD_AFI:
2452 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
2453 case SystemZ::ATOMIC_LOAD_AGR:
2454 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
2455 case SystemZ::ATOMIC_LOAD_AGHI:
2456 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
2457 case SystemZ::ATOMIC_LOAD_AGFI:
2458 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
2459
2460 case SystemZ::ATOMIC_LOADW_SR:
2461 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
2462 case SystemZ::ATOMIC_LOAD_SR:
2463 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
2464 case SystemZ::ATOMIC_LOAD_SGR:
2465 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
2466
2467 case SystemZ::ATOMIC_LOADW_NR:
2468 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
2469 case SystemZ::ATOMIC_LOADW_NILH:
2470 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
2471 case SystemZ::ATOMIC_LOAD_NR:
2472 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
2473 case SystemZ::ATOMIC_LOAD_NILL32:
2474 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
2475 case SystemZ::ATOMIC_LOAD_NILH32:
2476 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
2477 case SystemZ::ATOMIC_LOAD_NILF32:
2478 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
2479 case SystemZ::ATOMIC_LOAD_NGR:
2480 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
2481 case SystemZ::ATOMIC_LOAD_NILL:
2482 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
2483 case SystemZ::ATOMIC_LOAD_NILH:
2484 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
2485 case SystemZ::ATOMIC_LOAD_NIHL:
2486 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
2487 case SystemZ::ATOMIC_LOAD_NIHH:
2488 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
2489 case SystemZ::ATOMIC_LOAD_NILF:
2490 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
2491 case SystemZ::ATOMIC_LOAD_NIHF:
2492 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);
2493
2494 case SystemZ::ATOMIC_LOADW_OR:
2495 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
2496 case SystemZ::ATOMIC_LOADW_OILH:
2497 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
2498 case SystemZ::ATOMIC_LOAD_OR:
2499 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
2500 case SystemZ::ATOMIC_LOAD_OILL32:
2501 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
2502 case SystemZ::ATOMIC_LOAD_OILH32:
2503 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
2504 case SystemZ::ATOMIC_LOAD_OILF32:
2505 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
2506 case SystemZ::ATOMIC_LOAD_OGR:
2507 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
2508 case SystemZ::ATOMIC_LOAD_OILL:
2509 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
2510 case SystemZ::ATOMIC_LOAD_OILH:
2511 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
2512 case SystemZ::ATOMIC_LOAD_OIHL:
2513 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
2514 case SystemZ::ATOMIC_LOAD_OIHH:
2515 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
2516 case SystemZ::ATOMIC_LOAD_OILF:
2517 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
2518 case SystemZ::ATOMIC_LOAD_OIHF:
2519 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);
2520
2521 case SystemZ::ATOMIC_LOADW_XR:
2522 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
2523 case SystemZ::ATOMIC_LOADW_XILF:
2524 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
2525 case SystemZ::ATOMIC_LOAD_XR:
2526 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
2527 case SystemZ::ATOMIC_LOAD_XILF32:
2528 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
2529 case SystemZ::ATOMIC_LOAD_XGR:
2530 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
2531 case SystemZ::ATOMIC_LOAD_XILF:
2532 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
2533 case SystemZ::ATOMIC_LOAD_XIHF:
2534 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);
2535
2536 case SystemZ::ATOMIC_LOADW_NRi:
2537 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
2538 case SystemZ::ATOMIC_LOADW_NILHi:
2539 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
2540 case SystemZ::ATOMIC_LOAD_NRi:
2541 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
2542 case SystemZ::ATOMIC_LOAD_NILL32i:
2543 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
2544 case SystemZ::ATOMIC_LOAD_NILH32i:
2545 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
2546 case SystemZ::ATOMIC_LOAD_NILF32i:
2547 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
2548 case SystemZ::ATOMIC_LOAD_NGRi:
2549 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
2550 case SystemZ::ATOMIC_LOAD_NILLi:
2551 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
2552 case SystemZ::ATOMIC_LOAD_NILHi:
2553 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
2554 case SystemZ::ATOMIC_LOAD_NIHLi:
2555 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
2556 case SystemZ::ATOMIC_LOAD_NIHHi:
2557 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
2558 case SystemZ::ATOMIC_LOAD_NILFi:
2559 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
2560 case SystemZ::ATOMIC_LOAD_NIHFi:
2561 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);
2562
2563 case SystemZ::ATOMIC_LOADW_MIN:
2564 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
2565 SystemZ::CCMASK_CMP_LE, 0);
2566 case SystemZ::ATOMIC_LOAD_MIN_32:
2567 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
2568 SystemZ::CCMASK_CMP_LE, 32);
2569 case SystemZ::ATOMIC_LOAD_MIN_64:
2570 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
2571 SystemZ::CCMASK_CMP_LE, 64);
2572
2573 case SystemZ::ATOMIC_LOADW_MAX:
2574 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
2575 SystemZ::CCMASK_CMP_GE, 0);
2576 case SystemZ::ATOMIC_LOAD_MAX_32:
2577 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
2578 SystemZ::CCMASK_CMP_GE, 32);
2579 case SystemZ::ATOMIC_LOAD_MAX_64:
2580 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
2581 SystemZ::CCMASK_CMP_GE, 64);
2582
2583 case SystemZ::ATOMIC_LOADW_UMIN:
2584 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
2585 SystemZ::CCMASK_CMP_LE, 0);
2586 case SystemZ::ATOMIC_LOAD_UMIN_32:
2587 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
2588 SystemZ::CCMASK_CMP_LE, 32);
2589 case SystemZ::ATOMIC_LOAD_UMIN_64:
2590 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
2591 SystemZ::CCMASK_CMP_LE, 64);
2592
2593 case SystemZ::ATOMIC_LOADW_UMAX:
2594 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
2595 SystemZ::CCMASK_CMP_GE, 0);
2596 case SystemZ::ATOMIC_LOAD_UMAX_32:
2597 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
2598 SystemZ::CCMASK_CMP_GE, 32);
2599 case SystemZ::ATOMIC_LOAD_UMAX_64:
2600 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
2601 SystemZ::CCMASK_CMP_GE, 64);
2602
2603 case SystemZ::ATOMIC_CMP_SWAPW:
2604 return emitAtomicCmpSwapW(MI, MBB);
Richard Sandifordd131ff82013-07-08 09:35:23 +00002605 case SystemZ::MVCWrapper:
Richard Sandiford564681c2013-08-12 10:28:10 +00002606 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
2607 case SystemZ::CLCWrapper:
2608 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
Richard Sandifordca232712013-08-16 11:21:54 +00002609 case SystemZ::CLSTLoop:
2610 return emitStringWrapper(MI, MBB, SystemZ::CLST);
Richard Sandifordbb83a502013-08-16 11:29:37 +00002611 case SystemZ::MVSTLoop:
2612 return emitStringWrapper(MI, MBB, SystemZ::MVST);
Richard Sandiford0dec06a2013-08-16 11:41:43 +00002613 case SystemZ::SRSTLoop:
2614 return emitStringWrapper(MI, MBB, SystemZ::SRST);
Ulrich Weigand5f613df2013-05-06 16:15:19 +00002615 default:
2616 llvm_unreachable("Unexpected instr type to insert");
2617 }
2618}