blob: 8db22764ef5117c22bc96353e68a9665527b4497 [file] [log] [blame]
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07001//
2// The Subzero Code Generator
3//
4// This file is distributed under the University of Illinois Open Source
5// License. See LICENSE.TXT for details.
6//
7//===----------------------------------------------------------------------===//
Andrew Scull9612d322015-07-06 14:53:25 -07008///
9/// \file
Jim Stichnoth92a6e5b2015-12-02 16:52:44 -080010/// \brief Implements the TargetLoweringMIPS32 class, which consists almost
Andrew Scull9612d322015-07-06 14:53:25 -070011/// entirely of the lowering sequence for each high-level instruction.
12///
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070013//===----------------------------------------------------------------------===//
14
John Porto67f8de92015-06-25 10:14:17 -070015#include "IceTargetLoweringMIPS32.h"
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070016
17#include "IceCfg.h"
18#include "IceCfgNode.h"
19#include "IceClFlags.h"
20#include "IceDefs.h"
21#include "IceELFObjectWriter.h"
22#include "IceGlobalInits.h"
23#include "IceInstMIPS32.h"
Sagar Thakur5cce7612016-05-24 06:25:50 -070024#include "IceInstVarIter.h"
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070025#include "IceLiveness.h"
26#include "IceOperand.h"
Jim Stichnothac8da5c2015-10-21 06:57:46 -070027#include "IcePhiLoweringImpl.h"
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070028#include "IceRegistersMIPS32.h"
29#include "IceTargetLoweringMIPS32.def"
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070030#include "IceUtils.h"
John Porto67f8de92015-06-25 10:14:17 -070031#include "llvm/Support/MathExtras.h"
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070032
John Porto53611e22015-12-30 07:30:10 -080033namespace MIPS32 {
34std::unique_ptr<::Ice::TargetLowering> createTargetLowering(::Ice::Cfg *Func) {
John Porto4a566862016-01-04 09:33:41 -080035 return ::Ice::MIPS32::TargetMIPS32::create(Func);
John Porto53611e22015-12-30 07:30:10 -080036}
37
38std::unique_ptr<::Ice::TargetDataLowering>
39createTargetDataLowering(::Ice::GlobalContext *Ctx) {
John Porto4a566862016-01-04 09:33:41 -080040 return ::Ice::MIPS32::TargetDataMIPS32::create(Ctx);
John Porto53611e22015-12-30 07:30:10 -080041}
42
43std::unique_ptr<::Ice::TargetHeaderLowering>
44createTargetHeaderLowering(::Ice::GlobalContext *Ctx) {
John Porto4a566862016-01-04 09:33:41 -080045 return ::Ice::MIPS32::TargetHeaderMIPS32::create(Ctx);
John Porto53611e22015-12-30 07:30:10 -080046}
47
Karl Schimpf5403f5d2016-01-15 11:07:46 -080048void staticInit(::Ice::GlobalContext *Ctx) {
49 ::Ice::MIPS32::TargetMIPS32::staticInit(Ctx);
Jim Stichnoth8ff4b282016-01-04 15:39:06 -080050}
Jim Stichnoth467ffe52016-03-29 15:01:06 -070051
52bool shouldBePooled(const ::Ice::Constant *C) {
53 return ::Ice::MIPS32::TargetMIPS32::shouldBePooled(C);
54}
John Porto53611e22015-12-30 07:30:10 -080055} // end of namespace MIPS32
56
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070057namespace Ice {
John Porto4a566862016-01-04 09:33:41 -080058namespace MIPS32 {
Jim Stichnoth6da4cef2015-06-11 13:26:33 -070059
Jim Stichnothac8da5c2015-10-21 06:57:46 -070060using llvm::isInt;
61
namespace {

// The maximum number of arguments to pass in GPR registers.
constexpr uint32_t MIPS32_MAX_GPR_ARG = 4;

// Argument-passing register tables. These are default-constructed here and
// populated by TargetMIPS32::staticInit() below.
std::array<RegNumT, MIPS32_MAX_GPR_ARG> GPRArgInitializer;
std::array<RegNumT, MIPS32_MAX_GPR_ARG / 2> I64ArgInitializer;

// The maximum number of FP arguments passed in FP registers.
constexpr uint32_t MIPS32_MAX_FP_ARG = 2;

std::array<RegNumT, MIPS32_MAX_FP_ARG> FP32ArgInitializer;
std::array<RegNumT, MIPS32_MAX_FP_ARG> FP64ArgInitializer;

// Maps a register class to its printable name. MIPS32 currently defines no
// target-specific classes, so everything falls through to the generic names.
const char *getRegClassName(RegClass C) {
  auto ClassNum = static_cast<RegClassMIPS32>(C);
  assert(ClassNum < RCMIPS32_NUM);
  switch (ClassNum) {
  default:
    assert(C < RC_Target);
    return regClassString(C);
    // Add handling of new register classes below.
  }
}

// Stack alignment
constexpr uint32_t MIPS32_STACK_ALIGNMENT_BYTES = 8;

// Value is in bytes. Return Value adjusted to the next highest multiple of the
// stack alignment required for the given type.
// NOTE(review): the alignment used is the type's width in bytes; vector types
// are not yet supported and trigger UnimplementedError.
uint32_t applyStackAlignmentTy(uint32_t Value, Type Ty) {
  size_t typeAlignInBytes = typeWidthInBytes(Ty);
  if (isVectorType(Ty))
    UnimplementedError(getFlags());
  return Utils::applyAlignment(Value, typeAlignInBytes);
}

// Value is in bytes. Return Value adjusted to the next highest multiple of the
// stack alignment.
uint32_t applyStackAlignment(uint32_t Value) {
  return Utils::applyAlignment(Value, MIPS32_STACK_ALIGNMENT_BYTES);
}

} // end of anonymous namespace
105
// Construct the MIPS32 lowering for a single function; all real setup is done
// by the TargetLowering base class.
TargetMIPS32::TargetMIPS32(Cfg *Func) : TargetLowering(Func) {}
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700107
// Assigns a stack offset to each spilled variable. Multi-block variables are
// packed into the globals area; single-block variables are coalesced per
// CfgNode (they can share slots across nodes), unless the function calls
// setjmp-like routines, in which case every variable gets its own slot.
void TargetMIPS32::assignVarStackSlots(VarList &SortedSpilledVariables,
                                       size_t SpillAreaPaddingBytes,
                                       size_t SpillAreaSizeBytes,
                                       size_t GlobalsAndSubsequentPaddingSize) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  // Running totals: space consumed by multi-block (global) variables, and the
  // offset of the most recently assigned slot.
  size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
  size_t NextStackOffset = SpillAreaPaddingBytes;
  // Per-node running size of coalesced single-block locals.
  CfgVector<size_t> LocalsSize(Func->getNumNodes());
  // Coalescing is unsafe if a call may return twice (e.g. setjmp).
  const bool SimpleCoalescing = !callsReturnsTwice();

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        // Lives across blocks: allocate in the globals area.
        GlobalsSpaceUsed += Increment;
        NextStackOffset = GlobalsSpaceUsed;
      } else {
        // Single-block: slot is shared with variables from other nodes, so
        // the offset is relative to this node's locals region.
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        NextStackOffset = SpillAreaPaddingBytes +
                          GlobalsAndSubsequentPaddingSize +
                          LocalsSize[NodeIndex];
      }
    } else {
      // Untracked (or no coalescing): every variable gets a fresh slot.
      NextStackOffset += Increment;
    }
    // Offsets are assigned top-down from the spill area size.
    Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset);
  }
}
137
Karl Schimpf5403f5d2016-01-15 11:07:46 -0800138void TargetMIPS32::staticInit(GlobalContext *Ctx) {
139 (void)Ctx;
Jim Stichnoth8aa39662016-02-10 11:20:30 -0800140 RegNumT::setLimit(RegMIPS32::Reg_NUM);
John Portoe82b5602016-02-24 15:58:55 -0800141 SmallBitVector IntegerRegisters(RegMIPS32::Reg_NUM);
142 SmallBitVector I64PairRegisters(RegMIPS32::Reg_NUM);
143 SmallBitVector Float32Registers(RegMIPS32::Reg_NUM);
144 SmallBitVector Float64Registers(RegMIPS32::Reg_NUM);
145 SmallBitVector VectorRegisters(RegMIPS32::Reg_NUM);
146 SmallBitVector InvalidRegisters(RegMIPS32::Reg_NUM);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700147#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700148 isI64Pair, isFP32, isFP64, isVec128, alias_init) \
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700149 IntegerRegisters[RegMIPS32::val] = isInt; \
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700150 I64PairRegisters[RegMIPS32::val] = isI64Pair; \
151 Float32Registers[RegMIPS32::val] = isFP32; \
152 Float64Registers[RegMIPS32::val] = isFP64; \
153 VectorRegisters[RegMIPS32::val] = isVec128; \
John Portobb0a5fe2015-09-04 11:23:41 -0700154 RegisterAliases[RegMIPS32::val].resize(RegMIPS32::Reg_NUM); \
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700155 for (SizeT RegAlias : alias_init) { \
156 assert(!RegisterAliases[RegMIPS32::val][RegAlias] && \
157 "Duplicate alias for " #val); \
158 RegisterAliases[RegMIPS32::val].set(RegAlias); \
159 } \
160 RegisterAliases[RegMIPS32::val].resize(RegMIPS32::Reg_NUM); \
Jim Stichnoth29d15fd2016-01-19 10:25:37 -0800161 assert(RegisterAliases[RegMIPS32::val][RegMIPS32::val]);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700162 REGMIPS32_TABLE;
163#undef X
Mohit Bhakkadf90118a2016-06-13 00:28:13 -0700164
165 // TODO(mohit.bhakkad): Change these inits once we provide argument related
166 // field in register tables
167 for (size_t i = 0; i < MIPS32_MAX_GPR_ARG; i++)
168 GPRArgInitializer[i] = RegNumT::fixme(RegMIPS32::Reg_A0 + i);
169
170 for (size_t i = 0; i < MIPS32_MAX_GPR_ARG / 2; i++)
171 I64ArgInitializer[i] = RegNumT::fixme(RegMIPS32::Reg_A0A1 + i);
172
173 for (size_t i = 0; i < MIPS32_MAX_FP_ARG; i++) {
174 FP32ArgInitializer[i] = RegNumT::fixme(RegMIPS32::Reg_F12 + i * 2);
175 FP64ArgInitializer[i] = RegNumT::fixme(RegMIPS32::Reg_F12F13 + i);
176 }
177
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700178 TypeToRegisterSet[IceType_void] = InvalidRegisters;
179 TypeToRegisterSet[IceType_i1] = IntegerRegisters;
180 TypeToRegisterSet[IceType_i8] = IntegerRegisters;
181 TypeToRegisterSet[IceType_i16] = IntegerRegisters;
182 TypeToRegisterSet[IceType_i32] = IntegerRegisters;
183 TypeToRegisterSet[IceType_i64] = IntegerRegisters;
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700184 TypeToRegisterSet[IceType_f32] = Float32Registers;
185 TypeToRegisterSet[IceType_f64] = Float64Registers;
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700186 TypeToRegisterSet[IceType_v4i1] = VectorRegisters;
187 TypeToRegisterSet[IceType_v8i1] = VectorRegisters;
188 TypeToRegisterSet[IceType_v16i1] = VectorRegisters;
189 TypeToRegisterSet[IceType_v16i8] = VectorRegisters;
190 TypeToRegisterSet[IceType_v8i16] = VectorRegisters;
191 TypeToRegisterSet[IceType_v4i32] = VectorRegisters;
192 TypeToRegisterSet[IceType_v4f32] = VectorRegisters;
Karl Schimpf5403f5d2016-01-15 11:07:46 -0800193
Jim Stichnothb40595a2016-01-29 06:14:31 -0800194 for (size_t i = 0; i < llvm::array_lengthof(TypeToRegisterSet); ++i)
195 TypeToRegisterSetUnfiltered[i] = TypeToRegisterSet[i];
196
Karl Schimpf5403f5d2016-01-15 11:07:46 -0800197 filterTypeToRegisterSet(Ctx, RegMIPS32::Reg_NUM, TypeToRegisterSet,
Jim Stichnoth2544d4d2016-01-22 13:07:46 -0800198 llvm::array_lengthof(TypeToRegisterSet),
199 RegMIPS32::getRegName, getRegClassName);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700200}
201
Sagar Thakur1afb4832016-06-16 15:30:24 -0700202void TargetMIPS32::unsetIfNonLeafFunc() {
203 for (CfgNode *Node : Func->getNodes()) {
204 for (Inst &Instr : Node->getInsts()) {
205 if (llvm::isa<InstCall>(&Instr)) {
206 // Unset MaybeLeafFunc if call instruction exists.
207 MaybeLeafFunc = false;
208 return;
209 }
210 }
211 }
212}
213
// Returns the fixed MIPS32 stack alignment in bytes (8).
uint32_t TargetMIPS32::getStackAlignment() const {
  return MIPS32_STACK_ALIGNMENT_BYTES;
}
217
// Replaces high-level instructions that MIPS32 cannot lower directly with
// calls to runtime helper functions. When a helper call is emitted, the
// original instruction is marked deleted. Instructions that can be lowered
// natively are left untouched (the early `return`s).
void TargetMIPS32::genTargetHelperCallFor(Inst *Instr) {
  constexpr bool NoTailCall = false;
  constexpr bool IsTargetHelperCall = true;

  switch (Instr->getKind()) {
  default:
    return;
  case Inst::Arithmetic: {
    Variable *Dest = Instr->getDest();
    const Type DestTy = Dest->getType();
    const InstArithmetic::OpKind Op =
        llvm::cast<InstArithmetic>(Instr)->getOp();
    // Vector divide/remainder ops are scalarized rather than lowered to a
    // single helper call.
    if (isVectorType(DestTy)) {
      switch (Op) {
      default:
        break;
      case InstArithmetic::Fdiv:
      case InstArithmetic::Frem:
      case InstArithmetic::Sdiv:
      case InstArithmetic::Srem:
      case InstArithmetic::Udiv:
      case InstArithmetic::Urem:
        scalarizeArithmetic(Op, Dest, Instr->getSrc(0), Instr->getSrc(1));
        Instr->setDeleted();
        return;
      }
    }
    switch (DestTy) {
    default:
      return;
    case IceType_i64: {
      // 64-bit integer divide/remainder go to runtime helpers.
      RuntimeHelper HelperID = RuntimeHelper::H_Num;
      switch (Op) {
      default:
        return;
      case InstArithmetic::Udiv:
        HelperID = RuntimeHelper::H_udiv_i64;
        break;
      case InstArithmetic::Sdiv:
        HelperID = RuntimeHelper::H_sdiv_i64;
        break;
      case InstArithmetic::Urem:
        HelperID = RuntimeHelper::H_urem_i64;
        break;
      case InstArithmetic::Srem:
        HelperID = RuntimeHelper::H_srem_i64;
        break;
      }

      if (HelperID == RuntimeHelper::H_Num) {
        return;
      }

      Operand *TargetHelper = Ctx->getRuntimeHelperFunc(HelperID);
      constexpr SizeT MaxArgs = 2;
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(Instr->getSrc(0));
      Call->addArg(Instr->getSrc(1));
      Instr->setDeleted();
      return;
    }
    case IceType_i32:
    case IceType_i16:
    case IceType_i8: {
      // 8/16/32-bit divide/remainder use the i32 helpers; narrower operands
      // must first be widened to i32 with the appropriate extension.
      InstCast::OpKind CastKind;
      RuntimeHelper HelperID = RuntimeHelper::H_Num;
      switch (Op) {
      default:
        return;
      case InstArithmetic::Udiv:
        HelperID = RuntimeHelper::H_udiv_i32;
        CastKind = InstCast::Zext;
        break;
      case InstArithmetic::Sdiv:
        HelperID = RuntimeHelper::H_sdiv_i32;
        CastKind = InstCast::Sext;
        break;
      case InstArithmetic::Urem:
        HelperID = RuntimeHelper::H_urem_i32;
        CastKind = InstCast::Zext;
        break;
      case InstArithmetic::Srem:
        HelperID = RuntimeHelper::H_srem_i32;
        CastKind = InstCast::Sext;
        break;
      }

      if (HelperID == RuntimeHelper::H_Num) {
        return;
      }

      Operand *Src0 = Instr->getSrc(0);
      Operand *Src1 = Instr->getSrc(1);
      if (DestTy != IceType_i32) {
        // Src0 and Src1 have to be zero-, or signed-extended to i32. For Src0,
        // we just insert a InstCast right before the call to the helper.
        Variable *Src0_32 = Func->makeVariable(IceType_i32);
        Context.insert<InstCast>(CastKind, Src0_32, Src0);
        Src0 = Src0_32;

        // For a constant Src1 the extension is folded at compile time instead
        // of emitting a cast.
        if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) {
          const int32_t ShAmt = (DestTy == IceType_i16) ? 16 : 24;
          int32_t NewC = C->getValue();
          if (CastKind == InstCast::Zext) {
            // NOTE(review): this clears only the sub-type's sign bit (bit 7
            // for i8, bit 15 for i16) rather than masking to the low 8/16
            // bits; presumably the constant is already confined to the
            // narrow type's range — TODO confirm against the constant pool
            // invariants.
            NewC &= ~(0x80000000l >> ShAmt);
          } else {
            // Sign-extend by shifting up then arithmetically back down.
            NewC = (NewC << ShAmt) >> ShAmt;
          }
          Src1 = Ctx->getConstantInt32(NewC);
        } else {
          Variable *Src1_32 = Func->makeVariable(IceType_i32);
          Context.insert<InstCast>(CastKind, Src1_32, Src1);
          Src1 = Src1_32;
        }
      }
      Operand *TargetHelper = Ctx->getRuntimeHelperFunc(HelperID);
      constexpr SizeT MaxArgs = 2;
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      assert(Src0->getType() == IceType_i32);
      Call->addArg(Src0);
      assert(Src1->getType() == IceType_i32);
      Call->addArg(Src1);
      Instr->setDeleted();
      return;
    }
    case IceType_f32:
    case IceType_f64: {
      // Only floating-point remainder needs a helper; everything else is
      // lowered natively.
      if (Op != InstArithmetic::Frem) {
        return;
      }
      constexpr SizeT MaxArgs = 2;
      Operand *TargetHelper = Ctx->getRuntimeHelperFunc(
          DestTy == IceType_f32 ? RuntimeHelper::H_frem_f32
                                : RuntimeHelper::H_frem_f64);
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(Instr->getSrc(0));
      Call->addArg(Instr->getSrc(1));
      Instr->setDeleted();
      return;
    }
    }
    llvm::report_fatal_error("Control flow should never have reached here.");
  }
  case Inst::Cast: {
    Variable *Dest = Instr->getDest();
    Operand *Src0 = Instr->getSrc(0);
    const Type DestTy = Dest->getType();
    const Type SrcTy = Src0->getType();
    auto *CastInstr = llvm::cast<InstCast>(Instr);
    const InstCast::OpKind CastKind = CastInstr->getCastKind();

    switch (CastKind) {
    default:
      return;
    case InstCast::Fptosi:
    case InstCast::Fptoui: {
      // float/double -> i64 conversions go through runtime helpers.
      if (DestTy != IceType_i64) {
        return;
      }
      const bool DestIsSigned = CastKind == InstCast::Fptosi;
      const bool Src0IsF32 = isFloat32Asserting32Or64(SrcTy);
      Operand *TargetHelper = Ctx->getRuntimeHelperFunc(
          Src0IsF32 ? (DestIsSigned ? RuntimeHelper::H_fptosi_f32_i64
                                    : RuntimeHelper::H_fptoui_f32_i64)
                    : (DestIsSigned ? RuntimeHelper::H_fptosi_f64_i64
                                    : RuntimeHelper::H_fptoui_f64_i64));
      static constexpr SizeT MaxArgs = 1;
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(Src0);
      Instr->setDeleted();
      return;
    }
    case InstCast::Sitofp:
    case InstCast::Uitofp: {
      // i64 -> float/double conversions go through runtime helpers.
      if (SrcTy != IceType_i64) {
        return;
      }
      const bool SourceIsSigned = CastKind == InstCast::Sitofp;
      const bool DestIsF32 = isFloat32Asserting32Or64(Dest->getType());
      Operand *TargetHelper = Ctx->getRuntimeHelperFunc(
          DestIsF32 ? (SourceIsSigned ? RuntimeHelper::H_sitofp_i64_f32
                                      : RuntimeHelper::H_uitofp_i64_f32)
                    : (SourceIsSigned ? RuntimeHelper::H_sitofp_i64_f64
                                      : RuntimeHelper::H_uitofp_i64_f64));
      static constexpr SizeT MaxArgs = 1;
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(Src0);
      Instr->setDeleted();
      return;
    }
    case InstCast::Bitcast: {
      if (DestTy == SrcTy) {
        return;
      }
      // Bitcasts between i1-vectors and small integers use helpers; the
      // helper's i32 return is truncated back to the narrow dest as needed.
      Variable *CallDest = Dest;
      RuntimeHelper HelperID = RuntimeHelper::H_Num;
      switch (DestTy) {
      default:
        return;
      case IceType_i8:
        assert(SrcTy == IceType_v8i1);
        HelperID = RuntimeHelper::H_bitcast_8xi1_i8;
        CallDest = Func->makeVariable(IceType_i32);
        break;
      case IceType_i16:
        assert(SrcTy == IceType_v16i1);
        HelperID = RuntimeHelper::H_bitcast_16xi1_i16;
        CallDest = Func->makeVariable(IceType_i32);
        break;
      case IceType_v8i1: {
        assert(SrcTy == IceType_i8);
        HelperID = RuntimeHelper::H_bitcast_i8_8xi1;
        Variable *Src0AsI32 = Func->makeVariable(stackSlotType());
        // Arguments to functions are required to be at least 32 bits wide.
        Context.insert<InstCast>(InstCast::Zext, Src0AsI32, Src0);
        Src0 = Src0AsI32;
      } break;
      case IceType_v16i1: {
        assert(SrcTy == IceType_i16);
        HelperID = RuntimeHelper::H_bitcast_i16_16xi1;
        Variable *Src0AsI32 = Func->makeVariable(stackSlotType());
        // Arguments to functions are required to be at least 32 bits wide.
        Context.insert<InstCast>(InstCast::Zext, Src0AsI32, Src0);
        Src0 = Src0AsI32;
      } break;
      }
      constexpr SizeT MaxSrcs = 1;
      InstCall *Call = makeHelperCall(HelperID, CallDest, MaxSrcs);
      Call->addArg(Src0);
      Context.insert(Call);
      // The PNaCl ABI disallows i8/i16 return types, so truncate the helper
      // call result to the appropriate type as necessary.
      if (CallDest->getType() != Dest->getType())
        Context.insert<InstCast>(InstCast::Trunc, Dest, CallDest);
      Instr->setDeleted();
      return;
    }
    case InstCast::Trunc: {
      // Vector truncation to i1-vectors needs no helper; just sanity-check
      // the shapes and let the normal lowering proceed.
      if (DestTy == SrcTy) {
        return;
      }
      if (!isVectorType(SrcTy)) {
        return;
      }
      assert(typeNumElements(DestTy) == typeNumElements(SrcTy));
      assert(typeElementType(DestTy) == IceType_i1);
      assert(isVectorIntegerType(SrcTy));
      return;
    }
    case InstCast::Sext:
    case InstCast::Zext: {
      // Vector extension from i1-vectors needs no helper; just sanity-check
      // the shapes and let the normal lowering proceed.
      if (DestTy == SrcTy) {
        return;
      }
      if (!isVectorType(DestTy)) {
        return;
      }
      assert(typeNumElements(DestTy) == typeNumElements(SrcTy));
      assert(typeElementType(SrcTy) == IceType_i1);
      assert(isVectorIntegerType(DestTy));
      return;
    }
    }
    llvm::report_fatal_error("Control flow should never have reached here.");
  }
  case Inst::IntrinsicCall: {
    Variable *Dest = Instr->getDest();
    auto *IntrinsicCall = llvm::cast<InstIntrinsicCall>(Instr);
    Intrinsics::IntrinsicID ID = IntrinsicCall->getIntrinsicInfo().ID;
    switch (ID) {
    default:
      return;
    case Intrinsics::Ctpop: {
      // Population count is lowered to the i32 or i64 ctpop helper.
      Operand *Src0 = IntrinsicCall->getArg(0);
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(isInt32Asserting32Or64(Src0->getType())
                                        ? RuntimeHelper::H_call_ctpop_i32
                                        : RuntimeHelper::H_call_ctpop_i64);
      static constexpr SizeT MaxArgs = 1;
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(Src0);
      Instr->setDeleted();
      return;
    }
    case Intrinsics::Longjmp: {
      static constexpr SizeT MaxArgs = 2;
      static constexpr Variable *NoDest = nullptr;
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(RuntimeHelper::H_call_longjmp);
      auto *Call = Context.insert<InstCall>(MaxArgs, NoDest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(IntrinsicCall->getArg(0));
      Call->addArg(IntrinsicCall->getArg(1));
      Instr->setDeleted();
      return;
    }
    case Intrinsics::Memcpy: {
      static constexpr SizeT MaxArgs = 3;
      static constexpr Variable *NoDest = nullptr;
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(RuntimeHelper::H_call_memcpy);
      auto *Call = Context.insert<InstCall>(MaxArgs, NoDest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(IntrinsicCall->getArg(0));
      Call->addArg(IntrinsicCall->getArg(1));
      Call->addArg(IntrinsicCall->getArg(2));
      Instr->setDeleted();
      return;
    }
    case Intrinsics::Memmove: {
      static constexpr SizeT MaxArgs = 3;
      static constexpr Variable *NoDest = nullptr;
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(RuntimeHelper::H_call_memmove);
      auto *Call = Context.insert<InstCall>(MaxArgs, NoDest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(IntrinsicCall->getArg(0));
      Call->addArg(IntrinsicCall->getArg(1));
      Call->addArg(IntrinsicCall->getArg(2));
      Instr->setDeleted();
      return;
    }
    case Intrinsics::Memset: {
      // The value operand needs to be extended to a stack slot size because
      // the PNaCl ABI requires arguments to be at least 32 bits wide.
      Operand *ValOp = IntrinsicCall->getArg(1);
      assert(ValOp->getType() == IceType_i8);
      Variable *ValExt = Func->makeVariable(stackSlotType());
      Context.insert<InstCast>(InstCast::Zext, ValExt, ValOp);

      static constexpr SizeT MaxArgs = 3;
      static constexpr Variable *NoDest = nullptr;
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(RuntimeHelper::H_call_memset);
      auto *Call = Context.insert<InstCall>(MaxArgs, NoDest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(IntrinsicCall->getArg(0));
      Call->addArg(ValExt);
      Call->addArg(IntrinsicCall->getArg(2));
      Instr->setDeleted();
      return;
    }
    case Intrinsics::NaClReadTP: {
      // Under native NaCl sandboxing the thread pointer is read inline;
      // otherwise call the read_tp helper.
      if (SandboxingType == ST_NaCl) {
        return;
      }
      static constexpr SizeT MaxArgs = 0;
      assert(SandboxingType != ST_Nonsfi);
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(RuntimeHelper::H_call_read_tp);
      Context.insert<InstCall>(MaxArgs, Dest, TargetHelper, NoTailCall,
                               IsTargetHelperCall);
      Instr->setDeleted();
      return;
    }
    case Intrinsics::Setjmp: {
      static constexpr SizeT MaxArgs = 1;
      Operand *TargetHelper =
          Ctx->getRuntimeHelperFunc(RuntimeHelper::H_call_setjmp);
      auto *Call = Context.insert<InstCall>(MaxArgs, Dest, TargetHelper,
                                            NoTailCall, IsTargetHelperCall);
      Call->addArg(IntrinsicCall->getArg(0));
      Instr->setDeleted();
      return;
    }
    }
    llvm::report_fatal_error("Control flow should never have reached here.");
  }
  }
}
592
// Computes MaxOutArgsSizeBytes: the largest stack-argument area needed by any
// call in the function, with a floor for non-leaf functions (space for the
// four GPR argument registers' home area).
void TargetMIPS32::findMaxStackOutArgsSize() {
  // MinNeededOutArgsBytes should be updated if the Target ever creates a
  // high-level InstCall that requires more stack bytes.
  size_t MinNeededOutArgsBytes = 0;
  if (!MaybeLeafFunc)
    MinNeededOutArgsBytes = MIPS32_MAX_GPR_ARG * 4;
  MaxOutArgsSizeBytes = MinNeededOutArgsBytes;
  for (CfgNode *Node : Func->getNodes()) {
    Context.init(Node);
    while (!Context.atEnd()) {
      // RAII object advances the lowering context at the end of each
      // iteration.
      PostIncrLoweringContext PostIncrement(Context);
      Inst *CurInstr = iteratorToInst(Context.getCur());
      if (auto *Call = llvm::dyn_cast<InstCall>(CurInstr)) {
        SizeT OutArgsSizeBytes = getCallStackArgumentsSizeBytes(Call);
        MaxOutArgsSizeBytes = std::max(MaxOutArgsSizeBytes, OutArgsSizeBytes);
      }
    }
  }
}
612
// The O2 translation pipeline: helper-call generation, alloca merging, Phi
// lowering, address-mode optimization, target lowering, global register
// allocation, frame generation, post-lowering legalization, and branch
// optimization. Pass order is significant; each pass bails out on error.
void TargetMIPS32::translateO2() {
  TimerMarker T(TimerStack::TT_O2, Func);

  // TODO(stichnot): share passes with X86?
  // https://code.google.com/p/nativeclient/issues/detail?id=4094
  genTargetHelperCalls();

  unsetIfNonLeafFunc();

  findMaxStackOutArgsSize();

  // Merge Alloca instructions, and lay out the stack.
  static constexpr bool SortAndCombineAllocas = true;
  Func->processAllocas(SortAndCombineAllocas);
  Func->dump("After Alloca processing");

  if (!getFlags().getEnablePhiEdgeSplit()) {
    // Lower Phi instructions.
    Func->placePhiLoads();
    if (Func->hasError())
      return;
    Func->placePhiStores();
    if (Func->hasError())
      return;
    Func->deletePhis();
    if (Func->hasError())
      return;
    Func->dump("After Phi lowering");
  }

  // Address mode optimization.
  Func->getVMetadata()->init(VMK_SingleDefs);
  Func->doAddressOpt();

  // Argument lowering
  Func->doArgLowering();

  // Target lowering. This requires liveness analysis for some parts of the
  // lowering decisions, such as compare/branch fusing. If non-lightweight
  // liveness analysis is used, the instructions need to be renumbered first.
  // TODO: This renumbering should only be necessary if we're actually
  // calculating live intervals, which we only do for register allocation.
  Func->renumberInstructions();
  if (Func->hasError())
    return;

  // TODO: It should be sufficient to use the fastest liveness calculation,
  // i.e. livenessLightweight(). However, for some reason that slows down the
  // rest of the translation. Investigate.
  Func->liveness(Liveness_Basic);
  if (Func->hasError())
    return;
  Func->dump("After MIPS32 address mode opt");

  Func->genCode();
  if (Func->hasError())
    return;
  Func->dump("After MIPS32 codegen");

  // Register allocation. This requires instruction renumbering and full
  // liveness analysis.
  Func->renumberInstructions();
  if (Func->hasError())
    return;
  Func->liveness(Liveness_Intervals);
  if (Func->hasError())
    return;
  // The post-codegen dump is done here, after liveness analysis and associated
  // cleanup, to make the dump cleaner and more useful.
  Func->dump("After initial MIPS32 codegen");
  // Validate the live range computations. The expensive validation call is
  // deliberately only made when assertions are enabled.
  assert(Func->validateLiveness());
  Func->getVMetadata()->init(VMK_All);
  regAlloc(RAK_Global);
  if (Func->hasError())
    return;
  Func->dump("After linear scan regalloc");

  if (getFlags().getEnablePhiEdgeSplit()) {
    Func->advancedPhiLowering();
    Func->dump("After advanced Phi lowering");
  }

  // Stack frame mapping.
  Func->genFrame();
  if (Func->hasError())
    return;
  Func->dump("After stack frame mapping");

  postLowerLegalization();
  if (Func->hasError())
    return;
  Func->dump("After postLowerLegalization");

  Func->contractEmptyNodes();
  Func->reorderNodes();

  // Branch optimization. This needs to be done just before code emission. In
  // particular, no transformations that insert or reorder CfgNodes should be
  // done after branch optimization. We go ahead and do it before nop insertion
  // to reduce the amount of work needed for searching for opportunities.
  Func->doBranchOpt();
  Func->dump("After branch optimization");

  // Nop insertion
  if (getFlags().getShouldDoNopInsertion()) {
    Func->doNopInsertion();
  }
}
723
// The Om1 (minimal optimization) pipeline: like translateO2 but with no
// alloca merging, no address-mode optimization, no liveness analysis, and
// only infinite-weight register allocation.
void TargetMIPS32::translateOm1() {
  TimerMarker T(TimerStack::TT_Om1, Func);

  // TODO: share passes with X86?
  genTargetHelperCalls();

  unsetIfNonLeafFunc();

  findMaxStackOutArgsSize();

  // Do not merge Alloca instructions, and lay out the stack.
  static constexpr bool SortAndCombineAllocas = false;
  Func->processAllocas(SortAndCombineAllocas);
  Func->dump("After Alloca processing");

  Func->placePhiLoads();
  if (Func->hasError())
    return;
  Func->placePhiStores();
  if (Func->hasError())
    return;
  Func->deletePhis();
  if (Func->hasError())
    return;
  Func->dump("After Phi lowering");

  Func->doArgLowering();

  Func->genCode();
  if (Func->hasError())
    return;
  Func->dump("After initial MIPS32 codegen");

  regAlloc(RAK_InfOnly);
  if (Func->hasError())
    return;
  Func->dump("After regalloc of infinite-weight variables");

  Func->genFrame();
  if (Func->hasError())
    return;
  Func->dump("After stack frame mapping");

  postLowerLegalization();
  if (Func->hasError())
    return;
  Func->dump("After postLowerLegalization");

  // Nop insertion
  if (getFlags().getShouldDoNopInsertion()) {
    Func->doNopInsertion();
  }
}
777
Reed Kotler04bca5a2016-02-03 14:40:47 -0800778bool TargetMIPS32::doBranchOpt(Inst *Instr, const CfgNode *NextNode) {
Jaydeep Patil13f0ca32016-08-26 13:27:40 -0700779 if (auto *Br = llvm::dyn_cast<InstMIPS32Br>(Instr)) {
780 return Br->optimizeBranch(NextNode);
781 }
Reed Kotlerd00d48d2015-07-08 09:49:07 -0700782 return false;
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700783}
784
namespace {

// Printable register names, indexed by register number; expanded from the
// REGMIPS32_TABLE X-macro so it stays in sync with the register definitions.
const char *RegNames[RegMIPS32::Reg_NUM] = {
#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt,    \
          isI64Pair, isFP32, isFP64, isVec128, alias_init)                     \
  name,
    REGMIPS32_TABLE
#undef X
};

} // end of anonymous namespace
796
// Returns the printable name for a register number; asserts on invalid input.
const char *RegMIPS32::getRegName(RegNumT RegNum) {
  RegNum.assertIsValid();
  return RegNames[RegNum];
}
801
// Returns the printable register name. MIPS32 register names do not depend on
// the operand type, so Ty is deliberately ignored.
const char *TargetMIPS32::getRegName(RegNumT RegNum, Type Ty) const {
  (void)Ty;
  return RegMIPS32::getRegName(RegNum);
}
806
// Returns (lazily creating on first use) the canonical Variable representing
// the given physical register with the given type.
Variable *TargetMIPS32::getPhysicalRegister(RegNumT RegNum, Type Ty) {
  // Untyped requests default to i32.
  if (Ty == IceType_void)
    Ty = IceType_i32;
  // The per-type cache is grown on demand.
  if (PhysicalRegisters[Ty].empty())
    PhysicalRegisters[Ty].resize(RegMIPS32::Reg_NUM);
  RegNum.assertIsValid();
  Variable *Reg = PhysicalRegisters[Ty][RegNum];
  if (Reg == nullptr) {
    Reg = Func->makeVariable(Ty);
    Reg->setRegNum(RegNum);
    PhysicalRegisters[Ty][RegNum] = Reg;
    // Specially mark a named physical register as an "argument" so that it is
    // considered live upon function entry. Otherwise it's possible to get
    // liveness validation errors for saving callee-save registers.
    Func->addImplicitArg(Reg);
    // Don't bother tracking the live range of a named physical register.
    Reg->setIgnoreLiveness();
  }
  return Reg;
}
827
// Jump-table emission is not yet implemented for MIPS32; this stub exists to
// satisfy the TargetLowering interface and reports an unimplemented error.
void TargetMIPS32::emitJumpTable(const Cfg *Func,
                                 const InstJumpTable *JumpTable) const {
  (void)Func;
  (void)JumpTable;
  UnimplementedError(getFlags());
}
834
/// Provide a trivial wrapper to legalize() for this common usage: force the
/// operand into a register (optionally a specific one via RegNum) and return
/// it as a Variable.
Variable *TargetMIPS32::legalizeToReg(Operand *From, RegNumT RegNum) {
  return llvm::cast<Variable>(legalize(From, Legal_Reg, RegNum));
}
839
840/// Legalize undef values to concrete values.
Jim Stichnoth8aa39662016-02-10 11:20:30 -0800841Operand *TargetMIPS32::legalizeUndef(Operand *From, RegNumT RegNum) {
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700842 (void)RegNum;
843 Type Ty = From->getType();
844 if (llvm::isa<ConstantUndef>(From)) {
845 // Lower undefs to zero. Another option is to lower undefs to an
846 // uninitialized register; however, using an uninitialized register
847 // results in less predictable code.
848 //
849 // If in the future the implementation is changed to lower undef
850 // values to uninitialized registers, a FakeDef will be needed:
851 // Context.insert(InstFakeDef::create(Func, Reg));
852 // This is in order to ensure that the live range of Reg is not
853 // overestimated. If the constant being lowered is a 64 bit value,
854 // then the result should be split and the lo and hi components will
855 // need to go in uninitialized registers.
856 if (isVectorType(Ty))
Karl Schimpfd4699942016-04-02 09:55:31 -0700857 UnimplementedError(getFlags());
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700858 return Ctx->getConstantZero(Ty);
859 }
860 return From;
861}
862
Jim Stichnoth8aa39662016-02-10 11:20:30 -0800863Variable *TargetMIPS32::makeReg(Type Type, RegNumT RegNum) {
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700864 // There aren't any 64-bit integer registers for Mips32.
865 assert(Type != IceType_i64);
866 Variable *Reg = Func->makeVariable(Type);
Reed Kotler5fa0a5f2016-02-15 20:01:24 -0800867 if (RegNum.hasValue())
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700868 Reg->setRegNum(RegNum);
Reed Kotler5fa0a5f2016-02-15 20:01:24 -0800869 else
870 Reg->setMustHaveReg();
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700871 return Reg;
872}
873
Mohit Bhakkadf3bc5cf2016-05-31 11:19:03 -0700874OperandMIPS32Mem *TargetMIPS32::formMemoryOperand(Operand *Operand, Type Ty) {
875 // It may be the case that address mode optimization already creates an
876 // OperandMIPS32Mem, so in that case it wouldn't need another level of
877 // transformation.
878 if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Operand)) {
Jaydeep Patil1d0690b2016-09-04 07:19:08 -0700879 return llvm::cast<OperandMIPS32Mem>(legalize(Mem));
Mohit Bhakkadf3bc5cf2016-05-31 11:19:03 -0700880 }
881
882 // If we didn't do address mode optimization, then we only have a base/offset
883 // to work with. MIPS always requires a base register, so just use that to
884 // hold the operand.
Jaydeep Patil1d0690b2016-09-04 07:19:08 -0700885 auto *Base = llvm::cast<Variable>(
886 legalize(Operand, Legal_Reg | Legal_Rematerializable));
Jim Stichnothfe62f0a2016-07-10 05:13:18 -0700887 const int32_t Offset = Base->hasStackOffset() ? Base->getStackOffset() : 0;
Mohit Bhakkadf3bc5cf2016-05-31 11:19:03 -0700888 return OperandMIPS32Mem::create(
Jim Stichnothfe62f0a2016-07-10 05:13:18 -0700889 Func, Ty, Base,
890 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(Offset)));
Mohit Bhakkadf3bc5cf2016-05-31 11:19:03 -0700891}
892
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700893void TargetMIPS32::emitVariable(const Variable *Var) const {
Jan Voung28068ad2015-07-31 12:58:46 -0700894 if (!BuildDefs::dump())
895 return;
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700896 Ostream &Str = Ctx->getStrEmit();
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700897 const Type FrameSPTy = IceType_i32;
898 if (Var->hasReg()) {
899 Str << '$' << getRegName(Var->getRegNum(), Var->getType());
Jaydeep Patil1d0690b2016-09-04 07:19:08 -0700900 return;
Jim Stichnothac8da5c2015-10-21 06:57:46 -0700901 }
Jaydeep Patil1d0690b2016-09-04 07:19:08 -0700902 if (Var->mustHaveReg()) {
903 llvm::report_fatal_error("Infinite-weight Variable (" + Var->getName() +
904 ") has no register assigned - function " +
905 Func->getFunctionName());
906 }
907 const int32_t Offset = Var->getStackOffset();
908 Str << Offset;
909 Str << "($" << getRegName(getFrameOrStackReg(), FrameSPTy);
910 Str << ")";
Jim Stichnoth6da4cef2015-06-11 13:26:33 -0700911}
912
// Initializes the O32 calling-convention state: the register "used" bitmasks
// are sized to the full register file, and each argument-register pool is
// filled from its initializer in reverse so that back() yields the next
// register to assign (a0 first, etc.).
TargetMIPS32::CallingConv::CallingConv()
    : GPRegsUsed(RegMIPS32::Reg_NUM),
      GPRArgs(GPRArgInitializer.rbegin(), GPRArgInitializer.rend()),
      I64Args(I64ArgInitializer.rbegin(), I64ArgInitializer.rend()),
      VFPRegsUsed(RegMIPS32::Reg_NUM),
      FP32Args(FP32ArgInitializer.rbegin(), FP32ArgInitializer.rend()),
      FP64Args(FP64ArgInitializer.rbegin(), FP64ArgInitializer.rend()) {}
920
921// In MIPS O32 abi FP argument registers can be used only if first argument is
922// of type float/double. UseFPRegs flag is used to care of that. Also FP arg
923// registers can be used only for first 2 arguments, so we require argument
924// number to make register allocation decisions.
925bool TargetMIPS32::CallingConv::argInReg(Type Ty, uint32_t ArgNo,
926 RegNumT *Reg) {
927 if (isScalarIntegerType(Ty))
928 return argInGPR(Ty, Reg);
929 if (isScalarFloatingType(Ty)) {
930 if (ArgNo == 0) {
931 UseFPRegs = true;
932 return argInVFP(Ty, Reg);
933 }
934 if (UseFPRegs && ArgNo == 1) {
935 UseFPRegs = false;
936 return argInVFP(Ty, Reg);
937 }
938 return argInGPR(Ty, Reg);
939 }
940 UnimplementedError(getFlags());
941 return false;
942}
943
944bool TargetMIPS32::CallingConv::argInGPR(Type Ty, RegNumT *Reg) {
945 CfgVector<RegNumT> *Source;
946
947 switch (Ty) {
948 default: {
949 UnimplementedError(getFlags());
950 return false;
951 } break;
952 case IceType_i32:
953 case IceType_f32: {
954 Source = &GPRArgs;
955 } break;
956 case IceType_i64:
957 case IceType_f64: {
958 Source = &I64Args;
959 } break;
960 }
961
962 discardUnavailableGPRsAndTheirAliases(Source);
963
964 if (Source->empty()) {
965 GPRegsUsed.set();
966 return false;
967 }
968
969 *Reg = Source->back();
970 // Note that we don't Source->pop_back() here. This is intentional. Notice how
971 // we mark all of Reg's aliases as Used. So, for the next argument,
972 // Source->back() is marked as unavailable, and it is thus implicitly popped
973 // from the stack.
974 GPRegsUsed |= RegisterAliases[*Reg];
975 return true;
976}
977
978inline void TargetMIPS32::CallingConv::discardNextGPRAndItsAliases(
979 CfgVector<RegNumT> *Regs) {
980 GPRegsUsed |= RegisterAliases[Regs->back()];
981 Regs->pop_back();
982}
983
Mohit Bhakkadbbb5fa72016-06-29 06:51:08 -0700984inline void TargetMIPS32::CallingConv::alignGPR(CfgVector<RegNumT> *Regs) {
985 if (Regs->back() == RegMIPS32::Reg_A1 || Regs->back() == RegMIPS32::Reg_A3)
986 discardNextGPRAndItsAliases(Regs);
987}
988
Mohit Bhakkadf90118a2016-06-13 00:28:13 -0700989// GPR are not packed when passing parameters. Thus, a function foo(i32, i64,
990// i32) will have the first argument in a0, the second in a2-a3, and the third
991// on the stack. To model this behavior, whenever we pop a register from Regs,
992// we remove all of its aliases from the pool of available GPRs. This has the
993// effect of computing the "closure" on the GPR registers.
994void TargetMIPS32::CallingConv::discardUnavailableGPRsAndTheirAliases(
995 CfgVector<RegNumT> *Regs) {
996 while (!Regs->empty() && GPRegsUsed[Regs->back()]) {
997 discardNextGPRAndItsAliases(Regs);
998 }
999}
1000
1001bool TargetMIPS32::CallingConv::argInVFP(Type Ty, RegNumT *Reg) {
1002 CfgVector<RegNumT> *Source;
1003
1004 switch (Ty) {
1005 default: {
1006 UnimplementedError(getFlags());
1007 return false;
1008 } break;
1009 case IceType_f32: {
1010 Source = &FP32Args;
1011 } break;
1012 case IceType_f64: {
1013 Source = &FP64Args;
1014 } break;
1015 }
1016
1017 discardUnavailableVFPRegsAndTheirAliases(Source);
1018
1019 if (Source->empty()) {
1020 VFPRegsUsed.set();
1021 return false;
1022 }
1023
1024 *Reg = Source->back();
1025 VFPRegsUsed |= RegisterAliases[*Reg];
1026
1027 // In MIPS O32 abi if fun arguments are (f32, i32) then one can not use reg_a0
1028 // for second argument even though it's free. f32 arg goes in reg_f12, i32 arg
1029 // goes in reg_a1. Similarly if arguments are (f64, i32) second argument goes
1030 // in reg_a3 and a0, a1 are not used.
1031 Source = &GPRArgs;
1032 // Discard one GPR reg for f32(4 bytes), two for f64(4 + 4 bytes)
Mohit Bhakkadbbb5fa72016-06-29 06:51:08 -07001033 if (Ty == IceType_f64) {
1034 // In MIPS o32 abi, when we use GPR argument pairs to store F64 values, pair
1035 // must be aligned at even register. Similarly when we discard GPR registers
1036 // when some arguments from starting 16 bytes goes in FPR, we must take care
1037 // of alignment. For example if fun args are (f32, f64, f32), for first f32
1038 // we discard a0, now for f64 argument, which will go in F14F15, we must
1039 // first align GPR vector to even register by discarding a1, then discard
1040 // two GPRs a2 and a3. Now last f32 argument will go on stack.
1041 alignGPR(Source);
Mohit Bhakkadf90118a2016-06-13 00:28:13 -07001042 discardNextGPRAndItsAliases(Source);
Mohit Bhakkadbbb5fa72016-06-29 06:51:08 -07001043 }
1044 discardNextGPRAndItsAliases(Source);
Mohit Bhakkadf90118a2016-06-13 00:28:13 -07001045 return true;
1046}
1047
1048void TargetMIPS32::CallingConv::discardUnavailableVFPRegsAndTheirAliases(
1049 CfgVector<RegNumT> *Regs) {
1050 while (!Regs->empty() && VFPRegsUsed[Regs->back()]) {
1051 Regs->pop_back();
1052 }
1053}
1054
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07001055void TargetMIPS32::lowerArguments() {
Reed Kotlerd00d48d2015-07-08 09:49:07 -07001056 VarList &Args = Func->getArgs();
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001057 TargetMIPS32::CallingConv CC;
1058
1059 // For each register argument, replace Arg in the argument list with the home
1060 // register. Then generate an instruction in the prolog to copy the home
1061 // register to the assigned location of Arg.
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001062 Context.init(Func->getEntryNode());
1063 Context.setInsertPoint(Context.getCur());
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001064
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001065 for (SizeT I = 0, E = Args.size(); I < E; ++I) {
1066 Variable *Arg = Args[I];
1067 Type Ty = Arg->getType();
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001068 RegNumT RegNum;
1069 if (!CC.argInReg(Ty, I, &RegNum)) {
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001070 continue;
1071 }
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001072 Variable *RegisterArg = Func->makeVariable(Ty);
1073 if (BuildDefs::dump()) {
1074 RegisterArg->setName(Func, "home_reg:" + Arg->getName());
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001075 }
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001076 RegisterArg->setIsArg();
1077 Arg->setIsArg(false);
1078 Args[I] = RegisterArg;
1079 switch (Ty) {
1080 default: { RegisterArg->setRegNum(RegNum); } break;
1081 case IceType_i64: {
1082 auto *RegisterArg64 = llvm::cast<Variable64On32>(RegisterArg);
1083 RegisterArg64->initHiLo(Func);
1084 RegisterArg64->getLo()->setRegNum(
Mohit Bhakkadeec56212016-08-02 05:55:11 -07001085 RegNumT::fixme(RegMIPS32::get64PairFirstRegNum(RegNum)));
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001086 RegisterArg64->getHi()->setRegNum(
Mohit Bhakkadeec56212016-08-02 05:55:11 -07001087 RegNumT::fixme(RegMIPS32::get64PairSecondRegNum(RegNum)));
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001088 } break;
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001089 }
Mohit Bhakkad7984b712016-06-13 06:06:35 -07001090 Context.insert<InstAssign>(Arg, RegisterArg);
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001091 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07001092}
1093
1094Type TargetMIPS32::stackSlotType() { return IceType_i32; }
1095
Sagar Thakura49fce02016-06-13 05:55:00 -07001096// Helper function for addProlog().
1097//
1098// This assumes Arg is an argument passed on the stack. This sets the frame
1099// offset for Arg and updates InArgsSizeBytes according to Arg's width. For an
1100// I64 arg that has been split into Lo and Hi components, it calls itself
1101// recursively on the components, taking care to handle Lo first because of the
1102// little-endian architecture. Lastly, this function generates an instruction
1103// to copy Arg into its assigned register if applicable.
1104void TargetMIPS32::finishArgumentLowering(Variable *Arg, Variable *FramePtr,
1105 size_t BasicFrameOffset,
1106 size_t *InArgsSizeBytes) {
1107 const Type Ty = Arg->getType();
1108 *InArgsSizeBytes = applyStackAlignmentTy(*InArgsSizeBytes, Ty);
1109
1110 if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
1111 Variable *const Lo = Arg64On32->getLo();
1112 Variable *const Hi = Arg64On32->getHi();
1113 finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, InArgsSizeBytes);
1114 finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes);
1115 return;
1116 }
1117 assert(Ty != IceType_i64);
1118
1119 const int32_t ArgStackOffset = BasicFrameOffset + *InArgsSizeBytes;
1120 *InArgsSizeBytes += typeWidthInBytesOnStack(Ty);
1121
1122 if (!Arg->hasReg()) {
1123 Arg->setStackOffset(ArgStackOffset);
1124 return;
1125 }
1126
1127 // If the argument variable has been assigned a register, we need to copy the
1128 // value from the stack slot.
1129 Variable *Parameter = Func->makeVariable(Ty);
1130 Parameter->setMustNotHaveReg();
1131 Parameter->setStackOffset(ArgStackOffset);
1132 _mov(Arg, Parameter);
1133}
1134
// Builds the function prolog for Node (the entry node): computes the frame
// layout, saves callee-save registers, adjusts SP, optionally establishes FP,
// and assigns stack offsets to stack-passed arguments and spilled locals.
void TargetMIPS32::addProlog(CfgNode *Node) {
  // Stack frame layout:
  //
  // +------------------------+
  // | 1. preserved registers |
  // +------------------------+
  // | 2. padding             |
  // +------------------------+
  // | 3. global spill area   |
  // +------------------------+
  // | 4. padding             |
  // +------------------------+
  // | 5. local spill area    |
  // +------------------------+
  // | 6. padding             |
  // +------------------------+
  // | 7. allocas             |
  // +------------------------+
  // | 8. padding             |
  // +------------------------+
  // | 9. out args            |
  // +------------------------+ <--- StackPointer
  //
  // The following variables record the size in bytes of the given areas:
  //  * PreservedRegsSizeBytes: area 1
  //  * SpillAreaPaddingBytes: area 2
  //  * GlobalsSize: area 3
  //  * GlobalsAndSubsequentPaddingSize: areas 3 - 4
  //  * LocalsSpillAreaSize: area 5
  //  * SpillAreaSizeBytes: areas 2 - 9
  //  * maxOutArgsSizeBytes(): area 9
  Context.init(Node);
  Context.setInsertPoint(Context.getCur());

  SmallBitVector CalleeSaves = getRegisterSet(RegSet_CalleeSave, RegSet_None);
  RegsUsed = SmallBitVector(CalleeSaves.size());

  VarList SortedSpilledVariables;

  size_t GlobalsSize = 0;
  // If there is a separate locals area, this represents that area. Otherwise
  // it counts any variable not counted by GlobalsSize.
  SpillAreaSizeBytes = 0;
  // If there is a separate locals area, this specifies the alignment for it.
  uint32_t LocalsSlotsAlignmentBytes = 0;
  // The entire spill locations area gets aligned to largest natural alignment
  // of the variables that have a spill slot.
  uint32_t SpillAreaAlignmentBytes = 0;
  // For now, we don't have target-specific variables that need special
  // treatment (no stack-slot-linked SpillVariable type).
  // Variable64On32 pairs are excluded: their lo/hi halves get slots instead.
  std::function<bool(Variable *)> TargetVarHook = [](Variable *Var) {
    static constexpr bool AssignStackSlot = false;
    static constexpr bool DontAssignStackSlot = !AssignStackSlot;
    if (llvm::isa<Variable64On32>(Var)) {
      return DontAssignStackSlot;
    }
    return AssignStackSlot;
  };

  // Compute the list of spilled variables and bounds for GlobalsSize, etc.
  getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize,
                        &SpillAreaSizeBytes, &SpillAreaAlignmentBytes,
                        &LocalsSlotsAlignmentBytes, TargetVarHook);
  uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes;
  SpillAreaSizeBytes += GlobalsSize;

  PreservedGPRs.reserve(CalleeSaves.size());

  // Consider FP and RA as callee-save / used as needed.
  if (UsesFramePointer) {
    if (RegsUsed[RegMIPS32::Reg_FP]) {
      llvm::report_fatal_error("Frame pointer has been used.");
    }
    CalleeSaves[RegMIPS32::Reg_FP] = true;
    RegsUsed[RegMIPS32::Reg_FP] = true;
  }
  // A non-leaf function makes calls, so its return address must be preserved.
  if (!MaybeLeafFunc) {
    CalleeSaves[RegMIPS32::Reg_RA] = true;
    RegsUsed[RegMIPS32::Reg_RA] = true;
  }

  // Make two passes over the used registers. The first pass records all the
  // used registers -- and their aliases. Then, we figure out which GPR
  // registers should be saved.
  SmallBitVector ToPreserve(RegMIPS32::Reg_NUM);
  for (SizeT i = 0; i < CalleeSaves.size(); ++i) {
    if (CalleeSaves[i] && RegsUsed[i]) {
      ToPreserve |= RegisterAliases[i];
    }
  }

  uint32_t NumCallee = 0;

  // RegClasses is a tuple of
  //
  // <First Register in Class, Last Register in Class, Vector of Save
  // Registers>
  //
  // We use this tuple to figure out which register we should save/restore
  // during
  // prolog/epilog.
  using RegClassType = std::tuple<uint32_t, uint32_t, VarList *>;
  const RegClassType RegClass = RegClassType(
      RegMIPS32::Reg_GPR_First, RegMIPS32::Reg_GPR_Last, &PreservedGPRs);
  const uint32_t FirstRegInClass = std::get<0>(RegClass);
  const uint32_t LastRegInClass = std::get<1>(RegClass);
  VarList *const PreservedRegsInClass = std::get<2>(RegClass);
  // Walk the class from the highest register down, collecting those that must
  // be preserved and accumulating the total save-area size.
  for (uint32_t Reg = LastRegInClass; Reg > FirstRegInClass; Reg--) {
    if (!ToPreserve[Reg]) {
      continue;
    }
    ++NumCallee;
    Variable *PhysicalRegister = getPhysicalRegister(RegNumT::fromInt(Reg));
    PreservedRegsSizeBytes +=
        typeWidthInBytesOnStack(PhysicalRegister->getType());
    PreservedRegsInClass->push_back(PhysicalRegister);
  }

  Ctx->statsUpdateRegistersSaved(NumCallee);

  // Align the variables area. SpillAreaPaddingBytes is the size of the region
  // after the preserved registers and before the spill areas.
  // LocalsSlotsPaddingBytes is the amount of padding between the globals and
  // locals area if they are separate.
  assert(SpillAreaAlignmentBytes <= MIPS32_STACK_ALIGNMENT_BYTES);
  (void)MIPS32_STACK_ALIGNMENT_BYTES;
  assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes);
  uint32_t SpillAreaPaddingBytes = 0;
  uint32_t LocalsSlotsPaddingBytes = 0;
  alignStackSpillAreas(PreservedRegsSizeBytes, SpillAreaAlignmentBytes,
                       GlobalsSize, LocalsSlotsAlignmentBytes,
                       &SpillAreaPaddingBytes, &LocalsSlotsPaddingBytes);
  SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes;
  uint32_t GlobalsAndSubsequentPaddingSize =
      GlobalsSize + LocalsSlotsPaddingBytes;

  // Adds the out args space to the stack, and align SP if necessary.
  if (!NeedsStackAlignment) {
    SpillAreaSizeBytes += MaxOutArgsSizeBytes * (VariableAllocaUsed ? 0 : 1);
  } else {
    uint32_t StackOffset = PreservedRegsSizeBytes;
    uint32_t StackSize = applyStackAlignment(StackOffset + SpillAreaSizeBytes);
    if (!VariableAllocaUsed)
      StackSize = applyStackAlignment(StackSize + MaxOutArgsSizeBytes);
    SpillAreaSizeBytes = StackSize - StackOffset;
  }

  // Combine fixed alloca with SpillAreaSize.
  SpillAreaSizeBytes += FixedAllocaSizeBytes;

  TotalStackSizeBytes = PreservedRegsSizeBytes + SpillAreaSizeBytes;

  // Generate "addiu sp, sp, -TotalStackSizeBytes"
  if (TotalStackSizeBytes) {
    // Use the scratch register if needed to legalize the immediate.
    Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
    _addiu(SP, SP, -(TotalStackSizeBytes));
  }

  Ctx->statsUpdateFrameBytes(TotalStackSizeBytes);

  // Store each preserved register at its slot just below the old SP, working
  // downward from the top of the new frame.
  if (!PreservedGPRs.empty()) {
    uint32_t StackOffset = TotalStackSizeBytes;
    for (Variable *Var : *PreservedRegsInClass) {
      Variable *PhysicalRegister = getPhysicalRegister(Var->getRegNum());
      StackOffset -= typeWidthInBytesOnStack(PhysicalRegister->getType());
      Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
      OperandMIPS32Mem *MemoryLocation = OperandMIPS32Mem::create(
          Func, IceType_i32, SP,
          llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackOffset)));
      _sw(PhysicalRegister, MemoryLocation);
    }
  }

  Variable *FP = getPhysicalRegister(RegMIPS32::Reg_FP);

  // Generate "mov FP, SP" if needed.
  if (UsesFramePointer) {
    Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
    _mov(FP, SP);
    // Keep FP live for late-stage liveness analysis (e.g. asm-verbose mode).
    Context.insert<InstFakeUse>(FP);
  }

  // Fill in stack offsets for stack args, and copy args into registers for
  // those that were register-allocated. Args are pushed right to left, so
  // Arg[0] is closest to the stack/frame pointer.
  const VarList &Args = Func->getArgs();
  // Stack-passed args start after the 16-byte (4 GPR) register-save area.
  size_t InArgsSizeBytes = MIPS32_MAX_GPR_ARG * 4;
  TargetMIPS32::CallingConv CC;
  uint32_t ArgNo = 0;

  // NOTE(review): ArgNo is advanced only for in-register arguments here,
  // while lowerArguments() passes the plain argument index for every
  // argument. If an argument ever fails register assignment before the
  // FP-eligible positions (ArgNo 0/1) are consumed, the two passes could
  // classify later arguments differently — confirm whether ArgNo++ should be
  // unconditional.
  for (Variable *Arg : Args) {
    RegNumT DummyReg;
    const Type Ty = Arg->getType();
    // Skip arguments passed in registers.
    if (CC.argInReg(Ty, ArgNo, &DummyReg)) {
      ArgNo++;
      continue;
    } else {
      finishArgumentLowering(Arg, FP, TotalStackSizeBytes, &InArgsSizeBytes);
    }
  }

  // Fill in stack offsets for locals.
  assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes,
                      SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize);
  this->HasComputedFrame = true;

  // Optional verbose dump of the computed frame layout.
  if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) {
    OstreamLocker _(Func->getContext());
    Ostream &Str = Func->getContext()->getStrDump();

    Str << "Stack layout:\n";
    uint32_t SPAdjustmentPaddingSize =
        SpillAreaSizeBytes - LocalsSpillAreaSize -
        GlobalsAndSubsequentPaddingSize - SpillAreaPaddingBytes -
        MaxOutArgsSizeBytes;
    Str << " in-args = " << InArgsSizeBytes << " bytes\n"
        << " preserved registers = " << PreservedRegsSizeBytes << " bytes\n"
        << " spill area padding = " << SpillAreaPaddingBytes << " bytes\n"
        << " globals spill area = " << GlobalsSize << " bytes\n"
        << " globals-locals spill areas intermediate padding = "
        << GlobalsAndSubsequentPaddingSize - GlobalsSize << " bytes\n"
        << " locals spill area = " << LocalsSpillAreaSize << " bytes\n"
        << " SP alignment padding = " << SPAdjustmentPaddingSize << " bytes\n";

    Str << "Stack details:\n"
        << " SP adjustment = " << SpillAreaSizeBytes << " bytes\n"
        << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n"
        << " outgoing args size = " << MaxOutArgsSizeBytes << " bytes\n"
        << " locals spill area alignment = " << LocalsSlotsAlignmentBytes
        << " bytes\n"
        << " is FP based = " << 1 << "\n";
  }
  return;
}
1372
1373void TargetMIPS32::addEpilog(CfgNode *Node) {
Sagar Thakur633394c2016-06-25 08:34:10 -07001374 InstList &Insts = Node->getInsts();
1375 InstList::reverse_iterator RI, E;
1376 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) {
1377 if (llvm::isa<InstMIPS32Ret>(*RI))
1378 break;
1379 }
1380 if (RI == E)
1381 return;
1382
1383 // Convert the reverse_iterator position into its corresponding (forward)
1384 // iterator position.
Jim Stichnoth7c9728f2016-08-31 13:42:00 -07001385 InstList::iterator InsertPoint = reverseToForwardIterator(RI);
Sagar Thakur633394c2016-06-25 08:34:10 -07001386 --InsertPoint;
1387 Context.init(Node);
1388 Context.setInsertPoint(InsertPoint);
1389
1390 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
1391 if (UsesFramePointer) {
1392 Variable *FP = getPhysicalRegister(RegMIPS32::Reg_FP);
1393 // For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake
1394 // use of SP before the assignment of SP=FP keeps previous SP adjustments
1395 // from being dead-code eliminated.
1396 Context.insert<InstFakeUse>(SP);
1397 _mov(SP, FP);
1398 }
1399
1400 VarList::reverse_iterator RIter, END;
1401
1402 if (!PreservedGPRs.empty()) {
1403 uint32_t StackOffset = TotalStackSizeBytes - PreservedRegsSizeBytes;
1404 for (RIter = PreservedGPRs.rbegin(), END = PreservedGPRs.rend();
1405 RIter != END; ++RIter) {
1406 Variable *PhysicalRegister = getPhysicalRegister((*RIter)->getRegNum());
1407 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
1408 OperandMIPS32Mem *MemoryLocation = OperandMIPS32Mem::create(
1409 Func, IceType_i32, SP,
1410 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackOffset)));
1411 _lw(PhysicalRegister, MemoryLocation);
1412 StackOffset += typeWidthInBytesOnStack(PhysicalRegister->getType());
1413 }
1414 }
1415
1416 if (TotalStackSizeBytes) {
1417 _addiu(SP, SP, TotalStackSizeBytes);
1418 }
1419
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001420 return;
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07001421}
1422
Sagar Thakur5674c912016-07-14 14:50:37 -07001423Variable *TargetMIPS32::PostLoweringLegalizer::newBaseRegister(
1424 Variable *Base, int32_t Offset, RegNumT ScratchRegNum) {
1425 // Legalize will likely need a lui/ori combination, but if the top bits are
1426 // all 0 from negating the offset and subtracting, we could use that instead.
1427 const bool ShouldSub = Offset != 0 && (-Offset & 0xFFFF0000) == 0;
1428 Variable *ScratchReg = Target->makeReg(IceType_i32, ScratchRegNum);
1429 if (ShouldSub) {
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07001430 Variable *OffsetVal = Target->legalizeToReg(
1431 Target->Ctx->getConstantInt32(-Offset), ScratchRegNum);
Sagar Thakur5674c912016-07-14 14:50:37 -07001432 Target->_sub(ScratchReg, Base, OffsetVal);
1433 } else {
1434 Target->_addiu(ScratchReg, Base, Offset);
1435 }
1436
1437 return ScratchReg;
1438}
1439
1440void TargetMIPS32::PostLoweringLegalizer::legalizeMov(InstMIPS32Mov *MovInstr) {
1441 Variable *Dest = MovInstr->getDest();
1442 assert(Dest != nullptr);
1443 const Type DestTy = Dest->getType();
Sagar Thakur5674c912016-07-14 14:50:37 -07001444 assert(DestTy != IceType_i64);
1445
1446 Operand *Src = MovInstr->getSrc(0);
1447 const Type SrcTy = Src->getType();
1448 (void)SrcTy;
1449 assert(SrcTy != IceType_i64);
1450
1451 if (MovInstr->isMultiDest() || MovInstr->isMultiSource())
1452 return;
1453
1454 bool Legalized = false;
Jaydeep Patild3297662016-09-13 22:52:27 -07001455 auto *SrcR = llvm::cast<Variable>(Src);
1456 if (Dest->hasReg() && SrcR->hasReg()) {
1457 // This might be a GP to/from FP move generated due to argument passing.
1458 // Use mtc1/mfc1 instead of mov.[s/d] if src and dst registers are of
1459 // different types.
1460 const bool IsDstGPR = RegMIPS32::isGPRReg(Dest->getRegNum());
1461 const bool IsSrcGPR = RegMIPS32::isGPRReg(SrcR->getRegNum());
1462 const RegNumT SRegNum = SrcR->getRegNum();
1463 const RegNumT DRegNum = Dest->getRegNum();
1464 if (IsDstGPR != IsSrcGPR) {
1465 if (IsDstGPR) {
1466 // Dest is GPR and SrcR is FPR. Use mfc1.
1467 if (typeWidthInBytes(Dest->getType()) == 8) {
1468 // Split it into two mfc1 instructions
1469 Variable *SrcGPRHi = Target->makeReg(
1470 IceType_f32, RegMIPS32::get64PairFirstRegNum(SRegNum));
1471 Variable *SrcGPRLo = Target->makeReg(
1472 IceType_f32, RegMIPS32::get64PairSecondRegNum(SRegNum));
1473 Variable *DstFPRHi = Target->makeReg(
1474 IceType_i32, RegMIPS32::get64PairFirstRegNum(DRegNum));
1475 Variable *DstFPRLo = Target->makeReg(
1476 IceType_i32, RegMIPS32::get64PairSecondRegNum(DRegNum));
1477 Target->_mov(DstFPRHi, SrcGPRLo);
1478 Target->_mov(DstFPRLo, SrcGPRHi);
1479 Legalized = true;
1480 } else {
1481 Variable *SrcGPR = Target->makeReg(IceType_f32, SRegNum);
1482 Variable *DstFPR = Target->makeReg(IceType_i32, DRegNum);
1483 Target->_mov(DstFPR, SrcGPR);
1484 Legalized = true;
1485 }
1486 } else {
1487 // Dest is FPR and SrcR is GPR. Use mtc1.
1488 if (typeWidthInBytes(SrcR->getType()) == 8) {
1489 // Split it into two mtc1 instructions
1490 Variable *SrcGPRHi = Target->makeReg(
1491 IceType_i32, RegMIPS32::get64PairFirstRegNum(SRegNum));
1492 Variable *SrcGPRLo = Target->makeReg(
1493 IceType_i32, RegMIPS32::get64PairSecondRegNum(SRegNum));
1494 Variable *DstFPRHi = Target->makeReg(
1495 IceType_f32, RegMIPS32::get64PairFirstRegNum(DRegNum));
1496 Variable *DstFPRLo = Target->makeReg(
1497 IceType_f32, RegMIPS32::get64PairSecondRegNum(DRegNum));
1498 Target->_mov(DstFPRHi, SrcGPRLo);
1499 Target->_mov(DstFPRLo, SrcGPRHi);
1500 Legalized = true;
1501 } else {
1502 Variable *SrcGPR = Target->makeReg(IceType_i32, SRegNum);
1503 Variable *DstFPR = Target->makeReg(IceType_f32, DRegNum);
1504 Target->_mov(DstFPR, SrcGPR);
1505 Legalized = true;
1506 }
1507 }
1508 }
1509 if (Legalized) {
1510 if (MovInstr->isDestRedefined()) {
1511 Target->_set_dest_redefined();
1512 }
1513 MovInstr->setDeleted();
1514 return;
1515 }
1516 }
1517
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07001518 if (!Dest->hasReg()) {
1519 auto *SrcR = llvm::cast<Variable>(Src);
1520 assert(SrcR->hasReg());
1521 assert(!SrcR->isRematerializable());
1522 const int32_t Offset = Dest->getStackOffset();
Sagar Thakur5674c912016-07-14 14:50:37 -07001523
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07001524 // This is a _mov(Mem(), Variable), i.e., a store.
1525 auto *Base = Target->getPhysicalRegister(Target->getFrameOrStackReg());
Sagar Thakur5674c912016-07-14 14:50:37 -07001526
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07001527 OperandMIPS32Mem *Addr = OperandMIPS32Mem::create(
1528 Target->Func, DestTy, Base,
1529 llvm::cast<ConstantInteger32>(Target->Ctx->getConstantInt32(Offset)));
1530
1531 // FP arguments are passed in GP reg if first argument is in GP. In this
1532 // case type of the SrcR is still FP thus we need to explicitly generate sw
1533 // instead of swc1.
1534 const RegNumT RegNum = SrcR->getRegNum();
Jaydeep Patild3297662016-09-13 22:52:27 -07001535 const bool IsSrcGPReg = RegMIPS32::isGPRReg(SrcR->getRegNum());
1536 if (SrcTy == IceType_f32 && IsSrcGPReg == true) {
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07001537 Variable *SrcGPR = Target->makeReg(IceType_i32, RegNum);
1538 Target->_sw(SrcGPR, Addr);
Jaydeep Patild3297662016-09-13 22:52:27 -07001539 } else if (SrcTy == IceType_f64 && IsSrcGPReg == true) {
1540 Variable *SrcGPRHi =
1541 Target->makeReg(IceType_i32, RegMIPS32::get64PairFirstRegNum(RegNum));
1542 Variable *SrcGPRLo = Target->makeReg(
1543 IceType_i32, RegMIPS32::get64PairSecondRegNum(RegNum));
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07001544 OperandMIPS32Mem *AddrHi = OperandMIPS32Mem::create(
1545 Target->Func, DestTy, Base,
1546 llvm::cast<ConstantInteger32>(
1547 Target->Ctx->getConstantInt32(Offset + 4)));
1548 Target->_sw(SrcGPRLo, Addr);
1549 Target->_sw(SrcGPRHi, AddrHi);
1550 } else {
1551 Target->_sw(SrcR, Addr);
1552 }
1553
1554 Target->Context.insert<InstFakeDef>(Dest);
1555 Legalized = true;
1556 } else if (auto *Var = llvm::dyn_cast<Variable>(Src)) {
1557 if (Var->isRematerializable()) {
1558 // This is equivalent to an x86 _lea(RematOffset(%esp/%ebp), Variable).
1559
1560 // ExtraOffset is only needed for frame-pointer based frames as we have
1561 // to account for spill storage.
1562 const int32_t ExtraOffset = (Var->getRegNum() == Target->getFrameReg())
1563 ? Target->getFrameFixedAllocaOffset()
1564 : 0;
1565
1566 const int32_t Offset = Var->getStackOffset() + ExtraOffset;
1567 Variable *Base = Target->getPhysicalRegister(Var->getRegNum());
1568 Variable *T = newBaseRegister(Base, Offset, Dest->getRegNum());
1569 Target->_mov(Dest, T);
1570 Legalized = true;
1571 } else {
1572 if (!Var->hasReg()) {
1573 // This is a _mov(Variable, Mem()), i.e., a load.
1574 const int32_t Offset = Var->getStackOffset();
1575 auto *Base = Target->getPhysicalRegister(Target->getFrameOrStackReg());
1576 OperandMIPS32Mem *Addr;
1577 Addr = OperandMIPS32Mem::create(
1578 Target->Func, DestTy, Base,
1579 llvm::cast<ConstantInteger32>(
1580 Target->Ctx->getConstantInt32(Offset)));
1581 Target->_lw(Dest, Addr);
Sagar Thakur5674c912016-07-14 14:50:37 -07001582 Legalized = true;
Sagar Thakur5674c912016-07-14 14:50:37 -07001583 }
1584 }
Sagar Thakur5674c912016-07-14 14:50:37 -07001585 }
1586
1587 if (Legalized) {
1588 if (MovInstr->isDestRedefined()) {
1589 Target->_set_dest_redefined();
1590 }
1591 MovInstr->setDeleted();
1592 }
1593}
1594
1595void TargetMIPS32::postLowerLegalization() {
1596 Func->dump("Before postLowerLegalization");
1597 assert(hasComputedFrame());
1598 for (CfgNode *Node : Func->getNodes()) {
1599 Context.init(Node);
1600 PostLoweringLegalizer Legalizer(this);
1601 while (!Context.atEnd()) {
1602 PostIncrLoweringContext PostIncrement(Context);
1603 Inst *CurInstr = iteratorToInst(Context.getCur());
1604
1605 // TODO(sagar.thakur): Add remaining cases of legalization.
1606
1607 if (auto *MovInstr = llvm::dyn_cast<InstMIPS32Mov>(CurInstr)) {
1608 Legalizer.legalizeMov(MovInstr);
1609 }
1610 }
1611 }
1612}
1613
Jim Stichnothac8da5c2015-10-21 06:57:46 -07001614Operand *TargetMIPS32::loOperand(Operand *Operand) {
1615 assert(Operand->getType() == IceType_i64);
1616 if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
1617 return Var64On32->getLo();
1618 if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
1619 return Ctx->getConstantInt32(static_cast<uint32_t>(Const->getValue()));
1620 }
1621 if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Operand)) {
1622 // Conservatively disallow memory operands with side-effects (pre/post
1623 // increment) in case of duplication.
1624 assert(Mem->getAddrMode() == OperandMIPS32Mem::Offset);
1625 return OperandMIPS32Mem::create(Func, IceType_i32, Mem->getBase(),
1626 Mem->getOffset(), Mem->getAddrMode());
1627 }
1628 llvm_unreachable("Unsupported operand type");
1629 return nullptr;
1630}
1631
// Returns an operand naming the high-order 32 bits of a 64-bit operand.
// For memory operands this may emit an add instruction (via lowerArithmetic)
// to materialize a new base register when the +4 offset does not fit.
Operand *TargetMIPS32::hiOperand(Operand *Operand) {
  assert(Operand->getType() == IceType_i64);
  // Defensive check: in release builds (assert compiled out) a non-i64
  // operand is returned unchanged rather than mis-split.
  if (Operand->getType() != IceType_i64)
    return Operand;
  if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
    return Var64On32->getHi();
  if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
    // The high half is the top 32 bits of the 64-bit constant.
    return Ctx->getConstantInt32(
        static_cast<uint32_t>(Const->getValue() >> 32));
  }
  if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Operand)) {
    // Conservatively disallow memory operands with side-effects
    // in case of duplication.
    assert(Mem->getAddrMode() == OperandMIPS32Mem::Offset);
    const Type SplitType = IceType_i32;
    Variable *Base = Mem->getBase();
    auto *Offset = llvm::cast<ConstantInteger32>(Mem->getOffset());
    assert(!Utils::WouldOverflowAdd(Offset->getValue(), 4));
    // The high word lives 4 bytes past the low word.
    int32_t NextOffsetVal = Offset->getValue() + 4;
    constexpr bool SignExt = false;
    if (!OperandMIPS32Mem::canHoldOffset(SplitType, SignExt, NextOffsetVal)) {
      // We have to make a temp variable and add 4 to either Base or Offset.
      // If we add 4 to Offset, this will convert a non-RegReg addressing
      // mode into a RegReg addressing mode. Since NaCl sandboxing disallows
      // RegReg addressing modes, prefer adding to base and replacing instead.
      // Thus we leave the old offset alone.
      Constant *Four = Ctx->getConstantInt32(4);
      Variable *NewBase = Func->makeVariable(Base->getType());
      lowerArithmetic(InstArithmetic::create(Func, InstArithmetic::Add, NewBase,
                                             Base, Four));
      Base = NewBase;
    } else {
      // The bumped offset is encodable; use it with the original base.
      Offset =
          llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(NextOffsetVal));
    }
    return OperandMIPS32Mem::create(Func, SplitType, Base, Offset,
                                    Mem->getAddrMode());
  }
  llvm_unreachable("Unsupported operand type");
  return nullptr;
}
1673
// Builds the set of physical registers selected by the Include mask and then
// filtered by the Exclude mask. Include bits are applied first, so a register
// class named by both masks ends up excluded.
SmallBitVector TargetMIPS32::getRegisterSet(RegSetMask Include,
                                            RegSetMask Exclude) const {
  SmallBitVector Registers(RegMIPS32::Reg_NUM);

// X expands once per entry of REGMIPS32_TABLE; the scratch/preserved/
// stackptr/frameptr flags come from the table and gate membership in the
// corresponding RegSet_* classes.
#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt,    \
          isI64Pair, isFP32, isFP64, isVec128, alias_init)                     \
  if (scratch && (Include & RegSet_CallerSave))                                \
    Registers[RegMIPS32::val] = true;                                          \
  if (preserved && (Include & RegSet_CalleeSave))                              \
    Registers[RegMIPS32::val] = true;                                          \
  if (stackptr && (Include & RegSet_StackPointer))                             \
    Registers[RegMIPS32::val] = true;                                          \
  if (frameptr && (Include & RegSet_FramePointer))                             \
    Registers[RegMIPS32::val] = true;                                          \
  if (scratch && (Exclude & RegSet_CallerSave))                                \
    Registers[RegMIPS32::val] = false;                                         \
  if (preserved && (Exclude & RegSet_CalleeSave))                              \
    Registers[RegMIPS32::val] = false;                                         \
  if (stackptr && (Exclude & RegSet_StackPointer))                             \
    Registers[RegMIPS32::val] = false;                                         \
  if (frameptr && (Exclude & RegSet_FramePointer))                             \
    Registers[RegMIPS32::val] = false;

  REGMIPS32_TABLE

#undef X

  return Registers;
}
1703
// Lowers an alloca. Constant-size allocas are folded into the fixed frame
// area (FixedAllocaSizeBytes); variable-size allocas adjust SP at runtime
// and force a frame pointer.
void TargetMIPS32::lowerAlloca(const InstAlloca *Instr) {
  // Conservatively require the stack to be aligned. Some stack adjustment
  // operations implemented below assume that the stack is aligned before the
  // alloca. All the alloca code ensures that the stack alignment is preserved
  // after the alloca. The stack alignment restriction can be relaxed in some
  // cases.
  NeedsStackAlignment = true;

  // For default align=0, set it to the real value 1, to avoid any
  // bit-manipulation problems below.
  const uint32_t AlignmentParam = std::max(1u, Instr->getAlignInBytes());

  // LLVM enforces power of 2 alignment.
  assert(llvm::isPowerOf2_32(AlignmentParam));
  assert(llvm::isPowerOf2_32(MIPS32_STACK_ALIGNMENT_BYTES));

  // Never align to less than the stack's own alignment requirement.
  const uint32_t Alignment =
      std::max(AlignmentParam, MIPS32_STACK_ALIGNMENT_BYTES);
  const bool OverAligned = Alignment > MIPS32_STACK_ALIGNMENT_BYTES;
  const bool OptM1 = Func->getOptLevel() == Opt_m1;
  const bool AllocaWithKnownOffset = Instr->getKnownFrameOffset();
  // A frame pointer is needed whenever the alloca's offset from SP cannot be
  // statically determined (over-alignment, unknown offset, or -Om1).
  const bool UseFramePointer =
      hasFramePointer() || OverAligned || !AllocaWithKnownOffset || OptM1;

  if (UseFramePointer)
    setHasFramePointer();

  Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);

  Variable *Dest = Instr->getDest();
  Operand *TotalSize = Instr->getSizeInBytes();

  if (const auto *ConstantTotalSize =
          llvm::dyn_cast<ConstantInteger32>(TotalSize)) {
    // Round the requested size up to the required alignment and account for
    // it in the fixed frame area; the prologue performs the SP adjustment.
    const uint32_t Value =
        Utils::applyAlignment(ConstantTotalSize->getValue(), Alignment);
    FixedAllocaSizeBytes += Value;
    // Constant size alloca.
    if (!UseFramePointer) {
      // If we don't need a Frame Pointer, this alloca has a known offset to the
      // stack pointer. We don't need adjust the stack pointer, nor assign any
      // value to Dest, as Dest is rematerializable.
      assert(Dest->isRematerializable());
      Context.insert<InstFakeDef>(Dest);
      return;
    }
  } else {
    // Non-constant sizes need to be adjusted to the next highest multiple of
    // the required alignment at runtime.
    VariableAllocaUsed = true;
    Variable *AlignAmount;
    auto *TotalSizeR = legalizeToReg(TotalSize, Legal_Reg);
    auto *T1 = I32Reg();
    auto *T2 = I32Reg();
    auto *T3 = I32Reg();
    auto *T4 = I32Reg();
    auto *T5 = I32Reg();
    // T3 = (size + align - 1) & -align, i.e. size rounded up to the stack
    // alignment; T2 holds the -align mask built from the zero register.
    _addiu(T1, TotalSizeR, MIPS32_STACK_ALIGNMENT_BYTES - 1);
    _addiu(T2, getZero(), -MIPS32_STACK_ALIGNMENT_BYTES);
    _and(T3, T1, T2);
    // Grow the stack downward by the rounded size.
    _subu(T4, SP, T3);
    if (Instr->getAlignInBytes()) {
      // An explicit alignment request also aligns the returned address.
      AlignAmount =
          legalizeToReg(Ctx->getConstantInt32(-AlignmentParam), Legal_Reg);
      _and(T5, T4, AlignAmount);
      _mov(Dest, T5);
    } else {
      _mov(Dest, T4);
    }
    _mov(SP, Dest);
    return;
  }

  // Constant-size alloca with a frame pointer: the returned address is SP
  // plus the out-args area reserved below it.
  // Add enough to the returned address to account for the out args area.
  if (MaxOutArgsSizeBytes > 0) {
    Variable *T = makeReg(getPointerType());
    _addiu(T, SP, MaxOutArgsSizeBytes);
    _mov(Dest, T);
  } else {
    _mov(Dest, SP);
  }
}
1786
// Lowers a 64-bit integer arithmetic instruction by operating on the 32-bit
// lo/hi halves of the operands. Only Add/And/Or/Sub/Xor/Mul are handled;
// everything else reports UnimplementedLoweringError before any operands are
// legalized (so Om1 register allocation never sees dead defs).
void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
                                        Variable *Dest, Operand *Src0,
                                        Operand *Src1) {
  InstArithmetic::OpKind Op = Instr->getOp();
  switch (Op) {
  case InstArithmetic::Add:
  case InstArithmetic::And:
  case InstArithmetic::Or:
  case InstArithmetic::Sub:
  case InstArithmetic::Xor:
  case InstArithmetic::Mul:
    break;
  default:
    UnimplementedLoweringError(this, Instr);
    return;
  }
  auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
  auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
  Variable *Src0LoR = legalizeToReg(loOperand(Src0));
  Variable *Src1LoR = legalizeToReg(loOperand(Src1));
  Variable *Src0HiR = legalizeToReg(hiOperand(Src0));
  Variable *Src1HiR = legalizeToReg(hiOperand(Src1));

  switch (Op) {
  case InstArithmetic::_num:
    llvm::report_fatal_error("Unknown arithmetic operator");
    return;
  case InstArithmetic::Add: {
    // lo = lo0 + lo1; carry = (lo < lo0) unsigned; hi = hi0 + hi1 + carry.
    auto *T_Carry = I32Reg(), *T_Lo = I32Reg(), *T_Hi = I32Reg(),
         *T_Hi2 = I32Reg();
    _addu(T_Lo, Src0LoR, Src1LoR);
    _mov(DestLo, T_Lo);
    _sltu(T_Carry, T_Lo, Src0LoR);
    _addu(T_Hi, T_Carry, Src0HiR);
    _addu(T_Hi2, Src1HiR, T_Hi);
    _mov(DestHi, T_Hi2);
    return;
  }
  case InstArithmetic::And: {
    // Bitwise ops operate on the halves independently.
    auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
    _and(T_Lo, Src0LoR, Src1LoR);
    _mov(DestLo, T_Lo);
    _and(T_Hi, Src0HiR, Src1HiR);
    _mov(DestHi, T_Hi);
    return;
  }
  case InstArithmetic::Sub: {
    // lo = lo0 - lo1; borrow = (lo0 < lo1) unsigned;
    // hi = hi0 - (hi1 + borrow).
    auto *T_Borrow = I32Reg(), *T_Lo = I32Reg(), *T_Hi = I32Reg(),
         *T_Hi2 = I32Reg();
    _subu(T_Lo, Src0LoR, Src1LoR);
    _mov(DestLo, T_Lo);
    _sltu(T_Borrow, Src0LoR, Src1LoR);
    _addu(T_Hi, T_Borrow, Src1HiR);
    _subu(T_Hi2, Src0HiR, T_Hi);
    _mov(DestHi, T_Hi2);
    return;
  }
  case InstArithmetic::Or: {
    auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
    _or(T_Lo, Src0LoR, Src1LoR);
    _mov(DestLo, T_Lo);
    _or(T_Hi, Src0HiR, Src1HiR);
    _mov(DestHi, T_Hi);
    return;
  }
  case InstArithmetic::Xor: {
    auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
    _xor(T_Lo, Src0LoR, Src1LoR);
    _mov(DestLo, T_Lo);
    _xor(T_Hi, Src0HiR, Src1HiR);
    _mov(DestHi, T_Hi);
    return;
  }
  case InstArithmetic::Mul: {
    // 64x64 -> low 64 product:
    //   {hi,lo} = lo0 * lo1 (unsigned, via multu/mflo/mfhi);
    //   hi += hi0*lo1 + lo0*hi1.
    // TODO(rkotler): Make sure that mul has the side effect of clobbering
    // LO, HI. Check for any other LO, HI quirkiness in this section.
    auto *T_Lo = I32Reg(RegMIPS32::Reg_LO), *T_Hi = I32Reg(RegMIPS32::Reg_HI);
    auto *T1 = I32Reg(), *T2 = I32Reg();
    auto *TM1 = I32Reg(), *TM2 = I32Reg(), *TM3 = I32Reg(), *TM4 = I32Reg();
    _multu(T_Lo, Src0LoR, Src1LoR);
    // multu defines both LO and HI; the FakeDef makes HI's definition visible
    // to liveness analysis.
    Context.insert<InstFakeDef>(T_Hi, T_Lo);
    _mflo(T1, T_Lo);
    _mfhi(T2, T_Hi);
    _mov(DestLo, T1);
    _mul(TM1, Src0HiR, Src1LoR);
    _mul(TM2, Src0LoR, Src1HiR);
    _addu(TM3, TM1, T2);
    _addu(TM4, TM3, TM2);
    _mov(DestHi, TM4);
    return;
  }
  default:
    // Unreachable: the first switch already filtered unsupported ops.
    UnimplementedLoweringError(this, Instr);
    return;
  }
}
1883
Reed Kotler04bca5a2016-02-03 14:40:47 -08001884void TargetMIPS32::lowerArithmetic(const InstArithmetic *Instr) {
1885 Variable *Dest = Instr->getDest();
Jim Stichnoth91c773e2016-01-19 09:52:22 -08001886 // We need to signal all the UnimplementedLoweringError errors before any
1887 // legalization into new variables, otherwise Om1 register allocation may fail
1888 // when it sees variables that are defined but not used.
Reed Kotler00e36042016-02-01 20:52:19 -08001889 Type DestTy = Dest->getType();
Reed Kotler04bca5a2016-02-03 14:40:47 -08001890 Operand *Src0 = legalizeUndef(Instr->getSrc(0));
1891 Operand *Src1 = legalizeUndef(Instr->getSrc(1));
Reed Kotler00e36042016-02-01 20:52:19 -08001892 if (DestTy == IceType_i64) {
Reed Kotler04bca5a2016-02-03 14:40:47 -08001893 lowerInt64Arithmetic(Instr, Instr->getDest(), Src0, Src1);
Reed Kotler37af5b02015-11-05 17:07:19 -08001894 return;
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07001895 }
Reed Kotler37af5b02015-11-05 17:07:19 -08001896 if (isVectorType(Dest->getType())) {
Reed Kotler04bca5a2016-02-03 14:40:47 -08001897 UnimplementedLoweringError(this, Instr);
Reed Kotler37af5b02015-11-05 17:07:19 -08001898 return;
1899 }
Reed Kotler04bca5a2016-02-03 14:40:47 -08001900 switch (Instr->getOp()) {
Jim Stichnoth91c773e2016-01-19 09:52:22 -08001901 default:
1902 break;
Jim Stichnoth91c773e2016-01-19 09:52:22 -08001903 case InstArithmetic::Frem:
Reed Kotler04bca5a2016-02-03 14:40:47 -08001904 UnimplementedLoweringError(this, Instr);
Jim Stichnoth91c773e2016-01-19 09:52:22 -08001905 return;
1906 }
1907
1908 // At this point Dest->getType() is non-i64 scalar
1909
Reed Kotler37af5b02015-11-05 17:07:19 -08001910 Variable *T = makeReg(Dest->getType());
1911 Variable *Src0R = legalizeToReg(Src0);
1912 Variable *Src1R = legalizeToReg(Src1);
Jim Stichnoth91c773e2016-01-19 09:52:22 -08001913
Reed Kotler04bca5a2016-02-03 14:40:47 -08001914 switch (Instr->getOp()) {
Reed Kotler37af5b02015-11-05 17:07:19 -08001915 case InstArithmetic::_num:
1916 break;
1917 case InstArithmetic::Add:
Reed Kotler00e36042016-02-01 20:52:19 -08001918 _addu(T, Src0R, Src1R);
Reed Kotler37af5b02015-11-05 17:07:19 -08001919 _mov(Dest, T);
1920 return;
1921 case InstArithmetic::And:
1922 _and(T, Src0R, Src1R);
1923 _mov(Dest, T);
1924 return;
1925 case InstArithmetic::Or:
1926 _or(T, Src0R, Src1R);
1927 _mov(Dest, T);
1928 return;
1929 case InstArithmetic::Xor:
1930 _xor(T, Src0R, Src1R);
1931 _mov(Dest, T);
1932 return;
1933 case InstArithmetic::Sub:
Reed Kotler00e36042016-02-01 20:52:19 -08001934 _subu(T, Src0R, Src1R);
Reed Kotler37af5b02015-11-05 17:07:19 -08001935 _mov(Dest, T);
1936 return;
1937 case InstArithmetic::Mul: {
1938 _mul(T, Src0R, Src1R);
1939 _mov(Dest, T);
1940 return;
1941 }
Srdjan Obucinac2ee36a2016-05-17 13:16:02 -07001942 case InstArithmetic::Shl: {
1943 _sllv(T, Src0R, Src1R);
1944 _mov(Dest, T);
1945 return;
1946 }
1947 case InstArithmetic::Lshr: {
1948 _srlv(T, Src0R, Src1R);
1949 _mov(Dest, T);
1950 return;
1951 }
1952 case InstArithmetic::Ashr: {
1953 _srav(T, Src0R, Src1R);
1954 _mov(Dest, T);
1955 return;
1956 }
Srdjan Obucinaae93eee2016-05-18 11:31:15 -07001957 case InstArithmetic::Udiv: {
1958 auto *T_Zero = I32Reg(RegMIPS32::Reg_ZERO);
1959 _divu(T_Zero, Src0R, Src1R);
1960 _mflo(T, T_Zero);
1961 _mov(Dest, T);
1962 return;
1963 }
1964 case InstArithmetic::Sdiv: {
1965 auto *T_Zero = I32Reg(RegMIPS32::Reg_ZERO);
1966 _div(T_Zero, Src0R, Src1R);
1967 _mflo(T, T_Zero);
1968 _mov(Dest, T);
1969 return;
1970 }
1971 case InstArithmetic::Urem: {
1972 auto *T_Zero = I32Reg(RegMIPS32::Reg_ZERO);
1973 _divu(T_Zero, Src0R, Src1R);
1974 _mfhi(T, T_Zero);
1975 _mov(Dest, T);
1976 return;
1977 }
1978 case InstArithmetic::Srem: {
1979 auto *T_Zero = I32Reg(RegMIPS32::Reg_ZERO);
1980 _div(T_Zero, Src0R, Src1R);
1981 _mfhi(T, T_Zero);
1982 _mov(Dest, T);
1983 return;
1984 }
Srdjan Obucinaab6a04f2016-07-11 20:23:50 -07001985 case InstArithmetic::Fadd: {
1986 if (DestTy == IceType_f32) {
1987 _add_s(T, Src0R, Src1R);
1988 _mov(Dest, T);
1989 return;
1990 }
1991 if (DestTy == IceType_f64) {
1992 _add_d(T, Src0R, Src1R);
1993 _mov(Dest, T);
1994 return;
1995 }
Reed Kotler37af5b02015-11-05 17:07:19 -08001996 break;
Srdjan Obucinaab6a04f2016-07-11 20:23:50 -07001997 }
Reed Kotler37af5b02015-11-05 17:07:19 -08001998 case InstArithmetic::Fsub:
Srdjan Obucinaab6a04f2016-07-11 20:23:50 -07001999 if (DestTy == IceType_f32) {
2000 _sub_s(T, Src0R, Src1R);
2001 _mov(Dest, T);
2002 return;
2003 }
2004 if (DestTy == IceType_f64) {
2005 _sub_d(T, Src0R, Src1R);
2006 _mov(Dest, T);
2007 return;
2008 }
Reed Kotler37af5b02015-11-05 17:07:19 -08002009 break;
2010 case InstArithmetic::Fmul:
Srdjan Obucinaab6a04f2016-07-11 20:23:50 -07002011 if (DestTy == IceType_f32) {
2012 _mul_s(T, Src0R, Src1R);
2013 _mov(Dest, T);
2014 return;
2015 }
2016 if (DestTy == IceType_f64) {
2017 _mul_d(T, Src0R, Src1R);
2018 _mov(Dest, T);
2019 return;
2020 }
Reed Kotler37af5b02015-11-05 17:07:19 -08002021 break;
2022 case InstArithmetic::Fdiv:
Srdjan Obucinaab6a04f2016-07-11 20:23:50 -07002023 if (DestTy == IceType_f32) {
2024 _div_s(T, Src0R, Src1R);
2025 _mov(Dest, T);
2026 return;
2027 }
2028 if (DestTy == IceType_f64) {
2029 _div_d(T, Src0R, Src1R);
2030 _mov(Dest, T);
2031 return;
2032 }
Reed Kotler37af5b02015-11-05 17:07:19 -08002033 break;
2034 case InstArithmetic::Frem:
2035 break;
2036 }
Reed Kotler04bca5a2016-02-03 14:40:47 -08002037 UnimplementedLoweringError(this, Instr);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002038}
2039
Reed Kotler04bca5a2016-02-03 14:40:47 -08002040void TargetMIPS32::lowerAssign(const InstAssign *Instr) {
2041 Variable *Dest = Instr->getDest();
Sagar Thakur5674c912016-07-14 14:50:37 -07002042
2043 if (Dest->isRematerializable()) {
2044 Context.insert<InstFakeDef>(Dest);
2045 return;
2046 }
2047
Reed Kotler04bca5a2016-02-03 14:40:47 -08002048 Operand *Src0 = Instr->getSrc(0);
Jim Stichnothac8da5c2015-10-21 06:57:46 -07002049 assert(Dest->getType() == Src0->getType());
2050 if (Dest->getType() == IceType_i64) {
2051 Src0 = legalizeUndef(Src0);
2052 Operand *Src0Lo = legalize(loOperand(Src0), Legal_Reg);
2053 Operand *Src0Hi = legalize(hiOperand(Src0), Legal_Reg);
Jim Stichnoth54f3d512015-12-11 09:53:00 -08002054 auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
2055 auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Reed Kotler953568f2016-02-17 05:37:01 -08002056 auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
Jim Stichnothac8da5c2015-10-21 06:57:46 -07002057 _mov(T_Lo, Src0Lo);
2058 _mov(DestLo, T_Lo);
2059 _mov(T_Hi, Src0Hi);
2060 _mov(DestHi, T_Hi);
2061 } else {
2062 Operand *SrcR;
2063 if (Dest->hasReg()) {
2064 // If Dest already has a physical register, then legalize the Src operand
2065 // into a Variable with the same register assignment. This especially
2066 // helps allow the use of Flex operands.
2067 SrcR = legalize(Src0, Legal_Reg, Dest->getRegNum());
2068 } else {
2069 // Dest could be a stack operand. Since we could potentially need
2070 // to do a Store (and store can only have Register operands),
2071 // legalize this to a register.
2072 SrcR = legalize(Src0, Legal_Reg);
2073 }
2074 if (isVectorType(Dest->getType())) {
Reed Kotler04bca5a2016-02-03 14:40:47 -08002075 UnimplementedLoweringError(this, Instr);
Jim Stichnothac8da5c2015-10-21 06:57:46 -07002076 } else {
2077 _mov(Dest, SrcR);
2078 }
2079 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002080}
2081
// Lowers a branch instruction. Unconditional branches are emitted directly.
// For conditional branches, if the boolean's producer is a known icmp, the
// compare is folded into the branch; the emitted branch condition is the
// negation of the icmp condition (NOTE(review): presumably _br transfers to
// TargetFalse when the condition holds and falls through to TargetTrue —
// confirm against InstMIPS32Br semantics).
void TargetMIPS32::lowerBr(const InstBr *Instr) {
  if (Instr->isUnconditional()) {
    _br(Instr->getTargetUnconditional());
    return;
  }
  CfgNode *TargetTrue = Instr->getTargetTrue();
  CfgNode *TargetFalse = Instr->getTargetFalse();
  Operand *Boolean = Instr->getCondition();
  const Inst *Producer = Computations.getProducerOf(Boolean);
  if (Producer == nullptr) {
    // Since we don't know the producer of this boolean we will assume its
    // producer will keep it in positive logic and just emit beqz with this
    // Boolean as an operand.
    auto *BooleanR = legalizeToReg(Boolean);
    _br(TargetTrue, TargetFalse, BooleanR, CondMIPS32::Cond::EQZ);
    return;
  }
  // NOTE(review): if Producer is non-null but not an icmp, no branch is
  // emitted at all — this assumes the bool-folding computation only records
  // icmp producers on MIPS32. TODO confirm against the Computations setup.
  if (Producer->getKind() == Inst::Icmp) {
    const InstIcmp *CompareInst = llvm::cast<InstIcmp>(Producer);
    Operand *Src0 = CompareInst->getSrc(0);
    Operand *Src1 = CompareInst->getSrc(1);
    const Type Src0Ty = Src0->getType();
    assert(Src0Ty == Src1->getType());
    if (Src0Ty == IceType_i64) {
      // 64-bit compare-and-branch not yet implemented.
      UnimplementedLoweringError(this, Instr);
      return;
    }
    auto *Src0R = legalizeToReg(Src0);
    auto *Src1R = legalizeToReg(Src1);
    // DestT holds the slt/sltu result for the relational conditions.
    auto *DestT = makeReg(Src0Ty);
    switch (CompareInst->getCondition()) {
    default:
      // All InstIcmp conditions are enumerated below; default is unreachable.
      break;
    case InstIcmp::Eq: {
      // eq: branch on NE (inverted).
      _br(TargetTrue, TargetFalse, Src0R, Src1R, CondMIPS32::Cond::NE);
      break;
    }
    case InstIcmp::Ne: {
      _br(TargetTrue, TargetFalse, Src0R, Src1R, CondMIPS32::Cond::EQ);
      break;
    }
    case InstIcmp::Ugt: {
      // ugt: DestT = (Src1 <u Src0); inverted branch when result is zero.
      _sltu(DestT, Src1R, Src0R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::EQZ);
      break;
    }
    case InstIcmp::Uge: {
      // uge: !(Src0 <u Src1); inverted branch when the less-than is nonzero.
      _sltu(DestT, Src0R, Src1R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::NEZ);
      break;
    }
    case InstIcmp::Ult: {
      _sltu(DestT, Src0R, Src1R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::EQZ);
      break;
    }
    case InstIcmp::Ule: {
      _sltu(DestT, Src1R, Src0R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::NEZ);
      break;
    }
    case InstIcmp::Sgt: {
      // Signed variants use slt instead of sltu; same inversion scheme.
      _slt(DestT, Src1R, Src0R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::EQZ);
      break;
    }
    case InstIcmp::Sge: {
      _slt(DestT, Src0R, Src1R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::NEZ);
      break;
    }
    case InstIcmp::Slt: {
      _slt(DestT, Src0R, Src1R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::EQZ);
      break;
    }
    case InstIcmp::Sle: {
      _slt(DestT, Src1R, Src0R);
      _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::NEZ);
      break;
    }
    }
  }
}
2166
Reed Kotler00e36042016-02-01 20:52:19 -08002167void TargetMIPS32::lowerCall(const InstCall *Instr) {
Mohit Bhakkad9b4c3c32016-06-22 05:44:05 -07002168 NeedsStackAlignment = true;
2169
2170 // Assign arguments to registers and stack. Also reserve stack.
2171 TargetMIPS32::CallingConv CC;
2172
2173 // Pair of Arg Operand -> GPR number assignments.
2174 llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_GPR_ARG> GPRArgs;
2175 llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_FP_ARG> FPArgs;
2176 // Pair of Arg Operand -> stack offset.
2177 llvm::SmallVector<std::pair<Operand *, int32_t>, 8> StackArgs;
2178 size_t ParameterAreaSizeBytes = 16;
2179
2180 // Classify each argument operand according to the location where the
2181 // argument is passed.
2182
2183 for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
2184 Operand *Arg = legalizeUndef(Instr->getArg(i));
2185 const Type Ty = Arg->getType();
2186 bool InReg = false;
2187 RegNumT Reg;
2188
2189 InReg = CC.argInReg(Ty, i, &Reg);
2190
2191 if (!InReg) {
2192 ParameterAreaSizeBytes =
2193 applyStackAlignmentTy(ParameterAreaSizeBytes, Ty);
2194 StackArgs.push_back(std::make_pair(Arg, ParameterAreaSizeBytes));
2195 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Ty);
2196 continue;
2197 }
2198
2199 if (Ty == IceType_i64) {
2200 Operand *Lo = loOperand(Arg);
2201 Operand *Hi = hiOperand(Arg);
2202 GPRArgs.push_back(
Mohit Bhakkadeec56212016-08-02 05:55:11 -07002203 std::make_pair(Lo, RegMIPS32::get64PairFirstRegNum(Reg)));
Mohit Bhakkad9b4c3c32016-06-22 05:44:05 -07002204 GPRArgs.push_back(
Mohit Bhakkadeec56212016-08-02 05:55:11 -07002205 std::make_pair(Hi, RegMIPS32::get64PairSecondRegNum(Reg)));
Mohit Bhakkad9b4c3c32016-06-22 05:44:05 -07002206 } else if (isScalarIntegerType(Ty)) {
2207 GPRArgs.push_back(std::make_pair(Arg, Reg));
2208 } else {
2209 FPArgs.push_back(std::make_pair(Arg, Reg));
2210 }
Reed Kotler00e36042016-02-01 20:52:19 -08002211 }
Mohit Bhakkad9b4c3c32016-06-22 05:44:05 -07002212
2213 // Adjust the parameter area so that the stack is aligned. It is assumed that
2214 // the stack is already aligned at the start of the calling sequence.
2215 ParameterAreaSizeBytes = applyStackAlignment(ParameterAreaSizeBytes);
2216
2217 // Copy arguments that are passed on the stack to the appropriate stack
2218 // locations.
2219 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
2220 for (auto &StackArg : StackArgs) {
2221 ConstantInteger32 *Loc =
2222 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackArg.second));
2223 Type Ty = StackArg.first->getType();
2224 OperandMIPS32Mem *Addr;
2225 constexpr bool SignExt = false;
2226 if (OperandMIPS32Mem::canHoldOffset(Ty, SignExt, StackArg.second)) {
2227 Addr = OperandMIPS32Mem::create(Func, Ty, SP, Loc);
2228 } else {
2229 Variable *NewBase = Func->makeVariable(SP->getType());
2230 lowerArithmetic(
2231 InstArithmetic::create(Func, InstArithmetic::Add, NewBase, SP, Loc));
2232 Addr = formMemoryOperand(NewBase, Ty);
2233 }
2234 lowerStore(InstStore::create(Func, StackArg.first, Addr));
2235 }
2236
Reed Kotler00e36042016-02-01 20:52:19 -08002237 // Generate the call instruction. Assign its result to a temporary with high
2238 // register allocation weight.
2239 Variable *Dest = Instr->getDest();
2240 // ReturnReg doubles as ReturnRegLo as necessary.
2241 Variable *ReturnReg = nullptr;
2242 Variable *ReturnRegHi = nullptr;
2243 if (Dest) {
2244 switch (Dest->getType()) {
2245 case IceType_NUM:
2246 llvm_unreachable("Invalid Call dest type");
2247 return;
2248 case IceType_void:
2249 break;
2250 case IceType_i1:
2251 case IceType_i8:
2252 case IceType_i16:
2253 case IceType_i32:
2254 ReturnReg = makeReg(Dest->getType(), RegMIPS32::Reg_V0);
2255 break;
2256 case IceType_i64:
Reed Kotler953568f2016-02-17 05:37:01 -08002257 ReturnReg = I32Reg(RegMIPS32::Reg_V0);
2258 ReturnRegHi = I32Reg(RegMIPS32::Reg_V1);
Reed Kotler00e36042016-02-01 20:52:19 -08002259 break;
2260 case IceType_f32:
Mohit Bhakkad135fbe52016-08-12 23:33:14 -07002261 ReturnReg = makeReg(Dest->getType(), RegMIPS32::Reg_F0);
2262 break;
Reed Kotler00e36042016-02-01 20:52:19 -08002263 case IceType_f64:
Srdjan Obucinab85cde12016-09-09 09:39:52 -07002264 ReturnReg = makeReg(IceType_f64, RegMIPS32::Reg_F0);
Mohit Bhakkad135fbe52016-08-12 23:33:14 -07002265 break;
Reed Kotler00e36042016-02-01 20:52:19 -08002266 case IceType_v4i1:
2267 case IceType_v8i1:
2268 case IceType_v16i1:
2269 case IceType_v16i8:
2270 case IceType_v8i16:
2271 case IceType_v4i32:
2272 case IceType_v4f32:
2273 UnimplementedLoweringError(this, Instr);
2274 return;
2275 }
2276 }
2277 Operand *CallTarget = Instr->getCallTarget();
2278 // Allow ConstantRelocatable to be left alone as a direct call,
2279 // but force other constants like ConstantInteger32 to be in
2280 // a register and make it an indirect call.
2281 if (!llvm::isa<ConstantRelocatable>(CallTarget)) {
2282 CallTarget = legalize(CallTarget, Legal_Reg);
2283 }
Mohit Bhakkad9b4c3c32016-06-22 05:44:05 -07002284
2285 // Copy arguments to be passed in registers to the appropriate registers.
2286 CfgVector<Variable *> RegArgs;
2287 for (auto &FPArg : FPArgs) {
2288 RegArgs.emplace_back(legalizeToReg(FPArg.first, FPArg.second));
2289 }
2290 for (auto &GPRArg : GPRArgs) {
2291 RegArgs.emplace_back(legalizeToReg(GPRArg.first, GPRArg.second));
2292 }
2293
2294 // Generate a FakeUse of register arguments so that they do not get dead code
2295 // eliminated as a result of the FakeKill of scratch registers after the call.
2296 // These fake-uses need to be placed here to avoid argument registers from
2297 // being used during the legalizeToReg() calls above.
2298 for (auto *RegArg : RegArgs) {
2299 Context.insert<InstFakeUse>(RegArg);
2300 }
2301
Sagar Thakurc930d592016-07-12 04:06:44 -07002302 // If variable alloca is used the extra 16 bytes for argument build area
2303 // will be allocated on stack before a call.
2304 if (VariableAllocaUsed)
2305 _addiu(SP, SP, -MaxOutArgsSizeBytes);
2306
Reed Kotler00e36042016-02-01 20:52:19 -08002307 Inst *NewCall = InstMIPS32Call::create(Func, ReturnReg, CallTarget);
2308 Context.insert(NewCall);
Sagar Thakurc930d592016-07-12 04:06:44 -07002309
2310 if (VariableAllocaUsed)
2311 _addiu(SP, SP, MaxOutArgsSizeBytes);
2312
2313 // Insert a fake use of stack pointer to avoid dead code elimination of addiu
2314 // instruction.
2315 Context.insert<InstFakeUse>(SP);
2316
Reed Kotler00e36042016-02-01 20:52:19 -08002317 if (ReturnRegHi)
2318 Context.insert(InstFakeDef::create(Func, ReturnRegHi));
2319 // Insert a register-kill pseudo instruction.
2320 Context.insert(InstFakeKill::create(Func, NewCall));
2321 // Generate a FakeUse to keep the call live if necessary.
2322 if (Instr->hasSideEffects() && ReturnReg) {
Mohit Bhakkad9b4c3c32016-06-22 05:44:05 -07002323 Context.insert<InstFakeUse>(ReturnReg);
Reed Kotler00e36042016-02-01 20:52:19 -08002324 }
2325 if (Dest == nullptr)
2326 return;
2327
2328 // Assign the result of the call to Dest.
2329 if (ReturnReg) {
2330 if (ReturnRegHi) {
2331 assert(Dest->getType() == IceType_i64);
2332 auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
2333 Variable *DestLo = Dest64On32->getLo();
2334 Variable *DestHi = Dest64On32->getHi();
2335 _mov(DestLo, ReturnReg);
2336 _mov(DestHi, ReturnRegHi);
2337 } else {
2338 assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 ||
2339 Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 ||
Srdjan Obucinab85cde12016-09-09 09:39:52 -07002340 isScalarFloatingType(Dest->getType()) ||
Reed Kotler00e36042016-02-01 20:52:19 -08002341 isVectorType(Dest->getType()));
Srdjan Obucinab85cde12016-09-09 09:39:52 -07002342 if (isVectorType(Dest->getType())) {
Reed Kotler00e36042016-02-01 20:52:19 -08002343 UnimplementedLoweringError(this, Instr);
2344 return;
2345 } else {
2346 _mov(Dest, ReturnReg);
2347 }
2348 }
2349 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002350}
2351
Reed Kotler04bca5a2016-02-03 14:40:47 -08002352void TargetMIPS32::lowerCast(const InstCast *Instr) {
2353 InstCast::OpKind CastKind = Instr->getCastKind();
Sagar Thakur38dcb592016-05-09 11:57:59 -07002354 Variable *Dest = Instr->getDest();
2355 Operand *Src0 = legalizeUndef(Instr->getSrc(0));
2356 const Type DestTy = Dest->getType();
2357 const Type Src0Ty = Src0->getType();
2358 const uint32_t ShiftAmount =
Sagar Thakure160ed92016-05-30 07:54:47 -07002359 (Src0Ty == IceType_i1
2360 ? INT32_BITS - 1
2361 : INT32_BITS - (CHAR_BITS * typeWidthInBytes(Src0Ty)));
2362 const uint32_t Mask =
2363 (Src0Ty == IceType_i1
2364 ? 1
2365 : (1 << (CHAR_BITS * typeWidthInBytes(Src0Ty))) - 1);
Sagar Thakur38dcb592016-05-09 11:57:59 -07002366
Sagar Thakure160ed92016-05-30 07:54:47 -07002367 if (isVectorType(DestTy)) {
Sagar Thakur38dcb592016-05-09 11:57:59 -07002368 UnimplementedLoweringError(this, Instr);
2369 return;
2370 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002371 switch (CastKind) {
2372 default:
2373 Func->setError("Cast type not supported");
2374 return;
2375 case InstCast::Sext: {
Sagar Thakur38dcb592016-05-09 11:57:59 -07002376 if (DestTy == IceType_i64) {
2377 auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
2378 auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
2379 Variable *Src0R = legalizeToReg(Src0);
Sagar Thakure160ed92016-05-30 07:54:47 -07002380 Variable *T1_Lo = I32Reg();
2381 Variable *T2_Lo = I32Reg();
2382 Variable *T_Hi = I32Reg();
2383 if (Src0Ty == IceType_i1) {
2384 _sll(T1_Lo, Src0R, INT32_BITS - 1);
2385 _sra(T2_Lo, T1_Lo, INT32_BITS - 1);
2386 _mov(DestHi, T2_Lo);
2387 _mov(DestLo, T2_Lo);
Sagar Thakur38dcb592016-05-09 11:57:59 -07002388 } else if (Src0Ty == IceType_i8 || Src0Ty == IceType_i16) {
Sagar Thakure160ed92016-05-30 07:54:47 -07002389 _sll(T1_Lo, Src0R, ShiftAmount);
2390 _sra(T2_Lo, T1_Lo, ShiftAmount);
2391 _sra(T_Hi, T2_Lo, INT32_BITS - 1);
2392 _mov(DestHi, T_Hi);
2393 _mov(DestLo, T2_Lo);
2394 } else if (Src0Ty == IceType_i32) {
2395 _mov(T1_Lo, Src0R);
2396 _sra(T_Hi, T1_Lo, INT32_BITS - 1);
2397 _mov(DestHi, T_Hi);
2398 _mov(DestLo, T1_Lo);
Sagar Thakur38dcb592016-05-09 11:57:59 -07002399 }
Sagar Thakur38dcb592016-05-09 11:57:59 -07002400 } else {
2401 Variable *Src0R = legalizeToReg(Src0);
Sagar Thakure160ed92016-05-30 07:54:47 -07002402 Variable *T1 = makeReg(DestTy);
2403 Variable *T2 = makeReg(DestTy);
2404 if (Src0Ty == IceType_i1 || Src0Ty == IceType_i8 ||
2405 Src0Ty == IceType_i16) {
2406 _sll(T1, Src0R, ShiftAmount);
2407 _sra(T2, T1, ShiftAmount);
2408 _mov(Dest, T2);
Sagar Thakur38dcb592016-05-09 11:57:59 -07002409 }
2410 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002411 break;
2412 }
2413 case InstCast::Zext: {
Sagar Thakur38dcb592016-05-09 11:57:59 -07002414 if (DestTy == IceType_i64) {
2415 auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
2416 auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
2417 Variable *Src0R = legalizeToReg(Src0);
Sagar Thakure160ed92016-05-30 07:54:47 -07002418 Variable *T_Lo = I32Reg();
2419 Variable *T_Hi = I32Reg();
Sagar Thakur38dcb592016-05-09 11:57:59 -07002420
Sagar Thakure160ed92016-05-30 07:54:47 -07002421 if (Src0Ty == IceType_i1 || Src0Ty == IceType_i8 || Src0Ty == IceType_i16)
2422 _andi(T_Lo, Src0R, Mask);
2423 else if (Src0Ty == IceType_i32)
2424 _mov(T_Lo, Src0R);
2425 else
2426 assert(Src0Ty != IceType_i64);
2427 _mov(DestLo, T_Lo);
Sagar Thakur38dcb592016-05-09 11:57:59 -07002428
2429 auto *Zero = getZero();
Sagar Thakure160ed92016-05-30 07:54:47 -07002430 _addiu(T_Hi, Zero, 0);
2431 _mov(DestHi, T_Hi);
Sagar Thakur38dcb592016-05-09 11:57:59 -07002432 } else {
2433 Variable *Src0R = legalizeToReg(Src0);
2434 Variable *T = makeReg(DestTy);
Sagar Thakure160ed92016-05-30 07:54:47 -07002435 if (Src0Ty == IceType_i1 || Src0Ty == IceType_i8 ||
2436 Src0Ty == IceType_i16) {
Sagar Thakur38dcb592016-05-09 11:57:59 -07002437 _andi(T, Src0R, Mask);
Sagar Thakure160ed92016-05-30 07:54:47 -07002438 _mov(Dest, T);
2439 }
Sagar Thakur38dcb592016-05-09 11:57:59 -07002440 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002441 break;
2442 }
2443 case InstCast::Trunc: {
Sagar Thakur38dcb592016-05-09 11:57:59 -07002444 if (Src0Ty == IceType_i64)
2445 Src0 = loOperand(Src0);
2446 Variable *Src0R = legalizeToReg(Src0);
2447 Variable *T = makeReg(DestTy);
2448 _mov(T, Src0R);
2449 _mov(Dest, T);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002450 break;
2451 }
Srdjan Obucina5a9f7142016-09-11 07:08:30 -07002452 case InstCast::Fptrunc: {
2453 assert(Dest->getType() == IceType_f32);
2454 assert(Src0->getType() == IceType_f64);
2455 auto *DestR = legalizeToReg(Dest);
2456 auto *Src0R = legalizeToReg(Src0);
2457 _cvt_s_d(DestR, Src0R);
2458 _mov(Dest, DestR);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002459 break;
2460 }
Srdjan Obucina5a9f7142016-09-11 07:08:30 -07002461 case InstCast::Fpext: {
2462 assert(Dest->getType() == IceType_f64);
2463 assert(Src0->getType() == IceType_f32);
2464 auto *DestR = legalizeToReg(Dest);
2465 auto *Src0R = legalizeToReg(Src0);
2466 _cvt_d_s(DestR, Src0R);
2467 _mov(Dest, DestR);
2468 break;
2469 }
Jaydeep Patild3297662016-09-13 22:52:27 -07002470 case InstCast::Fptosi: {
2471 if (Src0Ty == IceType_f32 && DestTy == IceType_i32) {
2472 Variable *Src0R = legalizeToReg(Src0);
2473 Variable *FTmp = makeReg(IceType_f32);
2474 _trunc_w_s(FTmp, Src0R);
2475 _mov(Dest, FTmp);
2476 } else {
2477 UnimplementedLoweringError(this, Instr);
2478 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002479 break;
Jaydeep Patild3297662016-09-13 22:52:27 -07002480 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002481 case InstCast::Fptoui:
Reed Kotler04bca5a2016-02-03 14:40:47 -08002482 UnimplementedLoweringError(this, Instr);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002483 break;
Jaydeep Patild3297662016-09-13 22:52:27 -07002484 case InstCast::Sitofp: {
2485 if (Src0Ty == IceType_i32 && DestTy == IceType_f32) {
2486 Variable *Src0R = legalizeToReg(Src0);
2487 Variable *FTmp1 = makeReg(IceType_f32);
2488 Variable *FTmp2 = makeReg(IceType_f32);
2489 _mov(FTmp1, Src0R);
2490 _cvt_s_w(FTmp2, FTmp1);
2491 _mov(Dest, FTmp2);
2492 } else {
2493 UnimplementedLoweringError(this, Instr);
2494 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002495 break;
Jaydeep Patild3297662016-09-13 22:52:27 -07002496 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002497 case InstCast::Uitofp: {
Reed Kotler04bca5a2016-02-03 14:40:47 -08002498 UnimplementedLoweringError(this, Instr);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002499 break;
2500 }
2501 case InstCast::Bitcast: {
Reed Kotler04bca5a2016-02-03 14:40:47 -08002502 UnimplementedLoweringError(this, Instr);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002503 break;
2504 }
2505 }
2506}
2507
// Vector extractelement is not yet implemented for MIPS32; report it as an
// unimplemented lowering so translation fails loudly instead of miscompiling.
void TargetMIPS32::lowerExtractElement(const InstExtractElement *Instr) {
  UnimplementedLoweringError(this, Instr);
}
2511
Reed Kotler04bca5a2016-02-03 14:40:47 -08002512void TargetMIPS32::lowerFcmp(const InstFcmp *Instr) {
Srdjan Obucinaf315f0d2016-09-13 05:46:30 -07002513 Variable *Dest = Instr->getDest();
2514 if (isVectorType(Dest->getType())) {
2515 UnimplementedLoweringError(this, Instr);
2516 return;
2517 }
2518
2519 auto *Src0 = Instr->getSrc(0);
2520 auto *Src1 = Instr->getSrc(1);
2521 auto *Zero = getZero();
2522
2523 InstFcmp::FCond Cond = Instr->getCondition();
2524 auto *DestR = legalizeToReg(Dest);
2525 auto *Src0R = legalizeToReg(Src0);
2526 auto *Src1R = legalizeToReg(Src1);
2527 const Type Src0Ty = Src0->getType();
2528
2529 Operand *FCC0 = OperandMIPS32FCC::create(getFunc(), OperandMIPS32FCC::FCC0);
2530
2531 switch (Cond) {
2532 default: {
2533 UnimplementedLoweringError(this, Instr);
2534 return;
2535 }
2536 case InstFcmp::False: {
2537 Context.insert<InstFakeUse>(Src0R);
2538 Context.insert<InstFakeUse>(Src1R);
2539 _addiu(DestR, Zero, 0);
2540 _mov(Dest, DestR);
2541 break;
2542 }
2543 case InstFcmp::Oeq: {
2544 if (Src0Ty == IceType_f32) {
2545 _c_eq_s(Src0R, Src1R);
2546 } else {
2547 _c_eq_d(Src0R, Src1R);
2548 }
2549 _movf(DestR, Zero, FCC0);
2550 _mov(Dest, DestR);
2551 break;
2552 }
2553 case InstFcmp::Ogt: {
2554 if (Src0Ty == IceType_f32) {
2555 _c_ule_s(Src0R, Src1R);
2556 } else {
2557 _c_ule_d(Src0R, Src1R);
2558 }
2559 _movt(DestR, Zero, FCC0);
2560 _mov(Dest, DestR);
2561 break;
2562 }
2563 case InstFcmp::Oge: {
2564 if (Src0Ty == IceType_f32) {
2565 _c_ult_s(Src0R, Src1R);
2566 } else {
2567 _c_ult_d(Src0R, Src1R);
2568 }
2569 _movt(DestR, Zero, FCC0);
2570 _mov(Dest, DestR);
2571 break;
2572 }
2573 case InstFcmp::Olt: {
2574 if (Src0Ty == IceType_f32) {
2575 _c_olt_s(Src0R, Src1R);
2576 } else {
2577 _c_olt_d(Src0R, Src1R);
2578 }
2579 _movf(DestR, Zero, FCC0);
2580 _mov(Dest, DestR);
2581 break;
2582 }
2583 case InstFcmp::Ole: {
2584 if (Src0Ty == IceType_f32) {
2585 _c_ole_s(Src0R, Src1R);
2586 } else {
2587 _c_ole_d(Src0R, Src1R);
2588 }
2589 _movf(DestR, Zero, FCC0);
2590 _mov(Dest, DestR);
2591 break;
2592 }
2593 case InstFcmp::One: {
2594 if (Src0Ty == IceType_f32) {
2595 _c_ueq_s(Src0R, Src1R);
2596 } else {
2597 _c_ueq_d(Src0R, Src1R);
2598 }
2599 _movt(DestR, Zero, FCC0);
2600 _mov(Dest, DestR);
2601 break;
2602 }
2603 case InstFcmp::Ord: {
2604 if (Src0Ty == IceType_f32) {
2605 _c_un_s(Src0R, Src1R);
2606 } else {
2607 _c_un_d(Src0R, Src1R);
2608 }
2609 _movt(DestR, Zero, FCC0);
2610 _mov(Dest, DestR);
2611 break;
2612 }
2613 case InstFcmp::Ueq: {
2614 if (Src0Ty == IceType_f32) {
2615 _c_ueq_s(Src0R, Src1R);
2616 } else {
2617 _c_ueq_d(Src0R, Src1R);
2618 }
2619 _movf(DestR, Zero, FCC0);
2620 _mov(Dest, DestR);
2621 break;
2622 }
2623 case InstFcmp::Ugt: {
2624 if (Src0Ty == IceType_f32) {
2625 _c_ole_s(Src0R, Src1R);
2626 } else {
2627 _c_ole_d(Src0R, Src1R);
2628 }
2629 _movt(DestR, Zero, FCC0);
2630 _mov(Dest, DestR);
2631 break;
2632 }
2633 case InstFcmp::Uge: {
2634 if (Src0Ty == IceType_f32) {
2635 _c_olt_s(Src0R, Src1R);
2636 } else {
2637 _c_olt_d(Src0R, Src1R);
2638 }
2639 _movt(DestR, Zero, FCC0);
2640 _mov(Dest, DestR);
2641 break;
2642 }
2643 case InstFcmp::Ult: {
2644 if (Src0Ty == IceType_f32) {
2645 _c_ult_s(Src0R, Src1R);
2646 } else {
2647 _c_ult_d(Src0R, Src1R);
2648 }
2649 _movf(DestR, Zero, FCC0);
2650 _mov(Dest, DestR);
2651 break;
2652 }
2653 case InstFcmp::Ule: {
2654 if (Src0Ty == IceType_f32) {
2655 _c_ule_s(Src0R, Src1R);
2656 } else {
2657 _c_ule_d(Src0R, Src1R);
2658 }
2659 _movf(DestR, Zero, FCC0);
2660 _mov(Dest, DestR);
2661 break;
2662 }
2663 case InstFcmp::Une: {
2664 if (Src0Ty == IceType_f32) {
2665 _c_eq_s(Src0R, Src1R);
2666 } else {
2667 _c_eq_d(Src0R, Src1R);
2668 }
2669 _movt(DestR, Zero, FCC0);
2670 _mov(Dest, DestR);
2671 break;
2672 }
2673 case InstFcmp::Uno: {
2674 if (Src0Ty == IceType_f32) {
2675 _c_un_s(Src0R, Src1R);
2676 } else {
2677 _c_un_d(Src0R, Src1R);
2678 }
2679 _movf(DestR, Zero, FCC0);
2680 _mov(Dest, DestR);
2681 break;
2682 }
2683 case InstFcmp::True: {
2684 Context.insert<InstFakeUse>(Src0R);
2685 Context.insert<InstFakeUse>(Src1R);
2686 _addiu(DestR, Zero, 1);
2687 _mov(Dest, DestR);
2688 break;
2689 }
2690 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002691}
2692
// 64-bit integer compares are not yet implemented for MIPS32; lowerIcmp
// routes i64 operands here so they fail loudly instead of miscompiling.
void TargetMIPS32::lower64Icmp(const InstIcmp *Instr) {
  UnimplementedLoweringError(this, Instr);
  return;
}
2697
// Lowers scalar integer compares to an i32 result of 0/1 using slt/sltu and
// friends. i64 compares are delegated to lower64Icmp; vector compares are
// unimplemented. Sub-32-bit operands are shifted left so their significant
// bits occupy the most significant bits of a 32-bit register: this discards
// any garbage in the upper bits and preserves both signed and unsigned
// ordering, so the same slt/sltu sequences work for every scalar width.
void TargetMIPS32::lowerIcmp(const InstIcmp *Instr) {
  auto *Src0 = Instr->getSrc(0);
  auto *Src1 = Instr->getSrc(1);
  if (Src0->getType() == IceType_i64) {
    lower64Icmp(Instr);
    return;
  }
  Variable *Dest = Instr->getDest();
  if (isVectorType(Dest->getType())) {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  InstIcmp::ICond Cond = Instr->getCondition();
  auto *Src0R = legalizeToReg(Src0);
  auto *Src1R = legalizeToReg(Src1);
  const Type Src0Ty = Src0R->getType();
  // Shift amount that moves the operand's significant bits to the MSBs.
  const uint32_t ShAmt = INT32_BITS - getScalarIntBitWidth(Src0->getType());
  Variable *Src0RT = I32Reg();
  Variable *Src1RT = I32Reg();

  if (Src0Ty != IceType_i32) {
    _sll(Src0RT, Src0R, ShAmt);
    _sll(Src1RT, Src1R, ShAmt);
  } else {
    _mov(Src0RT, Src0R);
    _mov(Src1RT, Src1R);
  }

  switch (Cond) {
  case InstIcmp::Eq: {
    // a == b  <=>  (a ^ b) < 1 (unsigned).
    auto *DestT = I32Reg();
    auto *T = I32Reg();
    _xor(T, Src0RT, Src1RT);
    _sltiu(DestT, T, 1);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Ne: {
    // a != b  <=>  0 < (a ^ b) (unsigned).
    auto *DestT = I32Reg();
    auto *T = I32Reg();
    auto *Zero = getZero();
    _xor(T, Src0RT, Src1RT);
    _sltu(DestT, Zero, T);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Ugt: {
    // a > b  <=>  b < a (unsigned).
    auto *DestT = I32Reg();
    _sltu(DestT, Src1RT, Src0RT);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Uge: {
    // a >= b  <=>  !(a < b) (unsigned); xori flips the 0/1 result.
    auto *DestT = I32Reg();
    auto *T = I32Reg();
    _sltu(T, Src0RT, Src1RT);
    _xori(DestT, T, 1);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Ult: {
    auto *DestT = I32Reg();
    _sltu(DestT, Src0RT, Src1RT);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Ule: {
    // a <= b  <=>  !(b < a) (unsigned).
    auto *DestT = I32Reg();
    auto *T = I32Reg();
    _sltu(T, Src1RT, Src0RT);
    _xori(DestT, T, 1);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Sgt: {
    // a > b  <=>  b < a (signed).
    auto *DestT = I32Reg();
    _slt(DestT, Src1RT, Src0RT);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Sge: {
    // a >= b  <=>  !(a < b) (signed).
    auto *DestT = I32Reg();
    auto *T = I32Reg();
    _slt(T, Src0RT, Src1RT);
    _xori(DestT, T, 1);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Slt: {
    auto *DestT = I32Reg();
    _slt(DestT, Src0RT, Src1RT);
    _mov(Dest, DestT);
    return;
  }
  case InstIcmp::Sle: {
    // a <= b  <=>  !(b < a) (signed).
    auto *DestT = I32Reg();
    auto *T = I32Reg();
    _slt(T, Src1RT, Src0RT);
    _xori(DestT, T, 1);
    _mov(Dest, DestT);
    return;
  }
  default:
    llvm_unreachable("Invalid ICmp operator");
    return;
  }
}
2805
// Vector insertelement is not yet implemented for MIPS32; report it as an
// unimplemented lowering.
void TargetMIPS32::lowerInsertElement(const InstInsertElement *Instr) {
  UnimplementedLoweringError(this, Instr);
}
2809
// Lowers intrinsic calls. Memory intrinsics (memcpy/memmove/memset), setjmp,
// longjmp and NaClReadTP are lowered to runtime-helper calls; scalar Fabs and
// Sqrt are lowered inline to FPU instructions; everything else (atomics,
// bit-manipulation, stack save/restore, trap) is still unimplemented.
void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
  Variable *Dest = Instr->getDest();
  Type DestTy = (Dest == nullptr) ? IceType_void : Dest->getType();
  switch (Instr->getIntrinsicInfo().ID) {
  case Intrinsics::AtomicCmpxchg: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::AtomicFence:
    UnimplementedLoweringError(this, Instr);
    return;
  case Intrinsics::AtomicFenceAll:
    // NOTE: FenceAll should prevent and load/store from being moved across the
    // fence (both atomic and non-atomic). The InstMIPS32Mfence instruction is
    // currently marked coarsely as "HasSideEffects".
    UnimplementedLoweringError(this, Instr);
    return;
  case Intrinsics::AtomicIsLockFree: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::AtomicLoad: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::AtomicRMW:
    UnimplementedLoweringError(this, Instr);
    return;
  case Intrinsics::AtomicStore: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Bswap: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Ctpop: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Ctlz: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Cttz: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Fabs: {
    // Scalar fabs maps directly to abs.s/abs.d.
    // NOTE(review): a non-scalar (vector) Fabs falls through here emitting
    // neither code nor an error — confirm vector Fabs cannot reach this
    // point, or add an UnimplementedLoweringError for it.
    if (isScalarFloatingType(DestTy)) {
      Variable *T = makeReg(DestTy);
      if (DestTy == IceType_f32) {
        _abs_s(T, legalizeToReg(Instr->getArg(0)));
      } else {
        _abs_d(T, legalizeToReg(Instr->getArg(0)));
      }
      _mov(Dest, T);
    }
    return;
  }
  case Intrinsics::Longjmp: {
    InstCall *Call = makeHelperCall(RuntimeHelper::H_call_longjmp, nullptr, 2);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memcpy: {
    // In the future, we could potentially emit an inline memcpy/memset, etc.
    // for intrinsic calls w/ a known length.
    InstCall *Call = makeHelperCall(RuntimeHelper::H_call_memcpy, nullptr, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memmove: {
    InstCall *Call = makeHelperCall(RuntimeHelper::H_call_memmove, nullptr, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memset: {
    // The value operand needs to be extended to a stack slot size because the
    // PNaCl ABI requires arguments to be at least 32 bits wide.
    Operand *ValOp = Instr->getArg(1);
    assert(ValOp->getType() == IceType_i8);
    Variable *ValExt = Func->makeVariable(stackSlotType());
    lowerCast(InstCast::create(Func, InstCast::Zext, ValExt, ValOp));
    InstCall *Call = makeHelperCall(RuntimeHelper::H_call_memset, nullptr, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(ValExt);
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::NaClReadTP: {
    // Thread-pointer read: only the non-sandboxed helper-call form is
    // supported so far.
    if (getFlags().getUseSandboxing()) {
      UnimplementedLoweringError(this, Instr);
    } else {
      InstCall *Call =
          makeHelperCall(RuntimeHelper::H_call_read_tp, Instr->getDest(), 0);
      lowerCall(Call);
    }
    return;
  }
  case Intrinsics::Setjmp: {
    InstCall *Call =
        makeHelperCall(RuntimeHelper::H_call_setjmp, Instr->getDest(), 1);
    Call->addArg(Instr->getArg(0));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Sqrt: {
    // Scalar sqrt maps directly to sqrt.s/sqrt.d.
    // NOTE(review): same silent fall-through as Fabs for non-scalar types.
    if (isScalarFloatingType(DestTy)) {
      Variable *T = makeReg(DestTy);
      if (DestTy == IceType_f32) {
        _sqrt_s(T, legalizeToReg(Instr->getArg(0)));
      } else {
        _sqrt_d(T, legalizeToReg(Instr->getArg(0)));
      }
      _mov(Dest, T);
    }
    return;
  }
  case Intrinsics::Stacksave: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Stackrestore: {
    UnimplementedLoweringError(this, Instr);
    return;
  }
  case Intrinsics::Trap:
    UnimplementedLoweringError(this, Instr);
    return;
  case Intrinsics::UnknownIntrinsic:
    Func->setError("Should not be lowering UnknownIntrinsic");
    return;
  }
  return;
}
2955
Reed Kotler04bca5a2016-02-03 14:40:47 -08002956void TargetMIPS32::lowerLoad(const InstLoad *Instr) {
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07002957 // A Load instruction can be treated the same as an Assign instruction, after
Srdjan Obucinab85cde12016-09-09 09:39:52 -07002958 // the source operand is transformed into an OperandMIPS32Mem operand.
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07002959 Type Ty = Instr->getDest()->getType();
2960 Operand *Src0 = formMemoryOperand(Instr->getSourceAddress(), Ty);
2961 Variable *DestLoad = Instr->getDest();
2962 auto *Assign = InstAssign::create(Func, DestLoad, Src0);
2963 lowerAssign(Assign);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07002964}
2965
Jaydeep Patil47ef0be2016-09-13 06:09:48 -07002966namespace {
2967void dumpAddressOpt(const Cfg *Func, const Variable *Base, int32_t Offset,
2968 const Inst *Reason) {
2969 if (!BuildDefs::dump())
2970 return;
2971 if (!Func->isVerbose(IceV_AddrOpt))
2972 return;
2973 OstreamLocker _(Func->getContext());
2974 Ostream &Str = Func->getContext()->getStrDump();
2975 Str << "Instruction: ";
2976 Reason->dumpDecorated(Func);
2977 Str << " results in Base=";
2978 if (Base)
2979 Base->dump(Func);
2980 else
2981 Str << "<null>";
2982 Str << ", Offset=" << Offset << "\n";
2983}
2984
2985bool matchAssign(const VariablesMetadata *VMetadata, Variable **Var,
2986 int32_t *Offset, const Inst **Reason) {
2987 // Var originates from Var=SrcVar ==> set Var:=SrcVar
2988 if (*Var == nullptr)
2989 return false;
2990 const Inst *VarAssign = VMetadata->getSingleDefinition(*Var);
2991 if (!VarAssign)
2992 return false;
2993 assert(!VMetadata->isMultiDef(*Var));
2994 if (!llvm::isa<InstAssign>(VarAssign))
2995 return false;
2996
2997 Operand *SrcOp = VarAssign->getSrc(0);
2998 bool Optimized = false;
2999 if (auto *SrcVar = llvm::dyn_cast<Variable>(SrcOp)) {
3000 if (!VMetadata->isMultiDef(SrcVar) ||
3001 // TODO: ensure SrcVar stays single-BB
3002 false) {
3003 Optimized = true;
3004 *Var = SrcVar;
3005 } else if (auto *Const = llvm::dyn_cast<ConstantInteger32>(SrcOp)) {
3006 int32_t MoreOffset = Const->getValue();
3007 int32_t NewOffset = MoreOffset + *Offset;
3008 if (Utils::WouldOverflowAdd(*Offset, MoreOffset))
3009 return false;
3010 *Var = nullptr;
3011 *Offset += NewOffset;
3012 Optimized = true;
3013 }
3014 }
3015
3016 if (Optimized) {
3017 *Reason = VarAssign;
3018 }
3019
3020 return Optimized;
3021}
3022
3023bool isAddOrSub(const Inst *Instr, InstArithmetic::OpKind *Kind) {
3024 if (const auto *Arith = llvm::dyn_cast<InstArithmetic>(Instr)) {
3025 switch (Arith->getOp()) {
3026 default:
3027 return false;
3028 case InstArithmetic::Add:
3029 case InstArithmetic::Sub:
3030 *Kind = Arith->getOp();
3031 return true;
3032 }
3033 }
3034 return false;
3035}
3036
3037bool matchOffsetBase(const VariablesMetadata *VMetadata, Variable **Base,
3038 int32_t *Offset, const Inst **Reason) {
3039 // Base is Base=Var+Const || Base is Base=Const+Var ==>
3040 // set Base=Var, Offset+=Const
3041 // Base is Base=Var-Const ==>
3042 // set Base=Var, Offset-=Const
3043 if (*Base == nullptr)
3044 return false;
3045 const Inst *BaseInst = VMetadata->getSingleDefinition(*Base);
3046 if (BaseInst == nullptr) {
3047 return false;
3048 }
3049 assert(!VMetadata->isMultiDef(*Base));
3050
3051 auto *ArithInst = llvm::dyn_cast<const InstArithmetic>(BaseInst);
3052 if (ArithInst == nullptr)
3053 return false;
3054 InstArithmetic::OpKind Kind;
3055 if (!isAddOrSub(ArithInst, &Kind))
3056 return false;
3057 bool IsAdd = Kind == InstArithmetic::Add;
3058 Operand *Src0 = ArithInst->getSrc(0);
3059 Operand *Src1 = ArithInst->getSrc(1);
3060 auto *Var0 = llvm::dyn_cast<Variable>(Src0);
3061 auto *Var1 = llvm::dyn_cast<Variable>(Src1);
3062 auto *Const0 = llvm::dyn_cast<ConstantInteger32>(Src0);
3063 auto *Const1 = llvm::dyn_cast<ConstantInteger32>(Src1);
3064 Variable *NewBase = nullptr;
3065 int32_t NewOffset = *Offset;
3066
3067 if (Var0 == nullptr && Const0 == nullptr) {
3068 assert(llvm::isa<ConstantRelocatable>(Src0));
3069 return false;
3070 }
3071
3072 if (Var1 == nullptr && Const1 == nullptr) {
3073 assert(llvm::isa<ConstantRelocatable>(Src1));
3074 return false;
3075 }
3076
3077 if (Var0 && Var1)
3078 // TODO(jpp): merge base/index splitting into here.
3079 return false;
3080 if (!IsAdd && Var1)
3081 return false;
3082 if (Var0)
3083 NewBase = Var0;
3084 else if (Var1)
3085 NewBase = Var1;
3086 // Compute the updated constant offset.
3087 if (Const0) {
3088 int32_t MoreOffset = IsAdd ? Const0->getValue() : -Const0->getValue();
3089 if (Utils::WouldOverflowAdd(NewOffset, MoreOffset))
3090 return false;
3091 NewOffset += MoreOffset;
3092 }
3093 if (Const1) {
3094 int32_t MoreOffset = IsAdd ? Const1->getValue() : -Const1->getValue();
3095 if (Utils::WouldOverflowAdd(NewOffset, MoreOffset))
3096 return false;
3097 NewOffset += MoreOffset;
3098 }
3099
3100 // Update the computed address parameters once we are sure optimization
3101 // is valid.
3102 *Base = NewBase;
3103 *Offset = NewOffset;
3104 *Reason = BaseInst;
3105 return true;
3106}
3107} // end of anonymous namespace
3108
// Iteratively folds assignments and add/sub-with-constant definitions of the
// address into a single base+offset pair, then materializes an
// OperandMIPS32Mem. Returns nullptr when no memory operand can be formed
// (vector types, or a non-variable base).
OperandMIPS32Mem *TargetMIPS32::formAddressingMode(Type Ty, Cfg *Func,
                                                   const Inst *LdSt,
                                                   Operand *Base) {
  assert(Base != nullptr);
  int32_t OffsetImm = 0;

  Func->resetCurrentNode();
  if (Func->isVerbose(IceV_AddrOpt)) {
    OstreamLocker _(Func->getContext());
    Ostream &Str = Func->getContext()->getStrDump();
    Str << "\nAddress mode formation:\t";
    LdSt->dumpDecorated(Func);
  }

  if (isVectorType(Ty)) {
    UnimplementedError(getFlags());
    return nullptr;
  }

  auto *BaseVar = llvm::dyn_cast<Variable>(Base);
  if (BaseVar == nullptr)
    return nullptr;

  const VariablesMetadata *VMetadata = Func->getVMetadata();
  const Inst *Reason = nullptr;

  // Fix-point loop: keep applying the matchers until neither makes progress.
  // Reason records which instruction justified the last transformation (for
  // verbose dumping).
  do {
    if (Reason != nullptr) {
      dumpAddressOpt(Func, BaseVar, OffsetImm, Reason);
      Reason = nullptr;
    }

    if (matchAssign(VMetadata, &BaseVar, &OffsetImm, &Reason)) {
      continue;
    }

    if (matchOffsetBase(VMetadata, &BaseVar, &OffsetImm, &Reason)) {
      continue;
    }
  } while (Reason);

  if (BaseVar == nullptr) {
    // We need base register rather than just OffsetImm. Move the OffsetImm to
    // BaseVar and form 0(BaseVar) addressing.
    const Type PointerType = getPointerType();
    BaseVar = makeReg(PointerType);
    Context.insert<InstAssign>(BaseVar, Ctx->getConstantInt32(OffsetImm));
    OffsetImm = 0;
  } else if (OffsetImm != 0) {
    // If the OffsetImm is more than signed 16-bit value then add it in the
    // BaseVar and form 0(BaseVar) addressing.
    const int32_t PositiveOffset = OffsetImm > 0 ? OffsetImm : -OffsetImm;
    const InstArithmetic::OpKind Op =
        OffsetImm > 0 ? InstArithmetic::Add : InstArithmetic::Sub;
    constexpr bool ZeroExt = false;
    if (!OperandMIPS32Mem::canHoldOffset(Ty, ZeroExt, OffsetImm)) {
      const Type PointerType = getPointerType();
      Variable *T = makeReg(PointerType);
      Context.insert<InstArithmetic>(Op, T, BaseVar,
                                     Ctx->getConstantInt32(PositiveOffset));
      BaseVar = T;
      OffsetImm = 0;
    }
  }

  // At this point the remaining immediate must fit the 16-bit offset field of
  // a MIPS32 load/store.
  assert(BaseVar != nullptr);
  assert(OffsetImm < 0 ? (-OffsetImm & 0x0000ffff) == -OffsetImm
                       : (OffsetImm & 0x0000ffff) == OffsetImm);

  return OperandMIPS32Mem::create(
      Func, Ty, BaseVar,
      llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(OffsetImm)));
}
3182
3183void TargetMIPS32::doAddressOptLoad() {
3184 Inst *Instr = iteratorToInst(Context.getCur());
3185 assert(llvm::isa<InstLoad>(Instr));
3186 Variable *Dest = Instr->getDest();
3187 Operand *Addr = Instr->getSrc(0);
3188 if (OperandMIPS32Mem *Mem =
3189 formAddressingMode(Dest->getType(), Func, Instr, Addr)) {
3190 Instr->setDeleted();
3191 Context.insert<InstLoad>(Dest, Mem);
3192 }
3193}
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003194
Qining Luaee5fa82015-08-20 14:59:03 -07003195void TargetMIPS32::randomlyInsertNop(float Probability,
3196 RandomNumberGenerator &RNG) {
3197 RandomNumberGeneratorWrapper RNGW(RNG);
3198 if (RNGW.getTrueWithProbability(Probability)) {
Karl Schimpfd4699942016-04-02 09:55:31 -07003199 UnimplementedError(getFlags());
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003200 }
3201}
3202
// Phi instructions are expected to be removed from the regular instruction
// list before target lowering runs; encountering one here is an error.
void TargetMIPS32::lowerPhi(const InstPhi * /*Instr*/) {
  Func->setError("Phi found in regular instruction list");
}
3206
// Lowers a return: moves the return value (if any) into the ABI-designated
// register(s) — F0/F0F1 for floats, V0 (and V1 for i64) for integers — and
// emits the return through RA. The moved-to register is threaded into _ret
// (plus a FakeUse of V1 for i64) so liveness keeps it alive until the ret.
void TargetMIPS32::lowerRet(const InstRet *Instr) {
  Variable *Reg = nullptr;
  if (Instr->hasRetValue()) {
    Operand *Src0 = Instr->getRetValue();
    switch (Src0->getType()) {
    case IceType_f32: {
      Operand *Src0F = legalizeToReg(Src0);
      Reg = makeReg(Src0F->getType(), RegMIPS32::Reg_F0);
      _mov(Reg, Src0F);
      break;
    }
    case IceType_f64: {
      // Doubles are returned in the F0:F1 register pair.
      Operand *Src0F = legalizeToReg(Src0);
      Reg = makeReg(Src0F->getType(), RegMIPS32::Reg_F0F1);
      _mov(Reg, Src0F);
      break;
    }
    case IceType_i1:
    case IceType_i8:
    case IceType_i16:
    case IceType_i32: {
      Operand *Src0F = legalizeToReg(Src0);
      Reg = makeReg(Src0F->getType(), RegMIPS32::Reg_V0);
      _mov(Reg, Src0F);
      break;
    }
    case IceType_i64: {
      // 64-bit values are returned in V0 (low) and V1 (high); the FakeUse
      // keeps the V1 half live through the return.
      Src0 = legalizeUndef(Src0);
      Variable *R0 = legalizeToReg(loOperand(Src0), RegMIPS32::Reg_V0);
      Variable *R1 = legalizeToReg(hiOperand(Src0), RegMIPS32::Reg_V1);
      Reg = R0;
      Context.insert<InstFakeUse>(R1);
      break;
    }
    default:
      UnimplementedLoweringError(this, Instr);
    }
  }
  _ret(getPhysicalRegister(RegMIPS32::Reg_RA), Reg);
}
3247
// Lowers a select using MIPS conditional moves (movn for integers, movn.s /
// movn.d for floats): the false value's register is conditionally overwritten
// with the true value when the condition register is non-zero, then copied to
// the destination.
void TargetMIPS32::lowerSelect(const InstSelect *Instr) {
  Variable *Dest = Instr->getDest();
  const Type DestTy = Dest->getType();

  // i64 and vector selects are not yet implemented.
  if (DestTy == IceType_i64 || isVectorType(DestTy)) {
    UnimplementedLoweringError(this, Instr);
    return;
  }

  // NOTE(review): legalizing Dest (which this instruction writes, not reads)
  // looks unusual -- presumably it only serves to obtain a register of the
  // right type; confirm against the other lowerings before changing.
  Variable *DestR = legalizeToReg(Dest);
  Variable *SrcTR = legalizeToReg(Instr->getTrueOperand());
  Variable *SrcFR = legalizeToReg(Instr->getFalseOperand());

  Variable *ConditionR = legalizeToReg(Instr->getCondition());

  assert(Instr->getCondition()->getType() == IceType_i1);

  switch (DestTy) {
  case IceType_i1:
  case IceType_i8:
  case IceType_i16:
  case IceType_i32:
    // SrcFR ends up holding SrcTR iff the condition is non-zero.
    _movn(SrcFR, SrcTR, ConditionR);
    _mov(DestR, SrcFR);
    _mov(Dest, DestR);
    break;
  case IceType_f32:
    _movn_s(SrcFR, SrcTR, ConditionR);
    _mov(DestR, SrcFR);
    _mov(Dest, DestR);
    break;
  case IceType_f64:
    _movn_d(SrcFR, SrcTR, ConditionR);
    _mov(DestR, SrcFR);
    _mov(Dest, DestR);
    break;
  default:
    UnimplementedLoweringError(this, Instr);
  }
}
3288
// Vector shuffles are not yet supported on MIPS32; any occurrence is
// reported as an unimplemented lowering.
void TargetMIPS32::lowerShuffleVector(const InstShuffleVector *Instr) {
  UnimplementedLoweringError(this, Instr);
}
3292
Reed Kotler04bca5a2016-02-03 14:40:47 -08003293void TargetMIPS32::lowerStore(const InstStore *Instr) {
Mohit Bhakkadf3bc5cf2016-05-31 11:19:03 -07003294 Operand *Value = Instr->getData();
3295 Operand *Addr = Instr->getAddr();
3296 OperandMIPS32Mem *NewAddr = formMemoryOperand(Addr, Value->getType());
3297 Type Ty = NewAddr->getType();
3298
3299 if (Ty == IceType_i64) {
3300 Value = legalizeUndef(Value);
3301 Variable *ValueHi = legalizeToReg(hiOperand(Value));
3302 Variable *ValueLo = legalizeToReg(loOperand(Value));
3303 _sw(ValueHi, llvm::cast<OperandMIPS32Mem>(hiOperand(NewAddr)));
3304 _sw(ValueLo, llvm::cast<OperandMIPS32Mem>(loOperand(NewAddr)));
3305 } else {
3306 Variable *ValueR = legalizeToReg(Value);
3307 _sw(ValueR, NewAddr);
3308 }
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003309}
3310
Jaydeep Patil47ef0be2016-09-13 06:09:48 -07003311void TargetMIPS32::doAddressOptStore() {
3312 Inst *Instr = iteratorToInst(Context.getCur());
3313 assert(llvm::isa<InstStore>(Instr));
3314 Operand *Src = Instr->getSrc(0);
3315 Operand *Addr = Instr->getSrc(1);
3316 if (OperandMIPS32Mem *Mem =
3317 formAddressingMode(Src->getType(), Func, Instr, Addr)) {
3318 Instr->setDeleted();
3319 Context.insert<InstStore>(Src, Mem);
3320 }
3321}
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003322
Reed Kotler04bca5a2016-02-03 14:40:47 -08003323void TargetMIPS32::lowerSwitch(const InstSwitch *Instr) {
Jaydeep Patil29823f12016-08-31 05:10:03 -07003324 Operand *Src = Instr->getComparison();
3325 SizeT NumCases = Instr->getNumCases();
3326 if (Src->getType() == IceType_i64) {
3327 Src = legalizeUndef(Src);
3328 Variable *Src0Lo = legalizeToReg(loOperand(Src));
3329 Variable *Src0Hi = legalizeToReg(hiOperand(Src));
3330 for (SizeT I = 0; I < NumCases; ++I) {
3331 Operand *ValueLo = Ctx->getConstantInt32(Instr->getValue(I));
3332 Operand *ValueHi = Ctx->getConstantInt32(Instr->getValue(I) >> 32);
3333 CfgNode *TargetTrue = Instr->getLabel(I);
3334 constexpr CfgNode *NoTarget = nullptr;
3335 ValueHi = legalizeToReg(ValueHi);
3336 InstMIPS32Label *IntraLabel = InstMIPS32Label::create(Func, this);
3337 _br(NoTarget, NoTarget, Src0Hi, ValueHi, IntraLabel,
3338 CondMIPS32::Cond::NE);
3339 ValueLo = legalizeToReg(ValueLo);
3340 _br(NoTarget, TargetTrue, Src0Lo, ValueLo, CondMIPS32::Cond::EQ);
3341 Context.insert(IntraLabel);
3342 }
3343 _br(Instr->getLabelDefault());
3344 return;
3345 }
3346 Variable *SrcVar = legalizeToReg(Src);
3347 assert(SrcVar->mustHaveReg());
3348 for (SizeT I = 0; I < NumCases; ++I) {
3349 Operand *Value = Ctx->getConstantInt32(Instr->getValue(I));
3350 CfgNode *TargetTrue = Instr->getLabel(I);
3351 constexpr CfgNode *NoTargetFalse = nullptr;
3352 Value = legalizeToReg(Value);
3353 _br(NoTargetFalse, TargetTrue, SrcVar, Value, CondMIPS32::Cond::EQ);
3354 }
3355 _br(Instr->getLabelDefault());
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003356}
3357
// Breakpoint instructions are not yet supported on MIPS32.
void TargetMIPS32::lowerBreakpoint(const InstBreakpoint *Instr) {
  UnimplementedLoweringError(this, Instr);
}
3361
// Lowering of "unreachable" is not yet implemented on MIPS32 (typically a
// trap on other targets).
void TargetMIPS32::lowerUnreachable(const InstUnreachable *Instr) {
  UnimplementedLoweringError(this, Instr);
}
3365
// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve
// integrity of liveness analysis. Undef values are also turned into zeroes,
// since loOperand() and hiOperand() don't expect Undef input.
void TargetMIPS32::prelowerPhis() {
  // Delegates to the shared 32-bit Phi-splitting helper for the current node.
  PhiLowering::prelowerPhis32Bit<TargetMIPS32>(this, Context.getNode(), Func);
}
3372
// Target-specific pass run after lowering. Skipped entirely at -Om1; at
// higher optimization levels the required fix-ups are not yet implemented.
void TargetMIPS32::postLower() {
  if (Func->getOptLevel() == Opt_m1)
    return;
  // TODO(rkotler): Find two-address non-SSA instructions where Dest==Src0,
  // and set the IsDestRedefined flag to keep liveness analysis consistent.
  UnimplementedError(getFlags());
}
3380
// Randomizes the register assignment order (code diversification). Not yet
// implemented for MIPS32; all parameters are intentionally unused.
void TargetMIPS32::makeRandomRegisterPermutation(
    llvm::SmallVectorImpl<RegNumT> &Permutation,
    const SmallBitVector &ExcludeRegisters, uint64_t Salt) const {
  (void)Permutation;
  (void)ExcludeRegisters;
  (void)Salt;
  UnimplementedError(getFlags());
}
3389
3390/* TODO(jvoung): avoid duplicate symbols with multiple targets.
3391void ConstantUndef::emitWithoutDollar(GlobalContext *) const {
3392 llvm_unreachable("Not expecting to emitWithoutDollar undef");
3393}
3394
3395void ConstantUndef::emit(GlobalContext *) const {
3396 llvm_unreachable("undef value encountered by emitter.");
3397}
3398*/
3399
// Constructs the MIPS32 data lowering, forwarding the global context to the
// TargetDataLowering base class.
TargetDataMIPS32::TargetDataMIPS32(GlobalContext *Ctx)
    : TargetDataLowering(Ctx) {}
3402
John Porto8b1a7052015-06-17 13:20:08 -07003403void TargetDataMIPS32::lowerGlobals(const VariableDeclarationList &Vars,
Jim Stichnoth467ffe52016-03-29 15:01:06 -07003404 const std::string &SectionSuffix) {
Karl Schimpfd4699942016-04-02 09:55:31 -07003405 const bool IsPIC = getFlags().getUseNonsfi();
3406 switch (getFlags().getOutFileType()) {
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003407 case FT_Elf: {
3408 ELFObjectWriter *Writer = Ctx->getObjectWriter();
Jim Stichnoth8ff4b282016-01-04 15:39:06 -08003409 Writer->writeDataSection(Vars, llvm::ELF::R_MIPS_GLOB_DAT, SectionSuffix,
3410 IsPIC);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003411 } break;
3412 case FT_Asm:
3413 case FT_Iasm: {
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003414 OstreamLocker L(Ctx);
John Porto8b1a7052015-06-17 13:20:08 -07003415 for (const VariableDeclaration *Var : Vars) {
Jim Stichnothdd6dcfa2016-04-18 12:52:09 -07003416 if (getFlags().matchTranslateOnly(Var->getName(), 0)) {
John Porto8b1a7052015-06-17 13:20:08 -07003417 emitGlobal(*Var, SectionSuffix);
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003418 }
3419 }
3420 } break;
3421 }
3422}
3423
// Emits the constant pool. Beyond honoring the disable-translation flag,
// nothing is emitted yet for MIPS32.
void TargetDataMIPS32::lowerConstants() {
  if (getFlags().getDisableTranslation())
    return;
}
3428
// Emits jump tables. Beyond honoring the disable-translation flag, nothing
// is emitted yet for MIPS32.
void TargetDataMIPS32::lowerJumpTables() {
  if (getFlags().getDisableTranslation())
    return;
}
3433
Jim Stichnothac8da5c2015-10-21 06:57:46 -07003434// Helper for legalize() to emit the right code to lower an operand to a
3435// register of the appropriate type.
Jim Stichnoth8aa39662016-02-10 11:20:30 -08003436Variable *TargetMIPS32::copyToReg(Operand *Src, RegNumT RegNum) {
Jim Stichnothac8da5c2015-10-21 06:57:46 -07003437 Type Ty = Src->getType();
3438 Variable *Reg = makeReg(Ty, RegNum);
Mohit Bhakkadb684f2b2016-06-09 15:06:23 -07003439 if (isVectorType(Ty)) {
Karl Schimpfd4699942016-04-02 09:55:31 -07003440 UnimplementedError(getFlags());
Jim Stichnothac8da5c2015-10-21 06:57:46 -07003441 } else {
Jaydeep Patil1d0690b2016-09-04 07:19:08 -07003442 if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Src)) {
3443 _lw(Reg, Mem);
3444 } else {
3445 _mov(Reg, Src);
3446 }
Jim Stichnothac8da5c2015-10-21 06:57:46 -07003447 }
3448 return Reg;
3449}
3450
// Returns a version of From that satisfies the Allowed operand-kind mask,
// emitting instructions (loads, immediate materialization, register copies)
// into the current lowering context as needed. If RegNum has a value, the
// result is forced into that physical register.
Operand *TargetMIPS32::legalize(Operand *From, LegalMask Allowed,
                                RegNumT RegNum) {
  Type Ty = From->getType();
  // Assert that a physical register is allowed. To date, all calls
  // to legalize() allow a physical register. Legal_Flex converts
  // registers to the right type OperandMIPS32FlexReg as needed.
  assert(Allowed & Legal_Reg);

  // Value availability: if another variable is known to hold the same value,
  // substitute it (only when no specific register was requested, the
  // substitute is guaranteed a register, and the types match).
  if (RegNum.hasNoValue()) {
    if (Variable *Subst = getContext().availabilityGet(From)) {
      // At this point we know there is a potential substitution available.
      if (!Subst->isRematerializable() && Subst->mustHaveReg() &&
          !Subst->hasReg()) {
        // At this point we know the substitution will have a register.
        if (From->getType() == Subst->getType()) {
          // At this point we know the substitution's register is compatible.
          return Subst;
        }
      }
    }
  }

  // Go through the various types of operands:
  // OperandMIPS32Mem, Constant, and Variable.
  // Given the above assertion, if type of operand is not legal
  // (e.g., OperandMIPS32Mem and !Legal_Mem), we can always copy
  // to a register.
  if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(From)) {
    // Base must be in a physical register.
    Variable *Base = Mem->getBase();
    ConstantInteger32 *Offset = llvm::cast<ConstantInteger32>(Mem->getOffset());
    Variable *RegBase = nullptr;
    assert(Base);

    RegBase = llvm::cast<Variable>(
        legalize(Base, Legal_Reg | Legal_Rematerializable));

    // A non-zero offset must fit the (sign-extended) 16-bit immediate field.
    if (Offset != nullptr && Offset->getValue() != 0) {
      static constexpr bool ZeroExt = false;
      if (!OperandMIPS32Mem::canHoldOffset(Ty, ZeroExt, Offset->getValue())) {
        llvm::report_fatal_error("Invalid memory offset.");
      }
    }

    // Create a new operand if there was a change.
    if (Base != RegBase) {
      Mem = OperandMIPS32Mem::create(Func, Ty, RegBase, Offset,
                                     Mem->getAddrMode());
    }

    if (Allowed & Legal_Mem) {
      From = Mem;
    } else {
      // Memory operands are not allowed: load the value into a register.
      Variable *Reg = makeReg(Ty, RegNum);
      _lw(Reg, Mem);
      From = Reg;
    }
    return From;
  }

  if (llvm::isa<Constant>(From)) {
    if (auto *C = llvm::dyn_cast<ConstantRelocatable>(From)) {
      (void)C;
      // TODO(reed kotler): complete this case for proper implementation
      // NOTE(review): relocatable constants currently only get a fake def of
      // a fresh register; no actual address materialization is emitted yet.
      Variable *Reg = makeReg(Ty, RegNum);
      Context.insert<InstFakeDef>(Reg);
      return Reg;
    } else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
      const uint32_t Value = C32->getValue();
      // Check if the immediate will fit in a Flexible second operand,
      // if a Flexible second operand is allowed. We need to know the exact
      // value, so that rules out relocatable constants.
      // Also try the inverse and use MVN if possible.
      // Do a movw/movt to a register.
      Variable *Reg;
      if (RegNum.hasValue())
        Reg = getPhysicalRegister(RegNum);
      else
        Reg = makeReg(Ty, RegNum);
      if (isInt<16>(int32_t(Value))) {
        // Small immediates: addiu Reg, $zero, Value.
        Variable *Zero = getPhysicalRegister(RegMIPS32::Reg_ZERO, Ty);
        Context.insert<InstFakeDef>(Zero);
        _addiu(Reg, Zero, Value);
      } else {
        // Large immediates: lui for the upper 16 bits, ori for the lower 16
        // bits (the ori is skipped when the lower half is zero).
        uint32_t UpperBits = (Value >> 16) & 0xFFFF;
        (void)UpperBits;
        uint32_t LowerBits = Value & 0xFFFF;
        Variable *TReg = makeReg(Ty, RegNum);
        if (LowerBits) {
          _lui(TReg, Ctx->getConstantInt32(UpperBits));
          _ori(Reg, TReg, LowerBits);
        } else {
          _lui(Reg, Ctx->getConstantInt32(UpperBits));
        }
      }
      return Reg;
    } else if (isScalarFloatingType(Ty)) {
      // Load floats/doubles from literal pool.
      auto *CFrom = llvm::cast<Constant>(From);
      assert(CFrom->getShouldBePooled());
      Constant *Offset = Ctx->getConstantSym(0, CFrom->getLabelName());
      Variable *TReg1 = makeReg(getPointerType());
      Variable *TReg2 = makeReg(Ty);
      Context.insert<InstFakeDef>(TReg2);
      // Address = %hi(label) via lui, %lo(label) folded into the load.
      _lui(TReg1, Offset, RO_Hi);
      OperandMIPS32Mem *Addr =
          OperandMIPS32Mem::create(Func, Ty, TReg1, Offset);
      if (Ty == IceType_f32)
        _lwc1(TReg2, Addr, RO_Lo);
      else
        _ldc1(TReg2, Addr, RO_Lo);
      return copyToReg(TReg2, RegNum);
    }
  }

  if (auto *Var = llvm::dyn_cast<Variable>(From)) {
    // Rematerializable variables may be used directly when allowed;
    // otherwise their value is copied into a fresh register.
    if (Var->isRematerializable()) {
      if (Allowed & Legal_Rematerializable) {
        return From;
      }

      Variable *T = makeReg(Var->getType(), RegNum);
      _mov(T, Var);
      return T;
    }
    // Check if the variable is guaranteed a physical register. This
    // can happen either when the variable is pre-colored or when it is
    // assigned infinite weight.
    bool MustHaveRegister = (Var->hasReg() || Var->mustHaveReg());
    // We need a new physical register for the operand if:
    //   Mem is not allowed and Var isn't guaranteed a physical
    //   register, or
    //   RegNum is required and Var->getRegNum() doesn't match.
    if ((!(Allowed & Legal_Mem) && !MustHaveRegister) ||
        (RegNum.hasValue() && RegNum != Var->getRegNum())) {
      From = copyToReg(From, RegNum);
    }
    return From;
  }
  return From;
}
3592
Sagar Thakur5cce7612016-05-24 06:25:50 -07003593namespace BoolFolding {
3594// TODO(sagar.thakur): Add remaining instruction kinds to shouldTrackProducer()
3595// and isValidConsumer()
3596bool shouldTrackProducer(const Inst &Instr) {
3597 return Instr.getKind() == Inst::Icmp;
3598}
3599
3600bool isValidConsumer(const Inst &Instr) { return Instr.getKind() == Inst::Br; }
3601} // end of namespace BoolFolding
3602
// Scans one node for i1-producing instructions (currently only icmp, per
// BoolFolding::shouldTrackProducer) whose single use is a valid consumer
// (currently only br) within the same block. Surviving entries stay in
// KnownComputations and their producers are marked dead so the consumer's
// lowering can fold them.
void TargetMIPS32::ComputationTracker::recordProducers(CfgNode *Node) {
  for (Inst &Instr : Node->getInsts()) {
    if (Instr.isDeleted())
      continue;
    // Check whether Instr is a valid producer.
    Variable *Dest = Instr.getDest();
    if (Dest // only consider instructions with an actual dest var; and
        && Dest->getType() == IceType_i1 // only bool-type dest vars; and
        && BoolFolding::shouldTrackProducer(Instr)) { // white-listed instr.
      KnownComputations.emplace(Dest->getIndex(),
                                ComputationEntry(&Instr, IceType_i1));
    }
    // Check each src variable against the map.
    FOREACH_VAR_IN_INST(Var, Instr) {
      SizeT VarNum = Var->getIndex();
      auto ComputationIter = KnownComputations.find(VarNum);
      if (ComputationIter == KnownComputations.end()) {
        continue;
      }

      // A use was found: count it, then validate the consuming instruction.
      ++ComputationIter->second.NumUses;
      switch (ComputationIter->second.ComputationType) {
      default:
        KnownComputations.erase(VarNum);
        continue;
      case IceType_i1:
        if (!BoolFolding::isValidConsumer(Instr)) {
          // Consumed by an instruction we can't fold into: stop tracking.
          KnownComputations.erase(VarNum);
          continue;
        }
        break;
      }

      // The last use within the block means the value isn't live-out.
      if (Instr.isLastUse(Var)) {
        ComputationIter->second.IsLiveOut = false;
      }
    }
  }

  for (auto Iter = KnownComputations.begin(), End = KnownComputations.end();
       Iter != End;) {
    // Disable the folding if its dest may be live beyond this block.
    if (Iter->second.IsLiveOut || Iter->second.NumUses > 1) {
      Iter = KnownComputations.erase(Iter);
      continue;
    }

    // Mark as "dead" rather than outright deleting. This is so that other
    // peephole style optimizations during or before lowering have access to
    // this instruction in undeleted form. See for example
    // tryOptimizedCmpxchgCmpBr().
    Iter->second.Instr->setDead();
    ++Iter;
  }
}
3658
// Constructs the MIPS32 header lowering, forwarding the global context to
// the TargetHeaderLowering base class.
TargetHeaderMIPS32::TargetHeaderMIPS32(GlobalContext *Ctx)
    : TargetHeaderLowering(Ctx) {}
3661
// Emits the module-level assembly header: directives that disable the
// microMIPS and MIPS16 compressed instruction encodings.
void TargetHeaderMIPS32::lower() {
  OstreamLocker L(Ctx);
  Ostream &Str = Ctx->getStrEmit();
  Str << "\t.set\t"
      << "nomicromips\n";
  Str << "\t.set\t"
      << "nomips16\n";
}
3670
// Static storage for the per-register-class legality bit vectors and the
// register aliasing table; presumably filled in by target initialization
// code outside this chunk (TODO confirm).
SmallBitVector TargetMIPS32::TypeToRegisterSet[RCMIPS32_NUM];
SmallBitVector TargetMIPS32::TypeToRegisterSetUnfiltered[RCMIPS32_NUM];
SmallBitVector TargetMIPS32::RegisterAliases[RegMIPS32::Reg_NUM];
Jim Stichnoth94844f12015-11-04 16:06:16 -08003674
John Porto4a566862016-01-04 09:33:41 -08003675} // end of namespace MIPS32
Jim Stichnoth6da4cef2015-06-11 13:26:33 -07003676} // end of namespace Ice