//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Return a constant boolean vector that has true elements in all positions
/// where the input constant data vector has an element with the sign bit set.
static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
  SmallVector<Constant *, 32> BoolVec;
  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
    Constant *Elt = V->getElementAsConstant(I);
    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
           "Unexpected constant data vector element type");
    bool Sign = V->getElementType()->isIntegerTy()
                    ? cast<ConstantInt>(Elt)->isNegative()
                    : cast<ConstantFP>(Elt)->isNegative();
    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
  }
  return ConstantVector::get(BoolVec);
}

Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  unsigned CopyDstAlign = MI->getDestAlignment();
  if (CopyDstAlign < DstAlign){
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  unsigned CopySrcAlign = MI->getSourceAlignment();
  if (CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

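  // Size is in bytes; Size << 3 converts it to the bit width of the integer
  // type used for the replacement load/store.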
  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
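    // A !tbaa.struct node holds a list of (offset, size, tag) triples; reuse
    // its tag only when there is a single field starting at offset zero that
    // spans the entire copy.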
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
            Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (MI->getDestAlignment() < Alignment) {
    MI->setDestAlignment(Alignment);
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getDestAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
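    // Multiplying the i8 fill value by 0x0101010101010101 replicates the byte
    // across all eight bytes; the constant is truncated to ITy's width below.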
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

static Value *simplifyX86immShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
    LogicalShift = false; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
    LogicalShift = true; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if count is constant.
  auto Arg1 = II.getArgOperand(1);
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
  auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses only the first 64 bits of the 128-bit vector
    // operand to compute the shift amount.
    auto VT = cast<VectorType>(CDV->getType());
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
    unsigned NumSubElts = 64 / BitWidth;

    // Concatenate the sub-elements to create the 64-bit value.
    for (unsigned i = 0; i != NumSubElts; ++i) {
      unsigned SubEltIdx = (NumSubElts - 1) - i;
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
      Count <<= BitWidth;
      Count |= SubElt->getValue().zextOrTrunc(64);
    }
  }
  else if (CInt)
    Count = CInt->getValue();

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(Vec->getType());
  auto SVT = VT->getElementType();
  unsigned VWidth = VT->getNumElements();
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();

  // If shift-by-zero then just return the original value.
  if (Count.isNullValue())
    return Vec;

  // Handle cases when Shift >= BitWidth.
  if (Count.uge(BitWidth)) {
    // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);

    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }

  // Get a constant vector of the same type as the first operand.
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
static Value *simplifyX86varShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
    LogicalShift = false;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    LogicalShift = true;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
    LogicalShift = true;
    ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if all shift amounts are constant/undef.
  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
  if (!CShift)
    return nullptr;

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(II.getType());
  auto SVT = VT->getVectorElementType();
  int NumElts = VT->getNumElements();
  int BitWidth = SVT->getIntegerBitWidth();

  // Collect each element's shift amount.
  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
  bool AnyOutOfRange = false;
  SmallVector<int, 8> ShiftAmts;
  for (int I = 0; I < NumElts; ++I) {
    auto *CElt = CShift->getAggregateElement(I);
    if (CElt && isa<UndefValue>(CElt)) {
      ShiftAmts.push_back(-1);
      continue;
    }

    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
    if (!COp)
      return nullptr;

    // Handle out of range shifts.
    // If LogicalShift - set to BitWidth (special case).
    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
    APInt ShiftVal = COp->getValue();
    if (ShiftVal.uge(BitWidth)) {
      AnyOutOfRange = LogicalShift;
      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
      continue;
    }

    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
  }

  // If all elements out of range or UNDEF, return vector of zeros/undefs.
  // ArithmeticShift should only hit this if they are all UNDEF.
  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
  if (llvm::all_of(ShiftAmts, OutOfRange)) {
    SmallVector<Constant *, 8> ConstantVec;
    for (int Idx : ShiftAmts) {
      if (Idx < 0) {
        ConstantVec.push_back(UndefValue::get(SVT));
      } else {
        assert(LogicalShift && "Logical shift expected");
        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
      }
    }
    return ConstantVector::get(ConstantVec);
  }

  // We can't handle only some out of range values with generic logical shifts.
  if (AnyOutOfRange)
    return nullptr;

  // Build the shift amount constant vector.
  SmallVector<Constant *, 8> ShiftVecAmts;
  for (int Idx : ShiftAmts) {
    if (Idx < 0)
      ShiftVecAmts.push_back(UndefValue::get(SVT));
    else
      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
  }
  auto ShiftVec = ConstantVector::get(ShiftVecAmts);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

static Value *simplifyX86pack(IntrinsicInst &II, bool IsSigned) {
  Value *Arg0 = II.getArgOperand(0);
  Value *Arg1 = II.getArgOperand(1);
  Type *ResTy = II.getType();

  // Fast all undef handling.
  if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
    return UndefValue::get(ResTy);

  Type *ArgTy = Arg0->getType();
  unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
  unsigned NumDstElts = ResTy->getVectorNumElements();
  unsigned NumSrcElts = ArgTy->getVectorNumElements();
  assert(NumDstElts == (2 * NumSrcElts) && "Unexpected packing types");

  unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
  unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
  unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
  assert(ArgTy->getScalarSizeInBits() == (2 * DstScalarSizeInBits) &&
         "Unexpected packing types");

  // Constant folding.
  auto *Cst0 = dyn_cast<Constant>(Arg0);
  auto *Cst1 = dyn_cast<Constant>(Arg1);
  if (!Cst0 || !Cst1)
    return nullptr;

  SmallVector<Constant *, 32> Vals;
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
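    // Within each 128-bit lane, the low half of the result elements is packed
    // from Cst0 (Arg0) and the high half from Cst1 (Arg1).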
    for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
      unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
      auto *Cst = (Elt >= NumSrcEltsPerLane) ? Cst1 : Cst0;
      auto *COp = Cst->getAggregateElement(SrcIdx);
      if (COp && isa<UndefValue>(COp)) {
        Vals.push_back(UndefValue::get(ResTy->getScalarType()));
        continue;
      }

      auto *CInt = dyn_cast_or_null<ConstantInt>(COp);
      if (!CInt)
        return nullptr;

      APInt Val = CInt->getValue();
      assert(Val.getBitWidth() == ArgTy->getScalarSizeInBits() &&
             "Unexpected constant bitwidth");

      if (IsSigned) {
        // PACKSS: Truncate signed value with signed saturation.
        // Source values less than dst minint are saturated to minint.
        // Source values greater than dst maxint are saturated to maxint.
        if (Val.isSignedIntN(DstScalarSizeInBits))
          Val = Val.trunc(DstScalarSizeInBits);
        else if (Val.isNegative())
          Val = APInt::getSignedMinValue(DstScalarSizeInBits);
        else
          Val = APInt::getSignedMaxValue(DstScalarSizeInBits);
      } else {
        // PACKUS: Truncate signed value with unsigned saturation.
        // Source values less than zero are saturated to zero.
        // Source values greater than dst maxuint are saturated to maxuint.
        if (Val.isIntN(DstScalarSizeInBits))
          Val = Val.trunc(DstScalarSizeInBits);
        else if (Val.isNegative())
          Val = APInt::getNullValue(DstScalarSizeInBits);
        else
          Val = APInt::getAllOnesValue(DstScalarSizeInBits);
      }

      Vals.push_back(ConstantInt::get(ResTy->getScalarType(), Val));
    }
  }

  return ConstantVector::get(Vals);
}

static Value *simplifyX86movmsk(const IntrinsicInst &II) {
  Value *Arg = II.getArgOperand(0);
  Type *ResTy = II.getType();
  Type *ArgTy = Arg->getType();

  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
  if (isa<UndefValue>(Arg))
    return Constant::getNullValue(ResTy);

  // We can't easily peek through x86_mmx types.
  if (!ArgTy->isVectorTy())
    return nullptr;

  auto *C = dyn_cast<Constant>(Arg);
  if (!C)
    return nullptr;

  // Extract signbits of the vector input and pack into integer result.
  APInt Result(ResTy->getPrimitiveSizeInBits(), 0);
  for (unsigned I = 0, E = ArgTy->getVectorNumElements(); I != E; ++I) {
    auto *COp = C->getAggregateElement(I);
    if (!COp)
      return nullptr;
    if (isa<UndefValue>(COp))
      continue;

    auto *CInt = dyn_cast<ConstantInt>(COp);
    auto *CFp = dyn_cast<ConstantFP>(COp);
    if (!CInt && !CFp)
      return nullptr;

    if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
      Result.setBit(I);
  }

  return Constant::getIntegerValue(ResTy, Result);
}

static Value *simplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
  if (!CInt)
    return nullptr;

  VectorType *VecTy = cast<VectorType>(II.getType());
  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

  // The immediate permute control byte looks like this:
  //    [3:0] - zero mask for each 32-bit lane
  //    [5:4] - select one 32-bit destination lane
  //    [7:6] - select one 32-bit source lane

  uint8_t Imm = CInt->getZExtValue();
  uint8_t ZMask = Imm & 0xf;
  uint8_t DestLane = (Imm >> 4) & 0x3;
  uint8_t SourceLane = (Imm >> 6) & 0x3;

  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

  // If all zero mask bits are set, this was just a weird way to
  // generate a zero vector.
  if (ZMask == 0xf)
    return ZeroVector;

  // Initialize by passing all of the first source bits through.
  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };

  // We may replace the second operand with the zero vector.
  Value *V1 = II.getArgOperand(1);

  if (ZMask) {
    // If the zero mask is being used with a single input or the zero mask
    // overrides the destination lane, this is a shuffle with the zero vector.
    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
        (ZMask & (1 << DestLane))) {
      V1 = ZeroVector;
      // We may still move 32-bits of the first source vector from one lane
      // to another.
      ShuffleMask[DestLane] = SourceLane;
      // The zero mask may override the previous insert operation.
      for (unsigned i = 0; i < 4; ++i)
        if ((ZMask >> i) & 0x1)
          ShuffleMask[i] = i + 4;
    } else {
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
      return nullptr;
    }
  } else {
    // Replace the selected destination lane with the selected source lane.
    ShuffleMask[DestLane] = SourceLane + 4;
  }

  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {
  auto LowConstantHighUndef = [&](uint64_t Val) {
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  };

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  ConstantInt *CI0 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;

  // Attempt to constant fold.
  if (CILength && CIIndex) {
    // From AMD documentation: "The bit index and field length are each six
    // bits in length other bits of the field are ignored."
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
    APInt APLength = CILength->getValue().zextOrTrunc(6);

    unsigned Index = APIndex.getZExtValue();

    // From AMD documentation: "a value of zero in the field length is
    // defined as length of 64".
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

    // From AMD documentation: "If the sum of the bit index + length field
    // is greater than 64, the results are undefined".
    unsigned End = Index + Length;

    // Note that both field index and field length are 8-bit quantities.
    // Since variables 'Index' and 'Length' are unsigned values
    // obtained from zero-extending field index and field length
    // respectively, their sum should never wrap around.
    if (End > 64)
      return UndefValue::get(II.getType());

    // If we are extracting whole bytes, we can convert this to a shuffle.
    // Lowering can recognize EXTRQI shuffle masks.
    if ((Length % 8) == 0 && (Index % 8) == 0) {
      // Convert bit indices to byte indices.
      Length /= 8;
      Index /= 8;

      Type *IntTy8 = Type::getInt8Ty(II.getContext());
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      VectorType *ShufTy = VectorType::get(IntTy8, 16);

      SmallVector<Constant *, 16> ShuffleMask;
      for (int i = 0; i != (int)Length; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
      for (int i = Length; i != 8; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
      for (int i = 8; i != 16; ++i)
        ShuffleMask.push_back(UndefValue::get(IntTy32));

      Value *SV = Builder.CreateShuffleVector(
          Builder.CreateBitCast(Op0, ShufTy),
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
      return Builder.CreateBitCast(SV, II.getType());
    }

    // Constant Fold - shift Index'th bit to lowest position and mask off
    // Length bits.
    if (CI0) {
      APInt Elt = CI0->getValue();
      Elt.lshrInPlace(Index);
      Elt = Elt.zextOrTrunc(Length);
      return LowConstantHighUndef(Elt.getZExtValue());
    }

    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
      Value *Args[] = {Op0, CILength, CIIndex};
      Module *M = II.getModule();
      Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
      return Builder.CreateCall(F, Args);
    }
  }

  // Constant Fold - extraction from zero is always {zero, undef}.
  if (CI0 && CI0->isZero())
    return LowConstantHighUndef(0);

  return nullptr;
}

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {
  // From AMD documentation: "The bit index and field length are each six bits
  // in length other bits of the field are ignored."
  APIndex = APIndex.zextOrTrunc(6);
  APLength = APLength.zextOrTrunc(6);

  // Attempt to constant fold.
  unsigned Index = APIndex.getZExtValue();

  // From AMD documentation: "a value of zero in the field length is
  // defined as length of 64".
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

  // From AMD documentation: "If the sum of the bit index + length field
  // is greater than 64, the results are undefined".
  unsigned End = Index + Length;

  // Note that both field index and field length are 8-bit quantities.
  // Since variables 'Index' and 'Length' are unsigned values
  // obtained from zero-extending field index and field length
  // respectively, their sum should never wrap around.
  if (End > 64)
    return UndefValue::get(II.getType());

  // If we are inserting whole bytes, we can convert this to a shuffle.
  // Lowering can recognize INSERTQI shuffle masks.
  if ((Length % 8) == 0 && (Index % 8) == 0) {
    // Convert bit indices to byte indices.
    Length /= 8;
    Index /= 8;

    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
    VectorType *ShufTy = VectorType::get(IntTy8, 16);

    SmallVector<Constant *, 16> ShuffleMask;
    for (int i = 0; i != (int)Index; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 0; i != (int)Length; ++i)
      ShuffleMask.push_back(
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
    for (int i = Index + Length; i != 8; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 8; i != 16; ++i)
      ShuffleMask.push_back(UndefValue::get(IntTy32));

    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
                                            Builder.CreateBitCast(Op1, ShufTy),
                                            ConstantVector::get(ShuffleMask));
    return Builder.CreateBitCast(SV, II.getType());
  }

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  ConstantInt *CI00 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;
  ConstantInt *CI10 =
      C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
         : nullptr;

  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
  if (CI00 && CI10) {
    APInt V00 = CI00->getValue();
    APInt V10 = CI10->getValue();
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
    V00 = V00 & ~Mask;
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
    APInt Val = V00 | V10;
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  }

  // If we were an INSERTQ call, we'll save demanded elements if we convert to
  // INSERTQI.
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);

    Value *Args[] = {Op0, Op1, CILength, CIIndex};
    Module *M = II.getModule();
    Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
    return Builder.CreateCall(F, Args);
  }

  return nullptr;
}

/// Attempt to convert pshufb* to shufflevector if the mask is constant.
static Value *simplifyX86pshufb(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getNumElements();
  assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
         "Unexpected number of elements in shuffle mask!");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  // Each byte in the shuffle control mask forms an index to permute the
  // corresponding byte in the destination operand.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();

    // If the most significant bit (bit[7]) of each byte of the shuffle
    // control mask is set, then zero is written in the result byte.
    // The zero vector is in the right-hand side of the resulting
    // shufflevector.

    // The value of each index for the high 128-bit lane is the least
    // significant 4 bits of the respective shuffle control byte.
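    // Adding (I & 0xF0) keeps the index within the same 128-bit lane as
    // element I, since pshufb never moves bytes across lanes.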
    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = Constant::getNullValue(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
                                    InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getVectorNumElements();
  bool IsPD = VecTy->getScalarType()->isDoubleTy();
  unsigned NumLaneElts = IsPD ? 2 : 4;
  assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[16] = {nullptr};

  // The intrinsics only read one or two bits, clear the rest.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    APInt Index = cast<ConstantInt>(COp)->getValue();
    Index = Index.zextOrTrunc(32).getLoBits(2);

    // The PD variants use bit 1 to select the per-lane element index, so
    // shift down to convert to a generic shuffle mask index.
    if (IsPD)
      Index.lshrInPlace(1);

    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    Index += APInt(32, (I / NumLaneElts) * NumLaneElts);

    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
static Value *simplifyX86vpermv(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned Size = VecTy->getNumElements();
  assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
         "Unexpected shuffle mask size");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  for (unsigned I = 0; I < Size; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
    Index &= Size - 1;
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Decode XOP integer vector comparison intrinsics.
static Value *simplifyX86vpcom(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder,
                               bool IsSigned) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
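    // Only the low 3 bits of the immediate select the comparison predicate;
    // higher bits are masked off.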
    uint64_t Imm = CInt->getZExtValue() & 0x7;
    VectorType *VecTy = cast<VectorType>(II.getType());
    CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;

    switch (Imm) {
    case 0x0:
      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
      break;
    case 0x1:
      Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
      break;
    case 0x2:
      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      break;
    case 0x3:
      Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      break;
    case 0x4:
      Pred = ICmpInst::ICMP_EQ; break;
    case 0x5:
      Pred = ICmpInst::ICMP_NE; break;
    case 0x6:
      return ConstantInt::getSigned(VecTy, 0); // FALSE
    case 0x7:
      return ConstantInt::getSigned(VecTy, -1); // TRUE
    }

    if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0),
                                        II.getArgOperand(1)))
      return Builder.CreateSExtOrTrunc(Cmp, VecTy);
  }
  return nullptr;
}

// Emit a select instruction and appropriate bitcasts to help simplify
// masked intrinsics.
static Value *emitX86MaskSelect(Value *Mask, Value *Op0, Value *Op1,
                                InstCombiner::BuilderTy &Builder) {
  unsigned VWidth = Op0->getType()->getVectorNumElements();

  // If the mask is all ones we don't need the select. But we need to check
  // only the bits that will be used in case VWidth is less than 8.
  if (auto *C = dyn_cast<ConstantInt>(Mask))
    if (C->getValue().zextOrTrunc(VWidth).isAllOnesValue())
      return Op0;

  auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
                         cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (VWidth < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != VWidth; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, VWidth),
                                       "extract");
  }

  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *simplifyMinnumMaxnum(const IntrinsicInst &II) {
  Value *Arg0 = II.getArgOperand(0);
  Value *Arg1 = II.getArgOperand(1);

  // fmin(x, x) -> x
  if (Arg0 == Arg1)
    return Arg0;

  const auto *C1 = dyn_cast<ConstantFP>(Arg1);

  // fmin(x, nan) -> x
  if (C1 && C1->isNaN())
    return Arg0;

  // If one operand is undef, return the other: even if undef were NaN, we
  // would return the other value, since fmin/fmax cannot return NaN unless
  // both operands are NaN.
1081 //
1082 // fmin(undef, x) -> x
1083 if (isa<UndefValue>(Arg0))
1084 return Arg1;
1085
1086 // fmin(x, undef) -> x
1087 if (isa<UndefValue>(Arg1))
1088 return Arg0;
1089
1090 Value *X = nullptr;
1091 Value *Y = nullptr;
1092 if (II.getIntrinsicID() == Intrinsic::minnum) {
1093 // fmin(x, fmin(x, y)) -> fmin(x, y)
1094 // fmin(y, fmin(x, y)) -> fmin(x, y)
1095 if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
1096 if (Arg0 == X || Arg0 == Y)
1097 return Arg1;
1098 }
1099
1100 // fmin(fmin(x, y), x) -> fmin(x, y)
1101 // fmin(fmin(x, y), y) -> fmin(x, y)
1102 if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
1103 if (Arg1 == X || Arg1 == Y)
1104 return Arg0;
1105 }
1106
1107 // TODO: fmin(nnan x, inf) -> x
1108 // TODO: fmin(nnan ninf x, flt_max) -> x
1109 if (C1 && C1->isInfinity()) {
1110 // fmin(x, -inf) -> -inf
1111 if (C1->isNegative())
1112 return Arg1;
1113 }
1114 } else {
1115 assert(II.getIntrinsicID() == Intrinsic::maxnum);
1116 // fmax(x, fmax(x, y)) -> fmax(x, y)
1117 // fmax(y, fmax(x, y)) -> fmax(x, y)
1118 if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
1119 if (Arg0 == X || Arg0 == Y)
1120 return Arg1;
1121 }
1122
1123 // fmax(fmax(x, y), x) -> fmax(x, y)
1124 // fmax(fmax(x, y), y) -> fmax(x, y)
1125 if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
1126 if (Arg1 == X || Arg1 == Y)
1127 return Arg0;
1128 }
1129
1130 // TODO: fmax(nnan x, -inf) -> x
1131 // TODO: fmax(nnan ninf x, -flt_max) -> x
1132 if (C1 && C1->isInfinity()) {
1133 // fmax(x, inf) -> inf
1134 if (!C1->isNegative())
1135 return Arg1;
1136 }
1137 }
1138 return nullptr;
1139}
1140
David Majnemer666aa942016-07-14 06:58:42 +00001141static bool maskIsAllOneOrUndef(Value *Mask) {
1142 auto *ConstMask = dyn_cast<Constant>(Mask);
1143 if (!ConstMask)
1144 return false;
1145 if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1146 return true;
1147 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
1148 ++I) {
1149 if (auto *MaskElt = ConstMask->getAggregateElement(I))
1150 if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1151 continue;
1152 return false;
1153 }
1154 return true;
1155}
1156
Sanjay Patelb695c552016-02-01 17:00:10 +00001157static Value *simplifyMaskedLoad(const IntrinsicInst &II,
1158 InstCombiner::BuilderTy &Builder) {
David Majnemer666aa942016-07-14 06:58:42 +00001159 // If the mask is all ones or undefs, this is a plain vector load of the 1st
1160 // argument.
1161 if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
Sanjay Patelb695c552016-02-01 17:00:10 +00001162 Value *LoadPtr = II.getArgOperand(0);
1163 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
1164 return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
1165 }
1166
1167 return nullptr;
1168}
1169
Sanjay Patel04f792b2016-02-01 19:39:52 +00001170static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1171 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1172 if (!ConstMask)
1173 return nullptr;
1174
1175 // If the mask is all zeros, this instruction does nothing.
1176 if (ConstMask->isNullValue())
Sanjay Patel4b198802016-02-01 22:23:39 +00001177 return IC.eraseInstFromFunction(II);
Sanjay Patel04f792b2016-02-01 19:39:52 +00001178
1179 // If the mask is all ones, this is a plain vector store of the 1st argument.
1180 if (ConstMask->isAllOnesValue()) {
1181 Value *StorePtr = II.getArgOperand(1);
1182 unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
1183 return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
1184 }
1185
1186 return nullptr;
1187}
1188
Sanjay Patel103ab7d2016-02-01 22:10:26 +00001189static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
1190 // If the mask is all zeros, return the "passthru" argument of the gather.
1191 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
1192 if (ConstMask && ConstMask->isNullValue())
Sanjay Patel4b198802016-02-01 22:23:39 +00001193 return IC.replaceInstUsesWith(II, II.getArgOperand(3));
Sanjay Patel103ab7d2016-02-01 22:10:26 +00001194
1195 return nullptr;
1196}
1197
1198static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
1199 // If the mask is all zeros, a scatter does nothing.
1200 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
1201 if (ConstMask && ConstMask->isNullValue())
Sanjay Patel4b198802016-02-01 22:23:39 +00001202 return IC.eraseInstFromFunction(II);
Sanjay Patel103ab7d2016-02-01 22:10:26 +00001203
1204 return nullptr;
1205}
1206
Amaury Sechet763c59d2016-08-18 20:43:50 +00001207static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
1208 assert((II.getIntrinsicID() == Intrinsic::cttz ||
1209 II.getIntrinsicID() == Intrinsic::ctlz) &&
1210 "Expected cttz or ctlz intrinsic");
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001211 Value *Op0 = II.getArgOperand(0);
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001212
Craig Topper8205a1a2017-05-24 16:53:07 +00001213 KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001214
 1215  // Count the zero bits above (ctlz) or below (cttz) the first known one.
1216 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
Craig Topper8df66c62017-05-12 17:20:30 +00001217 unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
1218 : Known.countMaxLeadingZeros();
1219 unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
1220 : Known.countMinLeadingZeros();
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001221
1222 // If all bits above (ctlz) or below (cttz) the first known one are known
1223 // zero, this value is constant.
1224 // FIXME: This should be in InstSimplify because we're replacing an
1225 // instruction with a constant.
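  // Worked example (illustrative): for a cttz input whose low three bits are
  // known zero and whose bit 3 is known one, countMinTrailingZeros() and
  // countMaxTrailingZeros() both return 3, so the call folds to the constant 3.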
Craig Topper9474e9b2017-04-27 04:51:25 +00001226 if (PossibleZeros == DefiniteZeros) {
Craig Topper0799ff92017-06-03 18:50:32 +00001227 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
Amaury Sechet763c59d2016-08-18 20:43:50 +00001228 return IC.replaceInstUsesWith(II, C);
1229 }
1230
1231 // If the input to cttz/ctlz is known to be non-zero,
1232 // then change the 'ZeroIsUndef' parameter to 'true'
1233 // because we know the zero behavior can't affect the result.
Craig Topper73ba1c82017-06-07 07:40:37 +00001234 if (!Known.One.isNullValue() ||
Craig Topperd45185f2017-05-26 18:23:57 +00001235 isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
1236 &IC.getDominatorTree())) {
Amaury Sechet763c59d2016-08-18 20:43:50 +00001237 if (!match(II.getArgOperand(1), m_One())) {
Craig Topperbb4069e2017-07-07 23:16:26 +00001238 II.setOperand(1, IC.Builder.getTrue());
Amaury Sechet763c59d2016-08-18 20:43:50 +00001239 return &II;
1240 }
1241 }
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001242
Craig Topper5b173f22017-06-21 16:32:35 +00001243 // Add range metadata since known bits can't completely reflect what we know.
1244 // TODO: Handle splat vectors.
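  // For instance, with DefiniteZeros = 3 and PossibleZeros = 8 on an i32
  // input, the call gets annotated with !range !{i32 3, i32 9} (illustrative
  // values; the upper bound of a range is exclusive).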
1245 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1246 if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1247 Metadata *LowAndHigh[] = {
1248 ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
1249 ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
1250 II.setMetadata(LLVMContext::MD_range,
1251 MDNode::get(II.getContext(), LowAndHigh));
1252 return &II;
1253 }
1254
1255 return nullptr;
1256}
1257
1258static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
1259 assert(II.getIntrinsicID() == Intrinsic::ctpop &&
1260 "Expected ctpop intrinsic");
1261 Value *Op0 = II.getArgOperand(0);
1262 // FIXME: Try to simplify vectors of integers.
1263 auto *IT = dyn_cast<IntegerType>(Op0->getType());
1264 if (!IT)
1265 return nullptr;
1266
1267 unsigned BitWidth = IT->getBitWidth();
1268 KnownBits Known(BitWidth);
1269 IC.computeKnownBits(Op0, Known, 0, &II);
1270
1271 unsigned MinCount = Known.countMinPopulation();
1272 unsigned MaxCount = Known.countMaxPopulation();
1273
1274 // Add range metadata since known bits can't completely reflect what we know.
1275 if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
1276 Metadata *LowAndHigh[] = {
1277 ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
1278 ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
1279 II.setMetadata(LLVMContext::MD_range,
1280 MDNode::get(II.getContext(), LowAndHigh));
1281 return &II;
1282 }
1283
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001284 return nullptr;
1285}
1286
Sanjay Patel1ace9932016-02-26 21:04:14 +00001287// TODO: If the x86 backend knew how to convert a bool vector mask back to an
1288// XMM register mask efficiently, we could transform all x86 masked intrinsics
1289// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
Sanjay Patel98a71502016-02-29 23:16:48 +00001290static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
1291 Value *Ptr = II.getOperand(0);
1292 Value *Mask = II.getOperand(1);
Sanjay Patel5e5056d2016-04-12 23:16:23 +00001293 Constant *ZeroVec = Constant::getNullValue(II.getType());
Sanjay Patel98a71502016-02-29 23:16:48 +00001294
1295 // Special case a zero mask since that's not a ConstantDataVector.
Sanjay Patel5e5056d2016-04-12 23:16:23 +00001296 // This masked load instruction creates a zero vector.
Sanjay Patel98a71502016-02-29 23:16:48 +00001297 if (isa<ConstantAggregateZero>(Mask))
Sanjay Patel5e5056d2016-04-12 23:16:23 +00001298 return IC.replaceInstUsesWith(II, ZeroVec);
Sanjay Patel98a71502016-02-29 23:16:48 +00001299
1300 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1301 if (!ConstMask)
1302 return nullptr;
1303
 1304  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
1305 // to allow target-independent optimizations.
1306
1307 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1308 // the LLVM intrinsic definition for the pointer argument.
1309 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1310 PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
Craig Topperbb4069e2017-07-07 23:16:26 +00001311 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
Sanjay Patel98a71502016-02-29 23:16:48 +00001312
1313 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1314 // on each element's most significant bit (the sign bit).
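  // For example, the constant mask <i32 -1, i32 0, i32 -1, i32 0> becomes the
  // bool vector <i1 1, i1 0, i1 1, i1 0> (illustrative values).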
1315 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1316
Sanjay Patel5e5056d2016-04-12 23:16:23 +00001317 // The pass-through vector for an x86 masked load is a zero vector.
1318 CallInst *NewMaskedLoad =
Craig Topperbb4069e2017-07-07 23:16:26 +00001319 IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
Sanjay Patel98a71502016-02-29 23:16:48 +00001320 return IC.replaceInstUsesWith(II, NewMaskedLoad);
1321}
1322
1323// TODO: If the x86 backend knew how to convert a bool vector mask back to an
1324// XMM register mask efficiently, we could transform all x86 masked intrinsics
1325// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
Sanjay Patel1ace9932016-02-26 21:04:14 +00001326static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
1327 Value *Ptr = II.getOperand(0);
1328 Value *Mask = II.getOperand(1);
1329 Value *Vec = II.getOperand(2);
1330
1331 // Special case a zero mask since that's not a ConstantDataVector:
1332 // this masked store instruction does nothing.
1333 if (isa<ConstantAggregateZero>(Mask)) {
1334 IC.eraseInstFromFunction(II);
1335 return true;
1336 }
1337
Sanjay Patelc4acbae2016-03-12 15:16:59 +00001338  // The SSE2 version is too weird (e.g., unaligned but non-temporal) to do
1339 // anything else at this level.
1340 if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
1341 return false;
1342
Sanjay Patel1ace9932016-02-26 21:04:14 +00001343 auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
1344 if (!ConstMask)
1345 return false;
1346
 1347  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
1348 // to allow target-independent optimizations.
1349
1350 // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
1351 // the LLVM intrinsic definition for the pointer argument.
1352 unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
1353 PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
Craig Topperbb4069e2017-07-07 23:16:26 +00001354 Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
Sanjay Patel1ace9932016-02-26 21:04:14 +00001355
1356 // Second, convert the x86 XMM integer vector mask to a vector of bools based
1357 // on each element's most significant bit (the sign bit).
1358 Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
1359
Craig Topperbb4069e2017-07-07 23:16:26 +00001360 IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
Sanjay Patel1ace9932016-02-26 21:04:14 +00001361
1362 // 'Replace uses' doesn't work for stores. Erase the original masked store.
1363 IC.eraseInstFromFunction(II);
1364 return true;
1365}
1366
Matt Arsenaultcdb468c2017-02-27 23:08:49 +00001367// Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
1368//
1369// A single NaN input is folded to minnum, so we rely on that folding for
1370// handling NaNs.
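// The median is found by taking the maximum of all three inputs and then
// returning the maximum of the two remaining inputs. Worked example
// (illustrative): fmed3(1.0, 5.0, 3.0) computes Max3 = 5.0, which equals
// Src1, so the result is maxnum(1.0, 3.0) = 3.0.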
1371static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
1372 const APFloat &Src2) {
1373 APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
1374
1375 APFloat::cmpResult Cmp0 = Max3.compare(Src0);
1376 assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
1377 if (Cmp0 == APFloat::cmpEqual)
1378 return maxnum(Src1, Src2);
1379
1380 APFloat::cmpResult Cmp1 = Max3.compare(Src1);
1381 assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
1382 if (Cmp1 == APFloat::cmpEqual)
1383 return maxnum(Src0, Src2);
1384
1385 return maxnum(Src0, Src1);
1386}
1387
Arnaud A. de Grandmaison333ef382016-05-10 09:24:49 +00001388// Returns true iff the two intrinsics have the same operands, limiting the
 1389// comparison to the first NumOperands operands.
1390static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
1391 unsigned NumOperands) {
1392 assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
1393 assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
1394 for (unsigned i = 0; i < NumOperands; i++)
1395 if (I.getArgOperand(i) != E.getArgOperand(i))
1396 return false;
1397 return true;
1398}
1399
1400// Remove trivially empty start/end intrinsic ranges, i.e. a start
1401// immediately followed by an end (ignoring debuginfo or other
1402// start/end intrinsics in between). As this handles only the most trivial
1403// cases, tracking the nesting level is not needed:
1404//
1405// call @llvm.foo.start(i1 0) ; &I
1406// call @llvm.foo.start(i1 0)
1407// call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
1408// call @llvm.foo.end(i1 0)
1409static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
1410 unsigned EndID, InstCombiner &IC) {
1411 assert(I.getIntrinsicID() == StartID &&
1412 "Start intrinsic does not have expected ID");
1413 BasicBlock::iterator BI(I), BE(I.getParent()->end());
1414 for (++BI; BI != BE; ++BI) {
1415 if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
1416 if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
1417 continue;
1418 if (E->getIntrinsicID() == EndID &&
1419 haveSameOperands(I, *E, E->getNumArgOperands())) {
1420 IC.eraseInstFromFunction(*E);
1421 IC.eraseInstFromFunction(I);
1422 return true;
1423 }
1424 }
1425 break;
1426 }
1427
1428 return false;
1429}
1430
Justin Lebar698c31b2017-01-27 00:58:58 +00001431// Convert NVVM intrinsics to target-generic LLVM code where possible.
1432static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
1433 // Each NVVM intrinsic we can simplify can be replaced with one of:
1434 //
1435 // * an LLVM intrinsic,
1436 // * an LLVM cast operation,
1437 // * an LLVM binary operation, or
1438 // * ad-hoc LLVM IR for the particular operation.
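  // For example (illustrative): llvm.nvvm.ceil.d maps to the target-generic
  // llvm.ceil.f64 intrinsic, llvm.nvvm.d2i.rz maps to an fptosi cast, and
  // llvm.nvvm.add.rn.d maps to a plain fadd.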
1439
1440 // Some transformations are only valid when the module's
1441 // flush-denormals-to-zero (ftz) setting is true/false, whereas other
1442 // transformations are valid regardless of the module's ftz setting.
1443 enum FtzRequirementTy {
1444 FTZ_Any, // Any ftz setting is ok.
1445 FTZ_MustBeOn, // Transformation is valid only if ftz is on.
1446 FTZ_MustBeOff, // Transformation is valid only if ftz is off.
1447 };
1448 // Classes of NVVM intrinsics that can't be replaced one-to-one with a
1449 // target-generic intrinsic, cast op, or binary op but that we can nonetheless
1450 // simplify.
1451 enum SpecialCase {
1452 SPC_Reciprocal,
1453 };
1454
 1455  // SimplifyAction is a poor man's variant type (plus an additional flag) that
1456 // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
1457 struct SimplifyAction {
1458 // Invariant: At most one of these Optionals has a value.
1459 Optional<Intrinsic::ID> IID;
1460 Optional<Instruction::CastOps> CastOp;
1461 Optional<Instruction::BinaryOps> BinaryOp;
1462 Optional<SpecialCase> Special;
1463
1464 FtzRequirementTy FtzRequirement = FTZ_Any;
1465
1466 SimplifyAction() = default;
1467
1468 SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
1469 : IID(IID), FtzRequirement(FtzReq) {}
1470
1471 // Cast operations don't have anything to do with FTZ, so we skip that
1472 // argument.
1473 SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
1474
1475 SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
1476 : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
1477
1478 SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
1479 : Special(Special), FtzRequirement(FtzReq) {}
1480 };
1481
1482 // Try to generate a SimplifyAction describing how to replace our
1483 // IntrinsicInstr with target-generic LLVM IR.
1484 const SimplifyAction Action = [II]() -> SimplifyAction {
1485 switch (II->getIntrinsicID()) {
Justin Lebar698c31b2017-01-27 00:58:58 +00001486 // NVVM intrinsics that map directly to LLVM intrinsics.
1487 case Intrinsic::nvvm_ceil_d:
1488 return {Intrinsic::ceil, FTZ_Any};
1489 case Intrinsic::nvvm_ceil_f:
1490 return {Intrinsic::ceil, FTZ_MustBeOff};
1491 case Intrinsic::nvvm_ceil_ftz_f:
1492 return {Intrinsic::ceil, FTZ_MustBeOn};
1493 case Intrinsic::nvvm_fabs_d:
1494 return {Intrinsic::fabs, FTZ_Any};
1495 case Intrinsic::nvvm_fabs_f:
1496 return {Intrinsic::fabs, FTZ_MustBeOff};
1497 case Intrinsic::nvvm_fabs_ftz_f:
1498 return {Intrinsic::fabs, FTZ_MustBeOn};
1499 case Intrinsic::nvvm_floor_d:
1500 return {Intrinsic::floor, FTZ_Any};
1501 case Intrinsic::nvvm_floor_f:
1502 return {Intrinsic::floor, FTZ_MustBeOff};
1503 case Intrinsic::nvvm_floor_ftz_f:
1504 return {Intrinsic::floor, FTZ_MustBeOn};
1505 case Intrinsic::nvvm_fma_rn_d:
1506 return {Intrinsic::fma, FTZ_Any};
1507 case Intrinsic::nvvm_fma_rn_f:
1508 return {Intrinsic::fma, FTZ_MustBeOff};
1509 case Intrinsic::nvvm_fma_rn_ftz_f:
1510 return {Intrinsic::fma, FTZ_MustBeOn};
1511 case Intrinsic::nvvm_fmax_d:
1512 return {Intrinsic::maxnum, FTZ_Any};
1513 case Intrinsic::nvvm_fmax_f:
1514 return {Intrinsic::maxnum, FTZ_MustBeOff};
1515 case Intrinsic::nvvm_fmax_ftz_f:
1516 return {Intrinsic::maxnum, FTZ_MustBeOn};
1517 case Intrinsic::nvvm_fmin_d:
1518 return {Intrinsic::minnum, FTZ_Any};
1519 case Intrinsic::nvvm_fmin_f:
1520 return {Intrinsic::minnum, FTZ_MustBeOff};
1521 case Intrinsic::nvvm_fmin_ftz_f:
1522 return {Intrinsic::minnum, FTZ_MustBeOn};
1523 case Intrinsic::nvvm_round_d:
1524 return {Intrinsic::round, FTZ_Any};
1525 case Intrinsic::nvvm_round_f:
1526 return {Intrinsic::round, FTZ_MustBeOff};
1527 case Intrinsic::nvvm_round_ftz_f:
1528 return {Intrinsic::round, FTZ_MustBeOn};
1529 case Intrinsic::nvvm_sqrt_rn_d:
1530 return {Intrinsic::sqrt, FTZ_Any};
1531 case Intrinsic::nvvm_sqrt_f:
1532 // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
1533 // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
1534 // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
1535 // the versions with explicit ftz-ness.
1536 return {Intrinsic::sqrt, FTZ_Any};
1537 case Intrinsic::nvvm_sqrt_rn_f:
1538 return {Intrinsic::sqrt, FTZ_MustBeOff};
1539 case Intrinsic::nvvm_sqrt_rn_ftz_f:
1540 return {Intrinsic::sqrt, FTZ_MustBeOn};
1541 case Intrinsic::nvvm_trunc_d:
1542 return {Intrinsic::trunc, FTZ_Any};
1543 case Intrinsic::nvvm_trunc_f:
1544 return {Intrinsic::trunc, FTZ_MustBeOff};
1545 case Intrinsic::nvvm_trunc_ftz_f:
1546 return {Intrinsic::trunc, FTZ_MustBeOn};
1547
1548 // NVVM intrinsics that map to LLVM cast operations.
1549 //
1550 // Note that llvm's target-generic conversion operators correspond to the rz
1551 // (round to zero) versions of the nvvm conversion intrinsics, even though
 1552    // almost everything else here uses the rn (round to nearest even) nvvm ops.
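    // For example (illustrative): llvm.nvvm.f2i.rz(float %x) becomes
    //   %r = fptosi float %x to i32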
1553 case Intrinsic::nvvm_d2i_rz:
1554 case Intrinsic::nvvm_f2i_rz:
1555 case Intrinsic::nvvm_d2ll_rz:
1556 case Intrinsic::nvvm_f2ll_rz:
1557 return {Instruction::FPToSI};
1558 case Intrinsic::nvvm_d2ui_rz:
1559 case Intrinsic::nvvm_f2ui_rz:
1560 case Intrinsic::nvvm_d2ull_rz:
1561 case Intrinsic::nvvm_f2ull_rz:
1562 return {Instruction::FPToUI};
1563 case Intrinsic::nvvm_i2d_rz:
1564 case Intrinsic::nvvm_i2f_rz:
1565 case Intrinsic::nvvm_ll2d_rz:
1566 case Intrinsic::nvvm_ll2f_rz:
1567 return {Instruction::SIToFP};
1568 case Intrinsic::nvvm_ui2d_rz:
1569 case Intrinsic::nvvm_ui2f_rz:
1570 case Intrinsic::nvvm_ull2d_rz:
1571 case Intrinsic::nvvm_ull2f_rz:
1572 return {Instruction::UIToFP};
1573
1574 // NVVM intrinsics that map to LLVM binary ops.
1575 case Intrinsic::nvvm_add_rn_d:
1576 return {Instruction::FAdd, FTZ_Any};
1577 case Intrinsic::nvvm_add_rn_f:
1578 return {Instruction::FAdd, FTZ_MustBeOff};
1579 case Intrinsic::nvvm_add_rn_ftz_f:
1580 return {Instruction::FAdd, FTZ_MustBeOn};
1581 case Intrinsic::nvvm_mul_rn_d:
1582 return {Instruction::FMul, FTZ_Any};
1583 case Intrinsic::nvvm_mul_rn_f:
1584 return {Instruction::FMul, FTZ_MustBeOff};
1585 case Intrinsic::nvvm_mul_rn_ftz_f:
1586 return {Instruction::FMul, FTZ_MustBeOn};
1587 case Intrinsic::nvvm_div_rn_d:
1588 return {Instruction::FDiv, FTZ_Any};
1589 case Intrinsic::nvvm_div_rn_f:
1590 return {Instruction::FDiv, FTZ_MustBeOff};
1591 case Intrinsic::nvvm_div_rn_ftz_f:
1592 return {Instruction::FDiv, FTZ_MustBeOn};
1593
1594 // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
1595 // need special handling.
1596 //
Hiroshi Inoue0ca79dc2017-07-11 06:04:59 +00001597 // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
Justin Lebar698c31b2017-01-27 00:58:58 +00001598 // as well.
1599 case Intrinsic::nvvm_rcp_rn_d:
1600 return {SPC_Reciprocal, FTZ_Any};
1601 case Intrinsic::nvvm_rcp_rn_f:
1602 return {SPC_Reciprocal, FTZ_MustBeOff};
1603 case Intrinsic::nvvm_rcp_rn_ftz_f:
1604 return {SPC_Reciprocal, FTZ_MustBeOn};
1605
1606 // We do not currently simplify intrinsics that give an approximate answer.
1607 // These include:
1608 //
1609 // - nvvm_cos_approx_{f,ftz_f}
1610 // - nvvm_ex2_approx_{d,f,ftz_f}
1611 // - nvvm_lg2_approx_{d,f,ftz_f}
1612 // - nvvm_sin_approx_{f,ftz_f}
1613 // - nvvm_sqrt_approx_{f,ftz_f}
1614 // - nvvm_rsqrt_approx_{d,f,ftz_f}
1615 // - nvvm_div_approx_{ftz_d,ftz_f,f}
1616 // - nvvm_rcp_approx_ftz_d
1617 //
1618 // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
1619 // means that fastmath is enabled in the intrinsic. Unfortunately only
1620 // binary operators (currently) have a fastmath bit in SelectionDAG, so this
1621 // information gets lost and we can't select on it.
1622 //
1623 // TODO: div and rcp are lowered to a binary op, so these we could in theory
1624 // lower them to "fast fdiv".
1625
1626 default:
1627 return {};
1628 }
1629 }();
1630
1631 // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
1632 // can bail out now. (Notice that in the case that IID is not an NVVM
1633 // intrinsic, we don't have to look up any module metadata, as
1634 // FtzRequirementTy will be FTZ_Any.)
1635 if (Action.FtzRequirement != FTZ_Any) {
1636 bool FtzEnabled =
1637 II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
1638 "true";
1639
1640 if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
1641 return nullptr;
1642 }
1643
1644 // Simplify to target-generic intrinsic.
1645 if (Action.IID) {
1646 SmallVector<Value *, 4> Args(II->arg_operands());
1647 // All the target-generic intrinsics currently of interest to us have one
1648 // type argument, equal to that of the nvvm intrinsic's argument.
Justin Lebare3ac0fb2017-01-27 01:49:39 +00001649 Type *Tys[] = {II->getArgOperand(0)->getType()};
Justin Lebar698c31b2017-01-27 00:58:58 +00001650 return CallInst::Create(
1651 Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
1652 }
1653
1654 // Simplify to target-generic binary op.
1655 if (Action.BinaryOp)
1656 return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
1657 II->getArgOperand(1), II->getName());
1658
1659 // Simplify to target-generic cast op.
1660 if (Action.CastOp)
1661 return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
1662 II->getName());
1663
1664 // All that's left are the special cases.
1665 if (!Action.Special)
1666 return nullptr;
1667
1668 switch (*Action.Special) {
1669 case SPC_Reciprocal:
1670 // Simplify reciprocal.
1671 return BinaryOperator::Create(
1672 Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
1673 II->getArgOperand(0), II->getName());
1674 }
Justin Lebar25ebe2d2017-01-27 02:04:07 +00001675 llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
Justin Lebar698c31b2017-01-27 00:58:58 +00001676}
1677
Arnaud A. de Grandmaison333ef382016-05-10 09:24:49 +00001678Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
1679 removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
1680 return nullptr;
1681}
1682
1683Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
1684 removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
1685 return nullptr;
1686}
1687
Sanjay Patelcd4377c2016-01-20 22:24:38 +00001688/// CallInst simplification. This mostly only handles folding of intrinsic
1689/// instructions. For normal calls, it allows visitCallSite to do the heavy
1690/// lifting.
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001691Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Philip Reames7a6db4f2017-12-27 00:16:12 +00001692 if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
Sanjay Patel4b198802016-02-01 22:23:39 +00001693 return replaceInstUsesWith(CI, V);
David Majnemer15032582015-05-22 03:56:46 +00001694
Justin Bogner99798402016-08-05 01:06:44 +00001695 if (isFreeCall(&CI, &TLI))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001696 return visitFree(CI);
1697
1698 // If the caller function is nounwind, mark the call as nounwind, even if the
1699 // callee isn't.
Sanjay Patel5a470952016-08-11 15:16:06 +00001700 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001701 CI.setDoesNotThrow();
1702 return &CI;
1703 }
Jim Grosbach7815f562012-02-03 00:07:04 +00001704
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001705 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1706 if (!II) return visitCallSite(&CI);
Gabor Greif589a0b92010-06-24 12:58:35 +00001707
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001708 // Intrinsics cannot occur in an invoke, so handle them here instead of in
1709 // visitCallSite.
Daniel Neilson8f30ec62018-05-11 14:30:02 +00001710 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001711 bool Changed = false;
1712
1713 // memmove/cpy/set of zero bytes is a noop.
1714 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
Chris Lattnerc663a672010-10-01 05:51:02 +00001715 if (NumBytes->isNullValue())
Sanjay Patel4b198802016-02-01 22:23:39 +00001716 return eraseInstFromFunction(CI);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001717
1718 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
1719 if (CI->getZExtValue() == 1) {
1720 // Replace the instruction with just byte operations. We would
1721 // transform other cases to loads/stores, but we don't know if
1722 // alignment is sufficient.
1723 }
1724 }
Jim Grosbach7815f562012-02-03 00:07:04 +00001725
Chris Lattnerc663a672010-10-01 05:51:02 +00001726 // No other transformations apply to volatile transfers.
Daniel Neilson8f30ec62018-05-11 14:30:02 +00001727 if (auto *M = dyn_cast<MemIntrinsic>(MI))
1728 if (M->isVolatile())
1729 return nullptr;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001730
1731 // If we have a memmove and the source operation is a constant global,
1732 // then the source and dest pointers can't alias, so we can change this
1733 // into a call to memcpy.
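    // For example (a sketch): memmove(%dst, @constant_global, i64 64) is
    // rewritten as memcpy(%dst, @constant_global, i64 64), since no write
    // through %dst can modify the read-only source.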
Daniel Neilson8f30ec62018-05-11 14:30:02 +00001734 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001735 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1736 if (GVSrc->isConstant()) {
Sanjay Patelaf674fb2015-12-14 17:24:23 +00001737 Module *M = CI.getModule();
Daniel Neilson8f30ec62018-05-11 14:30:02 +00001738 Intrinsic::ID MemCpyID =
1739 isa<AtomicMemMoveInst>(MMI)
1740 ? Intrinsic::memcpy_element_unordered_atomic
1741 : Intrinsic::memcpy;
Jay Foadb804a2b2011-07-12 14:06:48 +00001742 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1743 CI.getArgOperand(1)->getType(),
1744 CI.getArgOperand(2)->getType() };
Benjamin Kramere6e19332011-07-14 17:45:39 +00001745 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001746 Changed = true;
1747 }
1748 }
1749
Daniel Neilson8f30ec62018-05-11 14:30:02 +00001750 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001751 // memmove(x,x,size) -> noop.
1752 if (MTI->getSource() == MTI->getDest())
Sanjay Patel4b198802016-02-01 22:23:39 +00001753 return eraseInstFromFunction(CI);
Eric Christopher7258dcd2010-04-16 23:37:20 +00001754 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001755
Eric Christopher7258dcd2010-04-16 23:37:20 +00001756 // If we can determine a pointer alignment that is bigger than currently
1757 // set, update the alignment.
Daniel Neilson8f30ec62018-05-11 14:30:02 +00001758 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1759 if (Instruction *I = SimplifyAnyMemTransfer(MTI))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001760 return I;
1761 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
1762 if (Instruction *I = SimplifyMemSet(MSI))
1763 return I;
1764 }
Gabor Greif590d95e2010-06-24 13:42:49 +00001765
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001766 if (Changed) return II;
1767 }
Jim Grosbach7815f562012-02-03 00:07:04 +00001768
Justin Lebar698c31b2017-01-27 00:58:58 +00001769 if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
1770 return I;
1771
Sanjay Patel1c600c62016-01-20 16:41:43 +00001772 auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
1773 unsigned DemandedWidth) {
Simon Pilgrim61116dd2015-09-17 20:32:45 +00001774 APInt UndefElts(Width, 0);
1775 APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
1776 return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
1777 };
1778
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001779 switch (II->getIntrinsicID()) {
1780 default: break;
George Burgess IV3f089142016-12-20 23:46:36 +00001781 case Intrinsic::objectsize:
1782 if (ConstantInt *N =
1783 lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
1784 return replaceInstUsesWith(CI, N);
Craig Topperf40110f2014-04-25 05:29:35 +00001785 return nullptr;
Michael Ilseman536cc322012-12-13 03:13:36 +00001786 case Intrinsic::bswap: {
1787 Value *IIOperand = II->getArgOperand(0);
Craig Topperf40110f2014-04-25 05:29:35 +00001788 Value *X = nullptr;
Michael Ilseman536cc322012-12-13 03:13:36 +00001789
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001790 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
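    // Worked example (illustrative): with x of type i32 truncated to i16,
    // C = 32 - 16 = 16, so the result is 'trunc(lshr(x, 16))' to i16, i.e.
    // the high half of x, which is what byte-reversing, truncating, and
    // byte-reversing again produces.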
Michael Ilseman536cc322012-12-13 03:13:36 +00001791 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1792 unsigned C = X->getType()->getPrimitiveSizeInBits() -
1793 IIOperand->getType()->getPrimitiveSizeInBits();
1794 Value *CV = ConstantInt::get(X->getType(), C);
Craig Topperbb4069e2017-07-07 23:16:26 +00001795 Value *V = Builder.CreateLShr(X, CV);
Michael Ilseman536cc322012-12-13 03:13:36 +00001796 return new TruncInst(V, IIOperand->getType());
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001797 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001798 break;
Michael Ilseman536cc322012-12-13 03:13:36 +00001799 }
Sanjay Patelb695c552016-02-01 17:00:10 +00001800 case Intrinsic::masked_load:
Craig Topperbb4069e2017-07-07 23:16:26 +00001801 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
Sanjay Patel4b198802016-02-01 22:23:39 +00001802 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
Sanjay Patelb695c552016-02-01 17:00:10 +00001803 break;
Sanjay Patel04f792b2016-02-01 19:39:52 +00001804 case Intrinsic::masked_store:
1805 return simplifyMaskedStore(*II, *this);
Sanjay Patel103ab7d2016-02-01 22:10:26 +00001806 case Intrinsic::masked_gather:
1807 return simplifyMaskedGather(*II, *this);
1808 case Intrinsic::masked_scatter:
1809 return simplifyMaskedScatter(*II, *this);
Sanjay Patelb695c552016-02-01 17:00:10 +00001810
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001811 case Intrinsic::powi:
Gabor Greif589a0b92010-06-24 12:58:35 +00001812 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
Philip Reames5000ba62017-12-27 01:14:30 +00001813      // powi(x, 0) and powi(x, 1) are handled in instsimplify
1814
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001815 // powi(x, -1) -> 1/x
Craig Topper79ab6432017-07-06 18:39:47 +00001816 if (Power->isMinusOne())
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001817 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
Gabor Greif589a0b92010-06-24 12:58:35 +00001818 II->getArgOperand(0));
Philip Reamescd13a662017-12-27 01:30:12 +00001819 // powi(x, 2) -> x*x
1820 if (Power->equalsInt(2))
1821 return BinaryOperator::CreateFMul(II->getArgOperand(0),
1822 II->getArgOperand(0));
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001823 }
1824 break;
Jim Grosbach7815f562012-02-03 00:07:04 +00001825
Sanjay Patel8e3ab172016-08-05 22:42:46 +00001826 case Intrinsic::cttz:
1827 case Intrinsic::ctlz:
Amaury Sechet763c59d2016-08-18 20:43:50 +00001828 if (auto *I = foldCttzCtlz(*II, *this))
1829 return I;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001830 break;
Sanjoy Dasb0984472015-04-08 04:27:22 +00001831
Craig Topper5b173f22017-06-21 16:32:35 +00001832 case Intrinsic::ctpop:
1833 if (auto *I = foldCtpop(*II, *this))
1834 return I;
1835 break;
1836
Nick Lewyckyabe2cc12015-04-13 19:17:37 +00001837 case Intrinsic::uadd_with_overflow:
1838 case Intrinsic::sadd_with_overflow:
1839 case Intrinsic::umul_with_overflow:
1840 case Intrinsic::smul_with_overflow:
Gabor Greif5b1370e2010-06-28 16:50:57 +00001841 if (isa<Constant>(II->getArgOperand(0)) &&
1842 !isa<Constant>(II->getArgOperand(1))) {
Sanjoy Dasb0984472015-04-08 04:27:22 +00001843 // Canonicalize constants into the RHS.
Gabor Greif5b1370e2010-06-28 16:50:57 +00001844 Value *LHS = II->getArgOperand(0);
1845 II->setArgOperand(0, II->getArgOperand(1));
1846 II->setArgOperand(1, LHS);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001847 return II;
1848 }
Justin Bognercd1d5aa2016-08-17 20:30:52 +00001849 LLVM_FALLTHROUGH;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001850
Nick Lewyckyabe2cc12015-04-13 19:17:37 +00001851 case Intrinsic::usub_with_overflow:
1852 case Intrinsic::ssub_with_overflow: {
Sanjoy Dasb0984472015-04-08 04:27:22 +00001853 OverflowCheckFlavor OCF =
1854 IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
1855 assert(OCF != OCF_INVALID && "unexpected!");
Jim Grosbach7815f562012-02-03 00:07:04 +00001856
Sanjoy Dasb0984472015-04-08 04:27:22 +00001857 Value *OperationResult = nullptr;
1858 Constant *OverflowResult = nullptr;
1859 if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
1860 *II, OperationResult, OverflowResult))
1861 return CreateOverflowTuple(II, OperationResult, OverflowResult);
Benjamin Kramera420df22014-07-04 10:22:21 +00001862
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001863 break;
Erik Eckstein096ff7d2014-12-11 08:02:30 +00001864 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001865
Matt Arsenaultd6511b42014-10-21 23:00:20 +00001866 case Intrinsic::minnum:
1867 case Intrinsic::maxnum: {
1868 Value *Arg0 = II->getArgOperand(0);
1869 Value *Arg1 = II->getArgOperand(1);
Sanjay Patel0069f562016-01-31 16:35:23 +00001870 // Canonicalize constants to the RHS.
1871 if (isa<ConstantFP>(Arg0) && !isa<ConstantFP>(Arg1)) {
Matt Arsenaultd6511b42014-10-21 23:00:20 +00001872 II->setArgOperand(0, Arg1);
1873 II->setArgOperand(1, Arg0);
1874 return II;
1875 }
Sanjay Patelc7bb1432018-05-10 20:03:13 +00001876
1877 // FIXME: Simplifications should be in instsimplify.
Sanjay Patel0069f562016-01-31 16:35:23 +00001878 if (Value *V = simplifyMinnumMaxnum(*II))
Sanjay Patel4b198802016-02-01 22:23:39 +00001879 return replaceInstUsesWith(*II, V);
Sanjay Patelc7bb1432018-05-10 20:03:13 +00001880
1881 Value *X, *Y;
1882 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
1883 (Arg0->hasOneUse() || Arg1->hasOneUse())) {
1884 // If both operands are negated, invert the call and negate the result:
1885 // minnum(-X, -Y) --> -(maxnum(X, Y))
1886 // maxnum(-X, -Y) --> -(minnum(X, Y))
1887 Intrinsic::ID NewIID = II->getIntrinsicID() == Intrinsic::maxnum ?
1888 Intrinsic::minnum : Intrinsic::maxnum;
1889 Value *NewCall = Builder.CreateIntrinsic(NewIID, { X, Y }, II);
1890 Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
1891 FNeg->copyIRFlags(II);
1892 return FNeg;
1893 }
Matt Arsenaultd6511b42014-10-21 23:00:20 +00001894 break;
1895 }
Matt Arsenault1cc294c2017-01-03 04:32:31 +00001896 case Intrinsic::fmuladd: {
Matt Arsenault92057602017-02-16 18:46:24 +00001897 // Canonicalize fast fmuladd to the separate fmul + fadd.
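    // For example (illustrative): 'call fast float @llvm.fmuladd.f32(%a, %b, %c)'
    // becomes a 'fast' fmul followed by a 'fast' fadd, exposing both operations
    // to further combines.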
Sanjay Patel629c4112017-11-06 16:27:15 +00001898 if (II->isFast()) {
Craig Topperbb4069e2017-07-07 23:16:26 +00001899 BuilderTy::FastMathFlagGuard Guard(Builder);
1900 Builder.setFastMathFlags(II->getFastMathFlags());
1901 Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
1902 II->getArgOperand(1));
1903 Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
Matt Arsenault92057602017-02-16 18:46:24 +00001904 Add->takeName(II);
1905 return replaceInstUsesWith(*II, Add);
1906 }
1907
1908 LLVM_FALLTHROUGH;
1909 }
1910 case Intrinsic::fma: {
Matt Arsenault1cc294c2017-01-03 04:32:31 +00001911 Value *Src0 = II->getArgOperand(0);
1912 Value *Src1 = II->getArgOperand(1);
1913
Sanjay Patel236442e2018-04-05 13:24:26 +00001914 // Canonicalize constant multiply operand to Src1.
Matt Arsenaultb264c942017-01-03 04:32:35 +00001915 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
1916 II->setArgOperand(0, Src1);
1917 II->setArgOperand(1, Src0);
1918 std::swap(Src0, Src1);
1919 }
1920
Matt Arsenault1cc294c2017-01-03 04:32:31 +00001921 // fma fneg(x), fneg(y), z -> fma x, y, z
Sanjay Patel236442e2018-04-05 13:24:26 +00001922 Value *X, *Y;
1923 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
1924 II->setArgOperand(0, X);
1925 II->setArgOperand(1, Y);
Matt Arsenault3f509042017-01-10 23:17:52 +00001926 return II;
Matt Arsenault1cc294c2017-01-03 04:32:31 +00001927 }
1928
1929 // fma fabs(x), fabs(x), z -> fma x, x, z
Sanjay Patel236442e2018-04-05 13:24:26 +00001930 if (match(Src0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))) &&
1931 match(Src1, m_Intrinsic<Intrinsic::fabs>(m_Specific(X)))) {
1932 II->setArgOperand(0, X);
1933 II->setArgOperand(1, X);
Matt Arsenault3f509042017-01-10 23:17:52 +00001934 return II;
Matt Arsenault1cc294c2017-01-03 04:32:31 +00001935 }
1936
Matt Arsenaultb264c942017-01-03 04:32:35 +00001937 // fma x, 1, z -> fadd x, z
1938 if (match(Src1, m_FPOne())) {
Sanjay Patel236442e2018-04-05 13:24:26 +00001939 auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->getArgOperand(2));
1940 FAdd->copyFastMathFlags(II);
1941 return FAdd;
Matt Arsenaultb264c942017-01-03 04:32:35 +00001942 }
1943
Matt Arsenault1cc294c2017-01-03 04:32:31 +00001944 break;
1945 }
Matt Arsenault56ff4832017-01-03 22:40:34 +00001946 case Intrinsic::fabs: {
1947 Value *Cond;
1948 Constant *LHS, *RHS;
1949 if (match(II->getArgOperand(0),
1950 m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
Craig Topperbb4069e2017-07-07 23:16:26 +00001951 CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
1952 CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
Matt Arsenault56ff4832017-01-03 22:40:34 +00001953 return SelectInst::Create(Cond, Call0, Call1);
1954 }
1955
Matt Arsenault954a6242017-01-23 23:55:08 +00001956 LLVM_FALLTHROUGH;
1957 }
1958 case Intrinsic::ceil:
1959 case Intrinsic::floor:
1960 case Intrinsic::round:
1961 case Intrinsic::nearbyint:
Joerg Sonnenberger28bed102017-03-31 19:58:07 +00001962 case Intrinsic::rint:
Matt Arsenault954a6242017-01-23 23:55:08 +00001963 case Intrinsic::trunc: {
Matt Arsenault72333442017-01-17 00:10:40 +00001964 Value *ExtSrc;
Sanjay Patel32381d72018-03-23 21:18:12 +00001965 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
1966 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
1967 Value *NarrowII = Builder.CreateIntrinsic(II->getIntrinsicID(),
1968 { ExtSrc }, II);
1969 return new FPExtInst(NarrowII, II->getType());
Matt Arsenault72333442017-01-17 00:10:40 +00001970 }
Matt Arsenault56ff4832017-01-03 22:40:34 +00001971 break;
1972 }
Matt Arsenault3bdd75d2017-01-04 22:49:03 +00001973 case Intrinsic::cos:
1974 case Intrinsic::amdgcn_cos: {
1975 Value *SrcSrc;
1976 Value *Src = II->getArgOperand(0);
1977 if (match(Src, m_FNeg(m_Value(SrcSrc))) ||
1978 match(Src, m_Intrinsic<Intrinsic::fabs>(m_Value(SrcSrc)))) {
1979 // cos(-x) -> cos(x)
1980 // cos(fabs(x)) -> cos(x)
1981 II->setArgOperand(0, SrcSrc);
1982 return II;
1983 }
1984
1985 break;
1986 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001987 case Intrinsic::ppc_altivec_lvx:
1988 case Intrinsic::ppc_altivec_lvxl:
Bill Wendlingb902f1d2011-04-13 00:36:11 +00001989 // Turn PPC lvx -> load if the pointer is known aligned.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00001990 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
Justin Bogner99798402016-08-05 01:06:44 +00001991 &DT) >= 16) {
Craig Topperbb4069e2017-07-07 23:16:26 +00001992 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
Chris Lattner7a9e47a2010-01-05 07:32:13 +00001993 PointerType::getUnqual(II->getType()));
1994 return new LoadInst(Ptr);
1995 }
1996 break;
Bill Schmidt72954782014-11-12 04:19:40 +00001997 case Intrinsic::ppc_vsx_lxvw4x:
1998 case Intrinsic::ppc_vsx_lxvd2x: {
1999 // Turn PPC VSX loads into normal loads.
Craig Topperbb4069e2017-07-07 23:16:26 +00002000 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
2001 PointerType::getUnqual(II->getType()));
Bill Schmidt72954782014-11-12 04:19:40 +00002002 return new LoadInst(Ptr, Twine(""), false, 1);
2003 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00002004 case Intrinsic::ppc_altivec_stvx:
2005 case Intrinsic::ppc_altivec_stvxl:
2006 // Turn stvx -> store if the pointer is known aligned.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00002007 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
Justin Bogner99798402016-08-05 01:06:44 +00002008 &DT) >= 16) {
Jim Grosbach7815f562012-02-03 00:07:04 +00002009 Type *OpPtrTy =
Gabor Greifa6d75e22010-06-24 15:51:11 +00002010 PointerType::getUnqual(II->getArgOperand(0)->getType());
Craig Topperbb4069e2017-07-07 23:16:26 +00002011 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
Gabor Greifa6d75e22010-06-24 15:51:11 +00002012 return new StoreInst(II->getArgOperand(0), Ptr);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00002013 }
2014 break;
Bill Schmidt72954782014-11-12 04:19:40 +00002015 case Intrinsic::ppc_vsx_stxvw4x:
2016 case Intrinsic::ppc_vsx_stxvd2x: {
2017 // Turn PPC VSX stores into normal stores.
2018 Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
Craig Topperbb4069e2017-07-07 23:16:26 +00002019 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
Bill Schmidt72954782014-11-12 04:19:40 +00002020 return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
2021 }
Hal Finkel221f4672015-02-26 18:56:03 +00002022 case Intrinsic::ppc_qpx_qvlfs:
2023 // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00002024 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
Justin Bogner99798402016-08-05 01:06:44 +00002025 &DT) >= 16) {
Craig Topperbb4069e2017-07-07 23:16:26 +00002026 Type *VTy = VectorType::get(Builder.getFloatTy(),
Hal Finkelf0d68d72015-05-11 06:37:03 +00002027 II->getType()->getVectorNumElements());
Craig Topperbb4069e2017-07-07 23:16:26 +00002028 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
Hal Finkelf0d68d72015-05-11 06:37:03 +00002029 PointerType::getUnqual(VTy));
Craig Topperbb4069e2017-07-07 23:16:26 +00002030 Value *Load = Builder.CreateLoad(Ptr);
Hal Finkelf0d68d72015-05-11 06:37:03 +00002031 return new FPExtInst(Load, II->getType());
Hal Finkel221f4672015-02-26 18:56:03 +00002032 }
2033 break;
2034 case Intrinsic::ppc_qpx_qvlfd:
2035 // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00002036 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
Justin Bogner99798402016-08-05 01:06:44 +00002037 &DT) >= 32) {
Craig Topperbb4069e2017-07-07 23:16:26 +00002038 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
Hal Finkel221f4672015-02-26 18:56:03 +00002039 PointerType::getUnqual(II->getType()));
2040 return new LoadInst(Ptr);
2041 }
2042 break;
2043 case Intrinsic::ppc_qpx_qvstfs:
2044 // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00002045 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
Justin Bogner99798402016-08-05 01:06:44 +00002046 &DT) >= 16) {
Craig Topperbb4069e2017-07-07 23:16:26 +00002047 Type *VTy = VectorType::get(Builder.getFloatTy(),
Hal Finkelf0d68d72015-05-11 06:37:03 +00002048 II->getArgOperand(0)->getType()->getVectorNumElements());
Craig Topperbb4069e2017-07-07 23:16:26 +00002049 Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
Hal Finkelf0d68d72015-05-11 06:37:03 +00002050 Type *OpPtrTy = PointerType::getUnqual(VTy);
Craig Topperbb4069e2017-07-07 23:16:26 +00002051 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
Hal Finkelf0d68d72015-05-11 06:37:03 +00002052 return new StoreInst(TOp, Ptr);
Hal Finkel221f4672015-02-26 18:56:03 +00002053 }
2054 break;
2055 case Intrinsic::ppc_qpx_qvstfd:
2056 // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
Daniel Jasperaec2fa32016-12-19 08:22:17 +00002057 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
Justin Bogner99798402016-08-05 01:06:44 +00002058 &DT) >= 32) {
Hal Finkel221f4672015-02-26 18:56:03 +00002059 Type *OpPtrTy =
2060 PointerType::getUnqual(II->getArgOperand(0)->getType());
Craig Topperbb4069e2017-07-07 23:16:26 +00002061 Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
Hal Finkel221f4672015-02-26 18:56:03 +00002062 return new StoreInst(II->getArgOperand(0), Ptr);
2063 }
2064 break;
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002065
Craig Topper83240032017-07-31 18:52:13 +00002066 case Intrinsic::x86_bmi_bextr_32:
2067 case Intrinsic::x86_bmi_bextr_64:
2068 case Intrinsic::x86_tbm_bextri_u32:
2069 case Intrinsic::x86_tbm_bextri_u64:
2070 // If the RHS is a constant we can try some simplifications.
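    // For example (illustrative): a control value of 0x0408 encodes Shift = 8
    // and Length = 4, so a constant LHS x folds to (x >> 8) & 0xf.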
2071 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2072 uint64_t Shift = C->getZExtValue();
2073 uint64_t Length = (Shift >> 8) & 0xff;
2074 Shift &= 0xff;
2075 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2076 // If the length is 0 or the shift is out of range, replace with zero.
2077 if (Length == 0 || Shift >= BitWidth)
2078 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2079 // If the LHS is also a constant, we can completely constant fold this.
2080 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2081 uint64_t Result = InC->getZExtValue() >> Shift;
2082 if (Length > BitWidth)
2083 Length = BitWidth;
2084 Result &= maskTrailingOnes<uint64_t>(Length);
2085 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2086 }
 2087      // TODO: should we turn this into 'and' if shift is 0? Or 'shl' if we
2088 // are only masking bits that a shift already cleared?
2089 }
2090 break;
2091
Craig Topper317a51e2017-07-31 18:52:15 +00002092 case Intrinsic::x86_bmi_bzhi_32:
2093 case Intrinsic::x86_bmi_bzhi_64:
2094 // If the RHS is a constant we can try some simplifications.
2095 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2096 uint64_t Index = C->getZExtValue() & 0xff;
2097 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2098 if (Index >= BitWidth)
2099 return replaceInstUsesWith(CI, II->getArgOperand(0));
2100 if (Index == 0)
2101 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2102 // If the LHS is also a constant, we can completely constant fold this.
2103 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2104 uint64_t Result = InC->getZExtValue();
2105 Result &= maskTrailingOnes<uint64_t>(Index);
2106 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2107 }
 2108      // TODO: should we convert this to an AND if the RHS is constant?
2109 }
2110 break;
2111
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002112 case Intrinsic::x86_vcvtph2ps_128:
2113 case Intrinsic::x86_vcvtph2ps_256: {
2114 auto Arg = II->getArgOperand(0);
2115 auto ArgType = cast<VectorType>(Arg->getType());
2116 auto RetType = cast<VectorType>(II->getType());
2117 unsigned ArgWidth = ArgType->getNumElements();
2118 unsigned RetWidth = RetType->getNumElements();
2119 assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
2120 assert(ArgType->isIntOrIntVectorTy() &&
2121 ArgType->getScalarSizeInBits() == 16 &&
2122 "CVTPH2PS input type should be 16-bit integer vector");
2123 assert(RetType->getScalarType()->isFloatTy() &&
2124 "CVTPH2PS output type should be 32-bit float vector");
2125
2126 // Constant folding: Convert to generic half to single conversion.
Simon Pilgrim48ffca02015-09-12 14:00:17 +00002127 if (isa<ConstantAggregateZero>(Arg))
Sanjay Patel4b198802016-02-01 22:23:39 +00002128 return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002129
Simon Pilgrim48ffca02015-09-12 14:00:17 +00002130 if (isa<ConstantDataVector>(Arg)) {
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002131 auto VectorHalfAsShorts = Arg;
2132 if (RetWidth < ArgWidth) {
Craig Topper99d1eab2016-06-12 00:41:19 +00002133 SmallVector<uint32_t, 8> SubVecMask;
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002134 for (unsigned i = 0; i != RetWidth; ++i)
2135 SubVecMask.push_back((int)i);
Craig Topperbb4069e2017-07-07 23:16:26 +00002136 VectorHalfAsShorts = Builder.CreateShuffleVector(
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002137 Arg, UndefValue::get(ArgType), SubVecMask);
2138 }
2139
2140 auto VectorHalfType =
2141 VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
2142 auto VectorHalfs =
Craig Topperbb4069e2017-07-07 23:16:26 +00002143 Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
2144 auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
Sanjay Patel4b198802016-02-01 22:23:39 +00002145 return replaceInstUsesWith(*II, VectorFloats);
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002146 }
2147
2148 // We only use the lowest lanes of the argument.
Simon Pilgrim996725e2015-09-19 11:41:53 +00002149 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
Simon Pilgrim20c607b2015-09-12 13:39:53 +00002150 II->setArgOperand(0, V);
2151 return II;
2152 }
2153 break;
2154 }
2155
Chandler Carruthcf414cf2011-01-10 07:19:37 +00002156 case Intrinsic::x86_sse_cvtss2si:
2157 case Intrinsic::x86_sse_cvtss2si64:
2158 case Intrinsic::x86_sse_cvttss2si:
2159 case Intrinsic::x86_sse_cvttss2si64:
2160 case Intrinsic::x86_sse2_cvtsd2si:
2161 case Intrinsic::x86_sse2_cvtsd2si64:
2162 case Intrinsic::x86_sse2_cvttsd2si:
Craig Topperaeaa52c2016-12-14 07:46:12 +00002163 case Intrinsic::x86_sse2_cvttsd2si64:
2164 case Intrinsic::x86_avx512_vcvtss2si32:
2165 case Intrinsic::x86_avx512_vcvtss2si64:
2166 case Intrinsic::x86_avx512_vcvtss2usi32:
2167 case Intrinsic::x86_avx512_vcvtss2usi64:
2168 case Intrinsic::x86_avx512_vcvtsd2si32:
2169 case Intrinsic::x86_avx512_vcvtsd2si64:
2170 case Intrinsic::x86_avx512_vcvtsd2usi32:
2171 case Intrinsic::x86_avx512_vcvtsd2usi64:
2172 case Intrinsic::x86_avx512_cvttss2si:
2173 case Intrinsic::x86_avx512_cvttss2si64:
2174 case Intrinsic::x86_avx512_cvttss2usi:
2175 case Intrinsic::x86_avx512_cvttss2usi64:
2176 case Intrinsic::x86_avx512_cvttsd2si:
2177 case Intrinsic::x86_avx512_cvttsd2si64:
2178 case Intrinsic::x86_avx512_cvttsd2usi:
2179 case Intrinsic::x86_avx512_cvttsd2usi64: {
Chandler Carruthcf414cf2011-01-10 07:19:37 +00002180 // These intrinsics only demand the 0th element of their input vectors. If
Chris Lattner7a9e47a2010-01-05 07:32:13 +00002181 // we can simplify the input based on that, do so now.
Simon Pilgrim996725e2015-09-19 11:41:53 +00002182 Value *Arg = II->getArgOperand(0);
2183 unsigned VWidth = Arg->getType()->getVectorNumElements();
2184 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
Gabor Greif5b1370e2010-06-28 16:50:57 +00002185 II->setArgOperand(0, V);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00002186 return II;
2187 }
Simon Pilgrim18617d12015-08-05 08:18:00 +00002188 break;
2189 }
2190
Simon Pilgrim91e3ac82016-06-07 08:18:35 +00002191 case Intrinsic::x86_mmx_pmovmskb:
2192 case Intrinsic::x86_sse_movmsk_ps:
2193 case Intrinsic::x86_sse2_movmsk_pd:
2194 case Intrinsic::x86_sse2_pmovmskb_128:
2195 case Intrinsic::x86_avx_movmsk_pd_256:
2196 case Intrinsic::x86_avx_movmsk_ps_256:
Eugene Zelenko7f0f9bc2017-10-24 21:24:53 +00002197 case Intrinsic::x86_avx2_pmovmskb:
Craig Topper4853c432017-07-06 23:18:42 +00002198 if (Value *V = simplifyX86movmsk(*II))
Simon Pilgrim91e3ac82016-06-07 08:18:35 +00002199 return replaceInstUsesWith(*II, V);
2200 break;
Simon Pilgrim91e3ac82016-06-07 08:18:35 +00002201
Simon Pilgrim471efd22016-02-20 23:17:35 +00002202 case Intrinsic::x86_sse_comieq_ss:
2203 case Intrinsic::x86_sse_comige_ss:
2204 case Intrinsic::x86_sse_comigt_ss:
2205 case Intrinsic::x86_sse_comile_ss:
2206 case Intrinsic::x86_sse_comilt_ss:
2207 case Intrinsic::x86_sse_comineq_ss:
2208 case Intrinsic::x86_sse_ucomieq_ss:
2209 case Intrinsic::x86_sse_ucomige_ss:
2210 case Intrinsic::x86_sse_ucomigt_ss:
2211 case Intrinsic::x86_sse_ucomile_ss:
2212 case Intrinsic::x86_sse_ucomilt_ss:
2213 case Intrinsic::x86_sse_ucomineq_ss:
2214 case Intrinsic::x86_sse2_comieq_sd:
2215 case Intrinsic::x86_sse2_comige_sd:
2216 case Intrinsic::x86_sse2_comigt_sd:
2217 case Intrinsic::x86_sse2_comile_sd:
2218 case Intrinsic::x86_sse2_comilt_sd:
2219 case Intrinsic::x86_sse2_comineq_sd:
2220 case Intrinsic::x86_sse2_ucomieq_sd:
2221 case Intrinsic::x86_sse2_ucomige_sd:
2222 case Intrinsic::x86_sse2_ucomigt_sd:
2223 case Intrinsic::x86_sse2_ucomile_sd:
2224 case Intrinsic::x86_sse2_ucomilt_sd:
Craig Topperd9639532016-12-11 07:42:04 +00002225 case Intrinsic::x86_sse2_ucomineq_sd:
Craig Topperd00db692016-12-31 00:45:06 +00002226 case Intrinsic::x86_avx512_vcomi_ss:
2227 case Intrinsic::x86_avx512_vcomi_sd:
Craig Topperd9639532016-12-11 07:42:04 +00002228 case Intrinsic::x86_avx512_mask_cmp_ss:
2229 case Intrinsic::x86_avx512_mask_cmp_sd: {
Simon Pilgrim471efd22016-02-20 23:17:35 +00002230 // These intrinsics only demand the 0th element of their input vectors. If
2231 // we can simplify the input based on that, do so now.
Simon Pilgrim1c9a9f22016-04-24 17:57:27 +00002232 bool MadeChange = false;
Simon Pilgrim471efd22016-02-20 23:17:35 +00002233 Value *Arg0 = II->getArgOperand(0);
2234 Value *Arg1 = II->getArgOperand(1);
2235 unsigned VWidth = Arg0->getType()->getVectorNumElements();
2236 if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
2237 II->setArgOperand(0, V);
Simon Pilgrim1c9a9f22016-04-24 17:57:27 +00002238 MadeChange = true;
Simon Pilgrim471efd22016-02-20 23:17:35 +00002239 }
2240 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
2241 II->setArgOperand(1, V);
Simon Pilgrim1c9a9f22016-04-24 17:57:27 +00002242 MadeChange = true;
Simon Pilgrim471efd22016-02-20 23:17:35 +00002243 }
Simon Pilgrim1c9a9f22016-04-24 17:57:27 +00002244 if (MadeChange)
2245 return II;
Simon Pilgrim471efd22016-02-20 23:17:35 +00002246 break;
2247 }
Michael Zuckerman16b20d22017-04-16 13:26:08 +00002248 case Intrinsic::x86_avx512_mask_cmp_pd_128:
2249 case Intrinsic::x86_avx512_mask_cmp_pd_256:
2250 case Intrinsic::x86_avx512_mask_cmp_pd_512:
2251 case Intrinsic::x86_avx512_mask_cmp_ps_128:
2252 case Intrinsic::x86_avx512_mask_cmp_ps_256:
2253 case Intrinsic::x86_avx512_mask_cmp_ps_512: {
2254 // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
2255 Value *Arg0 = II->getArgOperand(0);
2256 Value *Arg1 = II->getArgOperand(1);
Sanjay Patel93e64dd2018-03-25 21:16:33 +00002257 bool Arg0IsZero = match(Arg0, m_PosZeroFP());
Michael Zuckerman16b20d22017-04-16 13:26:08 +00002258 if (Arg0IsZero)
2259 std::swap(Arg0, Arg1);
2260 Value *A, *B;
 2261    // This fold requires only the NINF (no +/- infinities) flag, since
 2262    // inf minus inf is NaN.
 2263    // NSZ (no signed zeros) is not needed because zeros of any sign
 2264    // compare equal for both compares.
 2265    // NNAN is not needed because NaNs compare the same for both compares.
 2266    // The compare intrinsic relies on the above assumptions and therefore
 2267    // doesn't require additional flags.
2268 if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
Sanjay Patel93e64dd2018-03-25 21:16:33 +00002269 match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
Michael Zuckerman16b20d22017-04-16 13:26:08 +00002270 cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
2271 if (Arg0IsZero)
2272 std::swap(A, B);
2273 II->setArgOperand(0, A);
2274 II->setArgOperand(1, B);
2275 return II;
2276 }
2277 break;
2278 }
Simon Pilgrim471efd22016-02-20 23:17:35 +00002279
Craig Topper020b2282016-12-27 00:23:16 +00002280 case Intrinsic::x86_avx512_mask_add_ps_512:
2281 case Intrinsic::x86_avx512_mask_div_ps_512:
2282 case Intrinsic::x86_avx512_mask_mul_ps_512:
2283 case Intrinsic::x86_avx512_mask_sub_ps_512:
2284 case Intrinsic::x86_avx512_mask_add_pd_512:
2285 case Intrinsic::x86_avx512_mask_div_pd_512:
2286 case Intrinsic::x86_avx512_mask_mul_pd_512:
2287 case Intrinsic::x86_avx512_mask_sub_pd_512:
2288 // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
2289 // IR operations.
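    // For example (an illustrative sketch):
    //   llvm.x86.avx512.mask.add.ps.512(%a, %b, %passthru, %mask, i32 4)
    // becomes a plain 'fadd <16 x float> %a, %b' followed by a mask select.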
2290 if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
2291 if (R->getValue() == 4) {
2292 Value *Arg0 = II->getArgOperand(0);
2293 Value *Arg1 = II->getArgOperand(1);
2294
2295 Value *V;
2296 switch (II->getIntrinsicID()) {
2297 default: llvm_unreachable("Case stmts out of sync!");
2298 case Intrinsic::x86_avx512_mask_add_ps_512:
2299 case Intrinsic::x86_avx512_mask_add_pd_512:
Craig Topperbb4069e2017-07-07 23:16:26 +00002300 V = Builder.CreateFAdd(Arg0, Arg1);
Craig Topper020b2282016-12-27 00:23:16 +00002301 break;
2302 case Intrinsic::x86_avx512_mask_sub_ps_512:
2303 case Intrinsic::x86_avx512_mask_sub_pd_512:
Craig Topperbb4069e2017-07-07 23:16:26 +00002304 V = Builder.CreateFSub(Arg0, Arg1);
Craig Topper020b2282016-12-27 00:23:16 +00002305 break;
2306 case Intrinsic::x86_avx512_mask_mul_ps_512:
2307 case Intrinsic::x86_avx512_mask_mul_pd_512:
Craig Topperbb4069e2017-07-07 23:16:26 +00002308 V = Builder.CreateFMul(Arg0, Arg1);
Craig Topper020b2282016-12-27 00:23:16 +00002309 break;
2310 case Intrinsic::x86_avx512_mask_div_ps_512:
2311 case Intrinsic::x86_avx512_mask_div_pd_512:
Craig Topperbb4069e2017-07-07 23:16:26 +00002312 V = Builder.CreateFDiv(Arg0, Arg1);
Craig Topper020b2282016-12-27 00:23:16 +00002313 break;
2314 }
2315
2316 // Create a select for the masking.
2317 V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2),
Craig Topperbb4069e2017-07-07 23:16:26 +00002318 Builder);
Craig Topper020b2282016-12-27 00:23:16 +00002319 return replaceInstUsesWith(*II, V);
2320 }
2321 }
2322 break;
2323
  case Intrinsic::x86_avx512_mask_add_ss_round:
  case Intrinsic::x86_avx512_mask_div_ss_round:
  case Intrinsic::x86_avx512_mask_mul_ss_round:
  case Intrinsic::x86_avx512_mask_sub_ss_round:
  case Intrinsic::x86_avx512_mask_add_sd_round:
  case Intrinsic::x86_avx512_mask_div_sd_round:
  case Intrinsic::x86_avx512_mask_mul_sd_round:
  case Intrinsic::x86_avx512_mask_sub_sd_round:
    // If the rounding mode is CUR_DIRECTION (4) we can turn these into regular
    // IR operations.
    if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
      if (R->getValue() == 4) {
        // Extract the elements as scalars.
        Value *Arg0 = II->getArgOperand(0);
        Value *Arg1 = II->getArgOperand(1);
        Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
        Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);

        Value *V;
        switch (II->getIntrinsicID()) {
        default: llvm_unreachable("Case stmts out of sync!");
        case Intrinsic::x86_avx512_mask_add_ss_round:
        case Intrinsic::x86_avx512_mask_add_sd_round:
          V = Builder.CreateFAdd(LHS, RHS);
          break;
        case Intrinsic::x86_avx512_mask_sub_ss_round:
        case Intrinsic::x86_avx512_mask_sub_sd_round:
          V = Builder.CreateFSub(LHS, RHS);
          break;
        case Intrinsic::x86_avx512_mask_mul_ss_round:
        case Intrinsic::x86_avx512_mask_mul_sd_round:
          V = Builder.CreateFMul(LHS, RHS);
          break;
        case Intrinsic::x86_avx512_mask_div_ss_round:
        case Intrinsic::x86_avx512_mask_div_sd_round:
          V = Builder.CreateFDiv(LHS, RHS);
          break;
        }

        // Handle the masking aspect of the intrinsic.
        Value *Mask = II->getArgOperand(3);
        auto *C = dyn_cast<ConstantInt>(Mask);
        // We don't need a select if we know the mask bit is a 1.
        if (!C || !C->getValue()[0]) {
          // Cast the mask to an i1 vector and then extract the lowest element.
          auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
                             cast<IntegerType>(Mask->getType())->getBitWidth());
          Mask = Builder.CreateBitCast(Mask, MaskTy);
          Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
          // Extract the lowest element from the passthru operand.
          Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
                                                         (uint64_t)0);
          V = Builder.CreateSelect(Mask, V, Passthru);
        }

        // Insert the result back into the original argument 0.
        V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);

        return replaceInstUsesWith(*II, V);
      }
    }
    LLVM_FALLTHROUGH;

  // X86 scalar intrinsics simplified with SimplifyDemandedVectorElts.
  case Intrinsic::x86_avx512_mask_max_ss_round:
  case Intrinsic::x86_avx512_mask_min_ss_round:
  case Intrinsic::x86_avx512_mask_max_sd_round:
  case Intrinsic::x86_avx512_mask_min_sd_round:
  case Intrinsic::x86_avx512_mask_vfmadd_ss:
  case Intrinsic::x86_avx512_mask_vfmadd_sd:
  case Intrinsic::x86_avx512_maskz_vfmadd_ss:
  case Intrinsic::x86_avx512_maskz_vfmadd_sd:
  case Intrinsic::x86_avx512_mask3_vfmadd_ss:
  case Intrinsic::x86_avx512_mask3_vfmadd_sd:
  case Intrinsic::x86_avx512_mask3_vfmsub_ss:
  case Intrinsic::x86_avx512_mask3_vfmsub_sd:
  case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
  case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
  case Intrinsic::x86_fma_vfmadd_ss:
  case Intrinsic::x86_fma_vfmsub_ss:
  case Intrinsic::x86_fma_vfnmadd_ss:
  case Intrinsic::x86_fma_vfnmsub_ss:
  case Intrinsic::x86_fma_vfmadd_sd:
  case Intrinsic::x86_fma_vfmsub_sd:
  case Intrinsic::x86_fma_vfnmadd_sd:
  case Intrinsic::x86_fma_vfnmsub_sd:
  case Intrinsic::x86_sse_cmp_ss:
  case Intrinsic::x86_sse_min_ss:
  case Intrinsic::x86_sse_max_ss:
  case Intrinsic::x86_sse2_cmp_sd:
  case Intrinsic::x86_sse2_min_sd:
  case Intrinsic::x86_sse2_max_sd:
  case Intrinsic::x86_sse41_round_ss:
  case Intrinsic::x86_sse41_round_sd:
  case Intrinsic::x86_xop_vfrcz_ss:
  case Intrinsic::x86_xop_vfrcz_sd: {
    unsigned VWidth = II->getType()->getVectorNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    }
    break;
  }

  // Constant fold ashr( <A x Bi>, Ci ).
  // Constant fold lshr( <A x Bi>, Ci ).
  // Constant fold shl( <A x Bi>, Ci ).
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    if (Value *V = simplifyX86immShift(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

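  // An illustrative instance of the constant-shift fold above:
  //   %r = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %v, i32 3)
  // is rewritten by simplifyX86immShift as a generic IR shift:
  //   %r = shl <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>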
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512: {
    if (Value *V = simplifyX86immShift(*II, Builder))
      return replaceInstUsesWith(*II, V);

    // SSE2/AVX2 use only the first 64 bits of the 128-bit vector
    // operand to compute the shift amount.
    Value *Arg1 = II->getArgOperand(1);
    assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
           "Unexpected packed shift size");
    unsigned VWidth = Arg1->getType()->getVectorNumElements();

    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
      II->setArgOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    if (Value *V = simplifyX86varShift(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse2_packssdw_128:
  case Intrinsic::x86_sse2_packsswb_128:
  case Intrinsic::x86_avx2_packssdw:
  case Intrinsic::x86_avx2_packsswb:
  case Intrinsic::x86_avx512_packssdw_512:
  case Intrinsic::x86_avx512_packsswb_512:
    if (Value *V = simplifyX86pack(*II, true))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse2_packuswb_128:
  case Intrinsic::x86_sse41_packusdw:
  case Intrinsic::x86_avx2_packusdw:
  case Intrinsic::x86_avx2_packuswb:
  case Intrinsic::x86_avx512_packusdw_512:
  case Intrinsic::x86_avx512_packuswb_512:
    if (Value *V = simplifyX86pack(*II, false))
      return replaceInstUsesWith(*II, V);
    break;

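  // simplifyX86pack can, for example, fold a pack of two constant vectors
  // into a constant whose lanes are the inputs saturated to the narrower
  // element type; the exact set of handled cases lives in simplifyX86pack.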
  case Intrinsic::x86_pclmulqdq: {
    if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      unsigned Imm = C->getZExtValue();

      bool MadeChange = false;
      Value *Arg0 = II->getArgOperand(0);
      Value *Arg1 = II->getArgOperand(1);
      unsigned VWidth = Arg0->getType()->getVectorNumElements();
      APInt DemandedElts(VWidth, 0);

      APInt UndefElts1(VWidth, 0);
      DemandedElts = (Imm & 0x01) ? 2 : 1;
      if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts,
                                                UndefElts1)) {
        II->setArgOperand(0, V);
        MadeChange = true;
      }

      APInt UndefElts2(VWidth, 0);
      DemandedElts = (Imm & 0x10) ? 2 : 1;
      if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts,
                                                UndefElts2)) {
        II->setArgOperand(1, V);
        MadeChange = true;
      }

      // If either selected input element is undef, the result is zero.
      if (UndefElts1[(Imm & 0x01) ? 1 : 0] ||
          UndefElts2[(Imm & 0x10) ? 1 : 0])
        return replaceInstUsesWith(*II,
                                   ConstantAggregateZero::get(II->getType()));

      if (MadeChange)
        return II;
    }
    break;
  }

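  // Only one 64-bit element of each pclmulqdq operand is multiplied: bit 0
  // of the immediate selects element 1 (vs. 0) of the first operand and
  // bit 4 does the same for the second, which is why a single element per
  // operand is marked as demanded above.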
  case Intrinsic::x86_sse41_insertps:
    if (Value *V = simplifyX86insertps(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse4a_extrq: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 16 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    Constant *C1 = dyn_cast<Constant>(Op1);
    ConstantInt *CILength =
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
           : nullptr;
    ConstantInt *CIIndex =
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
           : nullptr;

    // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
      return replaceInstUsesWith(*II, V);

    // EXTRQ only uses the lowest 64 bits of the first 128-bit vector
    // operand and the lowest 16 bits of the second.
    bool MadeChange = false;
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      MadeChange = true;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
      II->setArgOperand(1, V);
      MadeChange = true;
    }
    if (MadeChange)
      return II;
    break;
  }

  case Intrinsic::x86_sse4a_extrqi: {
    // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
    // bits of the lower 64 bits. The upper 64 bits are undefined.
    Value *Op0 = II->getArgOperand(0);
    unsigned VWidth = Op0->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
           "Unexpected operand size");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));

    // Attempt to simplify to a constant or shuffle vector.
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
      return replaceInstUsesWith(*II, V);

    // EXTRQI only uses the lowest 64 bits of the first 128-bit vector
    // operand.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertq: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth = Op0->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
           Op1->getType()->getVectorNumElements() == 2 &&
           "Unexpected operand size");

    // See if we're dealing with constant values.
    Constant *C1 = dyn_cast<Constant>(Op1);
    ConstantInt *CI11 =
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
           : nullptr;

    // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
    if (CI11) {
      const APInt &V11 = CI11->getValue();
      APInt Len = V11.zextOrTrunc(6);
      APInt Idx = V11.lshr(8).zextOrTrunc(6);
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
        return replaceInstUsesWith(*II, V);
    }

    // INSERTQ only uses the lowest 64 bits of the first 128-bit vector
    // operand.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertqi: {
    // INSERTQI: Extract lowest Length bits from lower half of second source
    // and insert over first source starting at Index bit. The upper 64 bits
    // are undefined.
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 2 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));

    // Attempt to simplify to a constant or shuffle vector.
    if (CILength && CIIndex) {
      APInt Len = CILength->getValue().zextOrTrunc(6);
      APInt Idx = CIIndex->getValue().zextOrTrunc(6);
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
        return replaceInstUsesWith(*II, V);
    }

    // INSERTQI only uses the lowest 64 bits of the first two 128-bit vector
    // operands.
    bool MadeChange = false;
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      MadeChange = true;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
      II->setArgOperand(1, V);
      MadeChange = true;
    }
    if (MadeChange)
      return II;
    break;
  }

  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.

    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    Value *Mask = II->getArgOperand(2);

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return replaceInstUsesWith(CI, Op0);

    // Zero Mask - select 1st argument.
    if (isa<ConstantAggregateZero>(Mask))
      return replaceInstUsesWith(CI, Op0);

    // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
    if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
      Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
      return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
    }
    break;
  }

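  // For instance (illustrative IR), a blendvps whose constant mask has the
  // sign bits <1,0,0,1> becomes
  //   %r = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>,
  //               <4 x float> %b, <4 x float> %a
  // where %a and %b are the first and second arguments, respectively.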
  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
  case Intrinsic::x86_avx512_pshuf_b_512:
    if (Value *V = simplifyX86pshufb(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx512_vpermilvar_ps_512:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256:
  case Intrinsic::x86_avx512_vpermilvar_pd_512:
    if (Value *V = simplifyX86vpermilvar(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx2_permd:
  case Intrinsic::x86_avx2_permps:
    if (Value *V = simplifyX86vpermv(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx512_mask_permvar_df_256:
  case Intrinsic::x86_avx512_mask_permvar_df_512:
  case Intrinsic::x86_avx512_mask_permvar_di_256:
  case Intrinsic::x86_avx512_mask_permvar_di_512:
  case Intrinsic::x86_avx512_mask_permvar_hi_128:
  case Intrinsic::x86_avx512_mask_permvar_hi_256:
  case Intrinsic::x86_avx512_mask_permvar_hi_512:
  case Intrinsic::x86_avx512_mask_permvar_qi_128:
  case Intrinsic::x86_avx512_mask_permvar_qi_256:
  case Intrinsic::x86_avx512_mask_permvar_qi_512:
  case Intrinsic::x86_avx512_mask_permvar_sf_256:
  case Intrinsic::x86_avx512_mask_permvar_sf_512:
  case Intrinsic::x86_avx512_mask_permvar_si_256:
  case Intrinsic::x86_avx512_mask_permvar_si_512:
    if (Value *V = simplifyX86vpermv(*II, Builder)) {
      // We simplified the permuting, now create a select for the masking.
      V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2),
                            Builder);
      return replaceInstUsesWith(*II, V);
    }
    break;

  case Intrinsic::x86_avx_maskload_ps:
  case Intrinsic::x86_avx_maskload_pd:
  case Intrinsic::x86_avx_maskload_ps_256:
  case Intrinsic::x86_avx_maskload_pd_256:
  case Intrinsic::x86_avx2_maskload_d:
  case Intrinsic::x86_avx2_maskload_q:
  case Intrinsic::x86_avx2_maskload_d_256:
  case Intrinsic::x86_avx2_maskload_q_256:
    if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
      return I;
    break;

  case Intrinsic::x86_sse2_maskmov_dqu:
  case Intrinsic::x86_avx_maskstore_ps:
  case Intrinsic::x86_avx_maskstore_pd:
  case Intrinsic::x86_avx_maskstore_ps_256:
  case Intrinsic::x86_avx_maskstore_pd_256:
  case Intrinsic::x86_avx2_maskstore_d:
  case Intrinsic::x86_avx2_maskstore_q:
  case Intrinsic::x86_avx2_maskstore_d_256:
  case Intrinsic::x86_avx2_maskstore_q_256:
    if (simplifyX86MaskedStore(*II, *this))
      return nullptr;
    break;

  case Intrinsic::x86_xop_vpcomb:
  case Intrinsic::x86_xop_vpcomd:
  case Intrinsic::x86_xop_vpcomq:
  case Intrinsic::x86_xop_vpcomw:
    if (Value *V = simplifyX86vpcom(*II, Builder, true))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_xop_vpcomub:
  case Intrinsic::x86_xop_vpcomud:
  case Intrinsic::x86_xop_vpcomuq:
  case Intrinsic::x86_xop_vpcomuw:
    if (Value *V = simplifyX86vpcom(*II, Builder, false))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
                                           Mask->getType());
        Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
                                           Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                           Builder.getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                               Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

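  // With a constant mask, the vperm above is expanded into scalar
  // extractelement/insertelement operations on byte vectors (each distinct
  // source byte is extracted at most once); later combines can then fold
  // the chain into a shufflevector.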
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign =
        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }
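  // As an example of the mul-by-one fold (assuming the usual widening
  // multiply signature):
  //   %r = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a,
  //            <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
  // reduces to a widening cast:
  //   %r = sext <4 x i16> %a to <4 x i32>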
  case Intrinsic::amdgcn_rcp: {
    Value *Src = II->getArgOperand(0);

    // TODO: Move to ConstantFolding/InstSimplify?
    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, Src);

    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
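  // E.g. the reciprocal of a constant that divides exactly, such as
  //   %r = call float @llvm.amdgcn.rcp.f32(float 2.0)
  // folds to 0.5, while rcp of 3.0 is left alone because 1/3 is inexact
  // and folding it would bake in a rounding mode.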
  case Intrinsic::amdgcn_rsq: {
    Value *Src = II->getArgOperand(0);

    // TODO: Move to ConstantFolding/InstSimplify?
    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, Src);
    break;
  }
  case Intrinsic::amdgcn_frexp_mant:
  case Intrinsic::amdgcn_frexp_exp: {
    Value *Src = II->getArgOperand(0);
    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      int Exp;
      APFloat Significand = frexp(C->getValueAPF(), Exp,
                                  APFloat::rmNearestTiesToEven);

      if (II->getIntrinsicID() == Intrinsic::amdgcn_frexp_mant) {
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
                                                       Significand));
      }

      // Match instruction special case behavior.
      if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
        Exp = 0;

      return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
    }

    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, UndefValue::get(II->getType()));

    break;
  }
  case Intrinsic::amdgcn_class: {
    enum {
      S_NAN = 1 << 0,       // Signaling NaN
      Q_NAN = 1 << 1,       // Quiet NaN
      N_INFINITY = 1 << 2,  // Negative infinity
      N_NORMAL = 1 << 3,    // Negative normal
      N_SUBNORMAL = 1 << 4, // Negative subnormal
      N_ZERO = 1 << 5,      // Negative zero
      P_ZERO = 1 << 6,      // Positive zero
      P_SUBNORMAL = 1 << 7, // Positive subnormal
      P_NORMAL = 1 << 8,    // Positive normal
      P_INFINITY = 1 << 9   // Positive infinity
    };

    const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
      N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;

    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
    if (!CMask) {
      if (isa<UndefValue>(Src0))
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

      if (isa<UndefValue>(Src1))
        return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
      break;
    }

    uint32_t Mask = CMask->getZExtValue();

    // If all tests are enabled, the result is true regardless of the value;
    // if none are, it is false.
    if ((Mask & FullMask) == FullMask)
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));

    if ((Mask & FullMask) == 0)
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));

    if (Mask == (S_NAN | Q_NAN)) {
      // Equivalent of isnan. Replace with standard fcmp.
      Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
      FCmp->takeName(II);
      return replaceInstUsesWith(*II, FCmp);
    }

    const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
    if (!CVal) {
      if (isa<UndefValue>(Src0))
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

      // Clamp mask to used bits.
      if ((Mask & FullMask) != Mask) {
        CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
          { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
        );

        NewCall->takeName(II);
        return replaceInstUsesWith(*II, NewCall);
      }

      break;
    }

    const APFloat &Val = CVal->getValueAPF();

    bool Result =
      ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
      ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
      ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
      ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
      ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
      ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
      ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
      ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
      ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
      ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());

    return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
  }
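  // A common instance of the S_NAN | Q_NAN fold above: with mask 3,
  //   %r = call i1 @llvm.amdgcn.class.f32(float %x, i32 3)
  // is just an isnan test and becomes
  //   %r = fcmp uno float %x, %x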
  case Intrinsic::amdgcn_cvt_pkrtz: {
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
        const fltSemantics &HalfSem
          = II->getType()->getScalarType()->getFltSemantics();
        bool LosesInfo;
        APFloat Val0 = C0->getValueAPF();
        APFloat Val1 = C1->getValueAPF();
        Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
        Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);

        Constant *Folded = ConstantVector::get({
            ConstantFP::get(II->getContext(), Val0),
            ConstantFP::get(II->getContext(), Val1) });
        return replaceInstUsesWith(*II, Folded);
      }
    }

    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

    break;
  }
  case Intrinsic::amdgcn_cvt_pknorm_i16:
  case Intrinsic::amdgcn_cvt_pknorm_u16:
  case Intrinsic::amdgcn_cvt_pk_i16:
  case Intrinsic::amdgcn_cvt_pk_u16: {
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);

    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

    break;
  }
  case Intrinsic::amdgcn_ubfe:
  case Intrinsic::amdgcn_sbfe: {
    // Decompose simple cases into standard shifts.
    Value *Src = II->getArgOperand(0);
    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(*II, Src);

    unsigned Width;
    Type *Ty = II->getType();
    unsigned IntSize = Ty->getIntegerBitWidth();

    ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
    if (CWidth) {
      Width = CWidth->getZExtValue();
      if ((Width & (IntSize - 1)) == 0)
        return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));

      if (Width >= IntSize) {
        // Hardware ignores high bits, so remove those.
        II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
                                              Width & (IntSize - 1)));
        return II;
      }
    }

    unsigned Offset;
    ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (COffset) {
      Offset = COffset->getZExtValue();
      if (Offset >= IntSize) {
        II->setArgOperand(1, ConstantInt::get(COffset->getType(),
                                              Offset & (IntSize - 1)));
        return II;
      }
    }

    bool Signed = II->getIntrinsicID() == Intrinsic::amdgcn_sbfe;

    // TODO: Also emit sub if only width is constant.
    if (!CWidth && COffset && Offset == 0) {
      Constant *KSize = ConstantInt::get(COffset->getType(), IntSize);
      Value *ShiftVal = Builder.CreateSub(KSize, II->getArgOperand(2));
      ShiftVal = Builder.CreateZExt(ShiftVal, II->getType());

      Value *Shl = Builder.CreateShl(Src, ShiftVal);
      Value *RightShift = Signed ? Builder.CreateAShr(Shl, ShiftVal)
                                 : Builder.CreateLShr(Shl, ShiftVal);
      RightShift->takeName(II);
      return replaceInstUsesWith(*II, RightShift);
    }

    if (!CWidth || !COffset)
      break;

    // TODO: This allows folding to undef when the hardware has specific
    // behavior?
    if (Offset + Width < IntSize) {
      Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
      Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
                                 : Builder.CreateLShr(Shl, IntSize - Width);
      RightShift->takeName(II);
      return replaceInstUsesWith(*II, RightShift);
    }

    Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
                               : Builder.CreateLShr(Src, Offset);

    RightShift->takeName(II);
    return replaceInstUsesWith(*II, RightShift);
  }
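  // An illustrative instance of the decomposition above, for a 32-bit ubfe
  // with constant offset 8 and width 8:
  //   %r = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 8, i32 8)
  // becomes the shift pair
  //   %t = shl i32 %x, 16
  //   %r = lshr i32 %t, 24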
  case Intrinsic::amdgcn_exp:
  case Intrinsic::amdgcn_exp_compr: {
    ConstantInt *En = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (!En) // Illegal.
      break;

    unsigned EnBits = En->getZExtValue();
    if (EnBits == 0xf)
      break; // All inputs enabled.

    bool IsCompr = II->getIntrinsicID() == Intrinsic::amdgcn_exp_compr;
    bool Changed = false;
    for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
      if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
          (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
        Value *Src = II->getArgOperand(I + 2);
        if (!isa<UndefValue>(Src)) {
          II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
          Changed = true;
        }
      }
    }

    if (Changed)
      return II;

    break;
  }
  case Intrinsic::amdgcn_fmed3: {
    // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
    // for the shader.

    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    Value *Src2 = II->getArgOperand(2);

    bool Swap = false;
    // Canonicalize constants to RHS operands.
    //
    // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
      std::swap(Src0, Src1);
      Swap = true;
    }

    if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
      std::swap(Src1, Src2);
      Swap = true;
    }

    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
      std::swap(Src0, Src1);
      Swap = true;
    }

    if (Swap) {
      II->setArgOperand(0, Src0);
      II->setArgOperand(1, Src1);
      II->setArgOperand(2, Src2);
      return II;
    }

    if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
      CallInst *NewCall = Builder.CreateMinNum(Src0, Src1);
      NewCall->copyFastMathFlags(II);
      NewCall->takeName(II);
      return replaceInstUsesWith(*II, NewCall);
    }

    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
        if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
          APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
                                       C2->getValueAPF());
          return replaceInstUsesWith(*II,
              ConstantFP::get(Builder.getContext(), Result));
        }
      }
    }

    break;
  }
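  // For example, when the last operand is known to be NaN, the fmed3 above
  // reduces to the minimum of the other two operands (illustrative IR):
  //   %r = call float @llvm.amdgcn.fmed3.f32(float %a, float %b,
  //            float 0x7FF8000000000000)
  // becomes
  //   %r = call float @llvm.minnum.f32(float %a, float %b)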
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003301 case Intrinsic::amdgcn_icmp:
3302 case Intrinsic::amdgcn_fcmp: {
3303 const ConstantInt *CC = dyn_cast<ConstantInt>(II->getArgOperand(2));
3304 if (!CC)
3305 break;
3306
3307 // Guard against invalid arguments.
3308 int64_t CCVal = CC->getZExtValue();
3309 bool IsInteger = II->getIntrinsicID() == Intrinsic::amdgcn_icmp;
3310 if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3311 CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3312 (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3313 CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3314 break;
3315
3316 Value *Src0 = II->getArgOperand(0);
3317 Value *Src1 = II->getArgOperand(1);
3318
3319 if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3320 if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3321 Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
Nicolai Haehnle9c661852017-04-24 17:08:43 +00003322 if (CCmp->isNullValue()) {
3323 return replaceInstUsesWith(
3324 *II, ConstantExpr::getSExt(CCmp, II->getType()));
3325 }
3326
3327 // The result of V_ICMP/V_FCMP assembly instructions (which this
3328 // intrinsic exposes) is one bit per thread, masked with the EXEC
3329 // register (which contains the bitmask of live threads). So a
3330 // comparison that always returns true is the same as a read of the
3331 // EXEC register.
3332 Value *NewF = Intrinsic::getDeclaration(
3333 II->getModule(), Intrinsic::read_register, II->getType());
3334 Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3335 MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3336 Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
Craig Topperbb4069e2017-07-07 23:16:26 +00003337 CallInst *NewCall = Builder.CreateCall(NewF, Args);
Nicolai Haehnle9c661852017-04-24 17:08:43 +00003338 NewCall->addAttribute(AttributeList::FunctionIndex,
3339 Attribute::Convergent);
3340 NewCall->takeName(II);
3341 return replaceInstUsesWith(*II, NewCall);
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003342 }
3343
3344 // Canonicalize constants to RHS.
3345 CmpInst::Predicate SwapPred
3346 = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3347 II->setArgOperand(0, Src1);
3348 II->setArgOperand(1, Src0);
3349 II->setArgOperand(2, ConstantInt::get(CC->getType(),
3350 static_cast<int>(SwapPred)));
3351 return II;
3352 }
3353
3354 if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3355 break;
3356
3357 // Canonicalize compare eq with true value to compare != 0
3358 // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3359 // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3360 // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3361 // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3362 Value *ExtSrc;
3363 if (CCVal == CmpInst::ICMP_EQ &&
3364 ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3365 (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3366 ExtSrc->getType()->isIntegerTy(1)) {
3367 II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3368 II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3369 return II;
3370 }
3371
3372 CmpInst::Predicate SrcPred;
3373 Value *SrcLHS;
3374 Value *SrcRHS;
3375
3376 // Fold compare eq/ne with 0 from a compare result as the predicate to the
3377 // intrinsic. The typical use is a wave vote function in the library, which
3378 // will be fed from a user code condition compared with 0. Fold in the
3379 // redundant compare.
3380
3381 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3382 // -> llvm.amdgcn.[if]cmp(a, b, pred)
3383 //
3384 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3385 // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3386 if (match(Src1, m_Zero()) &&
3387 match(Src0,
3388 m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3389 if (CCVal == CmpInst::ICMP_EQ)
3390 SrcPred = CmpInst::getInversePredicate(SrcPred);
3391
3392 Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3393 Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3394
3395 Value *NewF = Intrinsic::getDeclaration(II->getModule(), NewIID,
3396 SrcLHS->getType());
3397 Value *Args[] = { SrcLHS, SrcRHS,
3398 ConstantInt::get(CC->getType(), SrcPred) };
Craig Topperbb4069e2017-07-07 23:16:26 +00003399 CallInst *NewCall = Builder.CreateCall(NewF, Args);
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003400 NewCall->takeName(II);
3401 return replaceInstUsesWith(*II, NewCall);
3402 }
3403
3404 break;
3405 }
Marek Olsak2114fc32017-10-24 10:26:59 +00003406 case Intrinsic::amdgcn_wqm_vote: {
3407 // wqm_vote is identity when the argument is constant.
3408 if (!isa<Constant>(II->getArgOperand(0)))
3409 break;
3410
3411 return replaceInstUsesWith(*II, II->getArgOperand(0));
3412 }
Marek Olsakce76ea02017-10-24 10:27:13 +00003413 case Intrinsic::amdgcn_kill: {
3414 const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3415 if (!C || !C->getZExtValue())
3416 break;
3417
3418 // amdgcn.kill(i1 1) is a no-op
3419 return eraseInstFromFunction(CI);
3420 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003421 case Intrinsic::stackrestore: {
3422 // If the save is right next to the restore, remove the restore. This can
3423 // happen when variable allocas are DCE'd.
Gabor Greif589a0b92010-06-24 12:58:35 +00003424 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003425 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
Duncan P. N. Exon Smith9f8aaf22015-10-13 16:59:33 +00003426 if (&*++SS->getIterator() == II)
Sanjay Patel4b198802016-02-01 22:23:39 +00003427 return eraseInstFromFunction(CI);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003428 }
3429 }
Jim Grosbach7815f562012-02-03 00:07:04 +00003430
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003431 // Scan down this block to see if there is another stack restore in the
3432 // same block without an intervening call/alloca.
Duncan P. N. Exon Smith9f8aaf22015-10-13 16:59:33 +00003433 BasicBlock::iterator BI(II);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003434 TerminatorInst *TI = II->getParent()->getTerminator();
3435 bool CannotRemove = false;
3436 for (++BI; &*BI != TI; ++BI) {
Nuno Lopes55fff832012-06-21 15:45:28 +00003437 if (isa<AllocaInst>(BI)) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003438 CannotRemove = true;
3439 break;
3440 }
3441 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3442 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
3443 // If there is a stackrestore below this one, remove this one.
3444 if (II->getIntrinsicID() == Intrinsic::stackrestore)
Sanjay Patel4b198802016-02-01 22:23:39 +00003445 return eraseInstFromFunction(CI);
Reid Kleckner892ae2e2016-02-27 00:53:54 +00003446
3447 // Bail if we cross over an intrinsic with side effects, such as
3448 // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3449 if (II->mayHaveSideEffects()) {
3450 CannotRemove = true;
3451 break;
3452 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003453 } else {
3454 // If we found a non-intrinsic call, we can't remove the stack
3455 // restore.
3456 CannotRemove = true;
3457 break;
3458 }
3459 }
3460 }
Jim Grosbach7815f562012-02-03 00:07:04 +00003461
Bill Wendlingf891bf82011-07-31 06:30:59 +00003462 // If the stack restore is in a return, resume, or unwind block and if there
3463 // are no allocas or calls between the restore and the return, nuke the
3464 // restore.
Bill Wendlingd5d95b02012-02-06 21:16:41 +00003465 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
Sanjay Patel4b198802016-02-01 22:23:39 +00003466 return eraseInstFromFunction(CI);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003467 break;
3468 }
  case Intrinsic::lifetime_start:
    // ASan needs to poison memory to detect invalid accesses, which are
    // possible even for an empty lifetime range.
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      break;

    if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
                                  Intrinsic::lifetime_end, *this))
      return nullptr;
    break;
  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);
    // Remove an assume if it is immediately followed by an identical assume.
    if (match(II->getNextNode(),
              m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return eraseInstFromFunction(CI);

    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
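    // For example (illustrative IR):
    //   %and = and i1 %a, %b
    //   call void @llvm.assume(i1 %and)
    // becomes
    //   call void @llvm.assume(i1 %a)
    //   call void @llvm.assume(i1 %b)
    // so each condition can be used by ValueTracking independently.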
    Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder.CreateCall(AssumeIntrinsic, A, II->getName());
      Builder.CreateCall(AssumeIntrinsic, B, II->getName());
      return eraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(A), II->getName());
      Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(B), II->getName());
      return eraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
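    // For example (illustrative IR):
    //   %ptr = load i8*, i8** %addr
    //   %cmp = icmp ne i8* %ptr, null
    //   call void @llvm.assume(i1 %cmp)
    // becomes
    //   %ptr = load i8*, i8** %addr, !nonnull !0
    // and the assume is erased (the icmp is left for regular DCE).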
    CmpInst::Predicate Pred;
    Instruction *LHS;
    if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
        Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
        LHS->getType()->isPointerTy() &&
        isValidAssumeForContext(II, LHS, &DT)) {
      MDNode *MD = MDNode::get(II->getContext(), None);
      LHS->setMetadata(LLVMContext::MD_nonnull, MD);
      return eraseInstFromFunction(*II);

      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }

    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    KnownBits Known(1);
    computeKnownBits(IIOperand, Known, 0, II);
    if (Known.isAllOnes())
      return eraseInstFromFunction(*II);

    // Update the cache of affected values for this assumption (we might be
    // here because we just simplified the condition).
    AC.updateAffectedValues(II);
    break;
  }
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();

    // Remove the relocation if unused; note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return eraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy.  This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr))
      // Use undef of gc_relocate's type to replace it.
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

    if (auto *PT = dyn_cast<PointerType>(II->getType())) {
      // The relocation of null will be null for almost any collector.
      // TODO: provide a hook for this in GCStrategy.  There might be some
      // weird collector this property does not hold for.
      if (isa<ConstantPointerNull>(DerivedPtr))
        // Use the null pointer of gc_relocate's type to replace it.
        return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));

      // isKnownNonNull -> nonnull attribute
      if (isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT))
        II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
    }

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs.

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
    break;
  }

  case Intrinsic::experimental_guard: {
    // Is this guard followed by another guard?  We scan forward over a small
    // fixed window of instructions to handle common cases with conditions
    // computed between guards.
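    // For example (illustrative IR):
    //   call void (i1, ...) @llvm.experimental.guard(i1 %a) [ "deopt"() ]
    //   %b = icmp ult i32 %i, %len  ; speculatable instruction between guards
    //   call void (i1, ...) @llvm.experimental.guard(i1 %b) [ "deopt"() ]
    // is rewritten below into a single guard on %a & %b.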
    Instruction *NextInst = II->getNextNode();
    for (unsigned i = 0; i < GuardWideningWindow; i++) {
      // Note: Using the context-free form to avoid compile time blow up.
      if (!isSafeToSpeculativelyExecute(NextInst))
        break;
      NextInst = NextInst->getNextNode();
    }
    Value *NextCond = nullptr;
    if (match(NextInst,
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      Value *CurrCond = II->getArgOperand(0);

      // Remove a guard that is immediately preceded by an identical guard.
      if (CurrCond == NextCond)
        return eraseInstFromFunction(*NextInst);

      // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
      Instruction *MoveI = II->getNextNode();
      while (MoveI != NextInst) {
        auto *Temp = MoveI;
        MoveI = MoveI->getNextNode();
        Temp->moveBefore(II);
      }
      II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
      return eraseInstFromFunction(*NextInst);
    }
    break;
  }
  }
  return visitCallSite(II);
}

// Fence instruction simplification.
Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
  // Remove identical consecutive fences.
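  // For example (illustrative IR), given:
  //   fence seq_cst
  //   fence seq_cst
  // the first fence is erased, since an identical fence immediately after
  // it provides no additional ordering.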
  if (auto *NFI = dyn_cast<FenceInst>(FI.getNextNode()))
    if (FI.isIdenticalTo(NFI))
      return eraseInstFromFunction(FI);
  return nullptr;
}

// InvokeInst simplification.
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
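// For example (illustrative IR), in a varargs call such as:
//   %cast = bitcast i32* %p to i8*
//   call void (i32, ...) @snk(i32 1, i8* %cast)
// the lossless bitcast can be dropped and %p passed directly.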
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types.  We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type *SrcTy =
      cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}

Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    replaceInstUsesWith(*From, With);
  };
  LibCallSimplifier Simplifier(DL, &TLI, ORE, InstCombineRAUW);
  if (Value *With = Simplifier.optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
                            E = AdjustTramp->getParent()->begin();
       I != E;) {
    Instruction *Inst = &*--I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return nullptr.
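// The pattern being matched looks roughly like this (illustrative IR):
//   call void @llvm.init.trampoline(i8* %tramp, i8* %func_addr, i8* %nest_val)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to void (i32)*
//   call void %fp(i32 0)   ; may become a direct call to the nested function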
static IntrinsicInst *findInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

/// Improvements for call and invoke instructions.
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  if (isAllocLikeFn(CS.getInstruction(), &TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute.  This is helpful for inlining calls to functions with null
  // checks on their arguments.
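  // For example (illustrative IR), when %p can be proven non-null at the
  // call site:
  //   call void @use(i8* %p)   -->   call void @use(i8* nonnull %p)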
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  for (Value *V : CS.args()) {
    if (V->getType()->isPointerTy() &&
        !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
        isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
      ArgNos.push_back(ArgNo);
    ArgNo++;
  }

  assert(ArgNo == CS.arg_size() && "sanity check");

  if (!ArgNos.empty()) {
    AttributeList AS = CS.getAttributes();
    LLVMContext &Ctx = CS.getInstruction()->getContext();
    AS = AS.addParamAttribute(Ctx, ArgNos,
                              Attribute::get(Ctx, Attribute::NonNull));
    CS.setAttributes(AS);
    Changed = true;
  }

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
    // Remove the convergent attr on calls when the callee is not convergent.
    if (CS.isConvergent() && !CalleeF->isConvergent() &&
        !CalleeF->isIntrinsic()) {
      DEBUG(dbgs() << "Removing convergent attr from instr "
                   << CS.getInstruction() << "\n");
      CS.setNotConvergent();
      return CS.getInstruction();
    }

    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return eraseInstFromFunction(*OldCall);

      // We cannot remove an invoke because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
          Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      replaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable; just remove it.  We insert a store
    // to undef so that we know that this code is not reachable, despite the
    // fact that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return eraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = findInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
                                E = CS.arg_end();
         I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through to
    // the final check below.
    if (I) return eraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}

/// If the callee is a constexpr cast of a function, attempt to move the cast
/// to the arguments of the call/invoke.
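///
/// For example (illustrative IR):
///   %r = call i8* bitcast (i32* (i32*)* @f to i8* (i8*)*)(i8* %p)
/// can become, when the cast is safe to sink into the arguments:
///   %0 = bitcast i8* %p to i32*
///   %1 = call i32* @f(i32* %0)
///   %r = bitcast i32* %1 to i8*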
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (!Callee)
    return false;

  // If this is a call to a thunk function, don't remove the cast.  Thunks are
  // used to transparently forward all incoming parameters and outgoing return
  // values, so it's important to leave the cast in place.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // If this is a musttail call, the callee's prototype must match the caller's
  // prototype with the exception of pointee types.  The code below doesn't
  // implement that, so we can't do this transform.
  // TODO: Do the transform if it only requires adding pointer casts.
  if (CS.isMustTailCall())
    return false;

  Instruction *Caller = CS.getInstruction();
  const AttributeList &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
      if (Callee->isDeclaration())
        return false;   // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false;   // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = CS.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //  call void @takes_i32_inalloca(i32* null)
  //
  // Similarly, avoid folding away bitcasts of byval calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
    return false;

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false;   // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i))
            .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
      return false;   // Attribute not compatible with transformed value.

    if (CS.isInAllocaArgument(i))
      return false;   // Cannot transform to and from inalloca.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
        return false;

      Type *CurElTy = ActTy->getPointerElementType();
      if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg() !=
        cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters are the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
            cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    unsigned SRetIdx;
    if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
        SRetIdx > FT->getNumParams())
      return false;
  }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  SmallVector<Value *, 8> Args;
  SmallVector<AttributeSet, 8> ArgAttrs;
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);

    // Add any parameter attributes.
    ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
    ArgAttrs.push_back(AttributeSet());
  }

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);

        // Add any parameter attributes.
        ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  LLVMContext &Ctx = Callee->getContext();
  AttributeList NewCallerPAL = AttributeList::get(
      Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);

  SmallVector<OperandBundleDef, 1> OpBundles;
  CS.getOperandBundlesAsDefs(OpBundles);

  CallSite NewCS;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                 II->getUnwindDest(), Args, OpBundles);
  } else {
    NewCS = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCS.getInstruction())
        ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
  }
  NewCS->takeName(Caller);
  NewCS.setCallingConv(CS.getCallingConv());
  NewCS.setAttributes(NewCallerPAL);

  // Preserve the weight metadata for the new call instruction.  The metadata
  // is used by SamplePGO to check callsite's hotness.
  uint64_t W;
  if (Caller->extractProfTotalWeight(W))
    NewCS->setProfWeight(W);

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCS.getInstruction();
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-PHI instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  AttributeList Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =
      cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(CS.arg_size() + 1);
      NewArgAttrs.reserve(CS.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      {
        unsigned ArgNo = 0;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy)
              ? NestF
              : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
                             Attrs.getRetAttributes(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      CS.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs, OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Caller)->getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Caller->getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = NestF->getType() == PTy
                            ? NestF
                            : ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}