//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// Return a constant boolean vector that has true elements in all positions
/// where the input constant data vector has an element with the sign bit set.
static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
  SmallVector<Constant *, 32> BoolVec;
  IntegerType *BoolTy = Type::getInt1Ty(V->getContext());
  for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) {
    Constant *Elt = V->getElementAsConstant(I);
    assert((isa<ConstantInt>(Elt) || isa<ConstantFP>(Elt)) &&
           "Unexpected constant data vector element type");
    bool Sign = V->getElementType()->isIntegerTy()
                    ? cast<ConstantInt>(Elt)->isNegative()
                    : cast<ConstantFP>(Elt)->isNegative();
    BoolVec.push_back(ConstantInt::get(BoolTy, Sign));
  }
  return ConstantVector::get(BoolVec);
}

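/// Simplify a memcpy/memmove intrinsic (plain or element-atomic): raise the
/// source/destination alignments to what is known if possible, and replace
/// small fixed-size copies of 1/2/4/8 bytes with an integer load/store pair.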
Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  unsigned CopyDstAlign = MI->getDestAlignment();
  if (CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  unsigned CopySrcAlign = MI->getSourceAlignment();
  if (CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
        Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

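/// Simplify a memset intrinsic (plain or element-atomic): raise the known
/// destination alignment if possible, and replace a constant memset of
/// 1/2/4/8 bytes with a single store of the byte-splatted fill value, e.g.
///   memset(p, c, 4) -> store i32 (c * 0x01010101), p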
Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  if (MI->getDestAlignment() < Alignment) {
    MI->setDestAlignment(Alignment);
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getDestAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

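// Constant-fold the x86 saturating signed add/sub intrinsics (PADDS/PSUBS).
// Folds element-wise when both operands are constant vectors; elements that
// overflow are clamped to the signed min/max of the element type.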
static Value *simplifyX86AddsSubs(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool IsAddition;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_sse2_padds_b:
  case Intrinsic::x86_sse2_padds_w:
  case Intrinsic::x86_avx2_padds_b:
  case Intrinsic::x86_avx2_padds_w:
  case Intrinsic::x86_avx512_padds_b_512:
  case Intrinsic::x86_avx512_padds_w_512:
    IsAddition = true;
    break;
  case Intrinsic::x86_sse2_psubs_b:
  case Intrinsic::x86_sse2_psubs_w:
  case Intrinsic::x86_avx2_psubs_b:
  case Intrinsic::x86_avx2_psubs_w:
  case Intrinsic::x86_avx512_psubs_b_512:
  case Intrinsic::x86_avx512_psubs_w_512:
    IsAddition = false;
    break;
  }

  auto *Arg0 = dyn_cast<Constant>(II.getOperand(0));
  auto *Arg1 = dyn_cast<Constant>(II.getOperand(1));
  auto VT = cast<VectorType>(II.getType());
  auto SVT = VT->getElementType();
  unsigned NumElems = VT->getNumElements();

  if (!Arg0 || !Arg1)
    return nullptr;

  SmallVector<Constant *, 64> Result;

  APInt MaxValue = APInt::getSignedMaxValue(SVT->getIntegerBitWidth());
  APInt MinValue = APInt::getSignedMinValue(SVT->getIntegerBitWidth());
  for (unsigned i = 0; i < NumElems; ++i) {
    auto *Elt0 = Arg0->getAggregateElement(i);
    auto *Elt1 = Arg1->getAggregateElement(i);
    if (isa<UndefValue>(Elt0) || isa<UndefValue>(Elt1)) {
      Result.push_back(UndefValue::get(SVT));
      continue;
    }

    if (!isa<ConstantInt>(Elt0) || !isa<ConstantInt>(Elt1))
      return nullptr;

    const APInt &Val0 = cast<ConstantInt>(Elt0)->getValue();
    const APInt &Val1 = cast<ConstantInt>(Elt1)->getValue();
    bool Overflow = false;
    APInt ResultElem = IsAddition ? Val0.sadd_ov(Val1, Overflow)
                                  : Val0.ssub_ov(Val1, Overflow);
    if (Overflow)
      ResultElem = Val0.isNegative() ? MinValue : MaxValue;
    Result.push_back(Constant::getIntegerValue(SVT, ResultElem));
  }

  return ConstantVector::get(Result);
}

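// Attempt to simplify x86 shifts where every lane shifts by the same amount
// (PSRA/PSRL/PSLL and their *I immediate forms) to a generic IR shift when
// the shift count is constant. Out-of-range counts have defined behaviour
// here: logical shifts produce zero; arithmetic shifts clamp the count to
// (BitWidth - 1).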
static Value *simplifyX86immShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
    LogicalShift = false; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
    LogicalShift = true; ShiftLeft = false;
    break;
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if count is constant.
  auto Arg1 = II.getArgOperand(1);
  auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
  auto CDV = dyn_cast<ConstantDataVector>(Arg1);
  auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses only the first 64 bits of the 128-bit vector
    // operand to compute the shift amount.
    auto VT = cast<VectorType>(CDV->getType());
    unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
    assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
    unsigned NumSubElts = 64 / BitWidth;

    // Concatenate the sub-elements to create the 64-bit value.
    for (unsigned i = 0; i != NumSubElts; ++i) {
      unsigned SubEltIdx = (NumSubElts - 1) - i;
      auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
      Count <<= BitWidth;
      Count |= SubElt->getValue().zextOrTrunc(64);
    }
  } else if (CInt)
    Count = CInt->getValue();

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(Vec->getType());
  auto SVT = VT->getElementType();
  unsigned VWidth = VT->getNumElements();
  unsigned BitWidth = SVT->getPrimitiveSizeInBits();

  // If shift-by-zero then just return the original value.
  if (Count.isNullValue())
    return Vec;

  // Handle cases when Shift >= BitWidth.
  if (Count.uge(BitWidth)) {
    // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);

    // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }

  // Get a constant vector of the same type as the first operand.
  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
static Value *simplifyX86varShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  bool LogicalShift = false;
  bool ShiftLeft = false;

  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
    LogicalShift = false;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    LogicalShift = true;
    ShiftLeft = false;
    break;
  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
    LogicalShift = true;
    ShiftLeft = true;
    break;
  }
  assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");

  // Simplify if all shift amounts are constant/undef.
  auto *CShift = dyn_cast<Constant>(II.getArgOperand(1));
  if (!CShift)
    return nullptr;

  auto Vec = II.getArgOperand(0);
  auto VT = cast<VectorType>(II.getType());
  auto SVT = VT->getVectorElementType();
  int NumElts = VT->getNumElements();
  int BitWidth = SVT->getIntegerBitWidth();

  // Collect each element's shift amount.
  // We also collect special cases: UNDEF = -1, OUT-OF-RANGE = BitWidth.
  bool AnyOutOfRange = false;
  SmallVector<int, 8> ShiftAmts;
  for (int I = 0; I < NumElts; ++I) {
    auto *CElt = CShift->getAggregateElement(I);
    if (CElt && isa<UndefValue>(CElt)) {
      ShiftAmts.push_back(-1);
      continue;
    }

    auto *COp = dyn_cast_or_null<ConstantInt>(CElt);
    if (!COp)
      return nullptr;

    // Handle out of range shifts.
    // If LogicalShift - set to BitWidth (special case).
    // If ArithmeticShift - set to (BitWidth - 1) (sign splat).
    APInt ShiftVal = COp->getValue();
    if (ShiftVal.uge(BitWidth)) {
      AnyOutOfRange = LogicalShift;
      ShiftAmts.push_back(LogicalShift ? BitWidth : BitWidth - 1);
      continue;
    }

    ShiftAmts.push_back((int)ShiftVal.getZExtValue());
  }

  // If all elements out of range or UNDEF, return vector of zeros/undefs.
  // ArithmeticShift should only hit this if they are all UNDEF.
  auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
  if (llvm::all_of(ShiftAmts, OutOfRange)) {
    SmallVector<Constant *, 8> ConstantVec;
    for (int Idx : ShiftAmts) {
      if (Idx < 0) {
        ConstantVec.push_back(UndefValue::get(SVT));
      } else {
        assert(LogicalShift && "Logical shift expected");
        ConstantVec.push_back(ConstantInt::getNullValue(SVT));
      }
    }
    return ConstantVector::get(ConstantVec);
  }

  // We can't handle only some out of range values with generic logical shifts.
  if (AnyOutOfRange)
    return nullptr;

  // Build the shift amount constant vector.
  SmallVector<Constant *, 8> ShiftVecAmts;
  for (int Idx : ShiftAmts) {
    if (Idx < 0)
      ShiftVecAmts.push_back(UndefValue::get(SVT));
    else
      ShiftVecAmts.push_back(ConstantInt::get(SVT, Idx));
  }
  auto ShiftVec = ConstantVector::get(ShiftVecAmts);

  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);

  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);

  return Builder.CreateAShr(Vec, ShiftVec);
}

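// Constant-fold the x86 pack-with-saturation intrinsics (PACKSS/PACKUS):
// each source element is truncated to the narrower destination type using
// signed (PACKSS) or unsigned (PACKUS) saturation, respecting 128-bit lanes.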
static Value *simplifyX86pack(IntrinsicInst &II, bool IsSigned) {
  Value *Arg0 = II.getArgOperand(0);
  Value *Arg1 = II.getArgOperand(1);
  Type *ResTy = II.getType();

  // Fast all undef handling.
  if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
    return UndefValue::get(ResTy);

  Type *ArgTy = Arg0->getType();
  unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
  unsigned NumDstElts = ResTy->getVectorNumElements();
  unsigned NumSrcElts = ArgTy->getVectorNumElements();
  assert(NumDstElts == (2 * NumSrcElts) && "Unexpected packing types");

  unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
  unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
  unsigned DstScalarSizeInBits = ResTy->getScalarSizeInBits();
  assert(ArgTy->getScalarSizeInBits() == (2 * DstScalarSizeInBits) &&
         "Unexpected packing types");

  // Constant folding.
  auto *Cst0 = dyn_cast<Constant>(Arg0);
  auto *Cst1 = dyn_cast<Constant>(Arg1);
  if (!Cst0 || !Cst1)
    return nullptr;

  SmallVector<Constant *, 32> Vals;
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
      unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
      auto *Cst = (Elt >= NumSrcEltsPerLane) ? Cst1 : Cst0;
      auto *COp = Cst->getAggregateElement(SrcIdx);
      if (COp && isa<UndefValue>(COp)) {
        Vals.push_back(UndefValue::get(ResTy->getScalarType()));
        continue;
      }

      auto *CInt = dyn_cast_or_null<ConstantInt>(COp);
      if (!CInt)
        return nullptr;

      APInt Val = CInt->getValue();
      assert(Val.getBitWidth() == ArgTy->getScalarSizeInBits() &&
             "Unexpected constant bitwidth");

      if (IsSigned) {
        // PACKSS: Truncate signed value with signed saturation.
        // Source values less than dst minint are saturated to minint.
        // Source values greater than dst maxint are saturated to maxint.
        if (Val.isSignedIntN(DstScalarSizeInBits))
          Val = Val.trunc(DstScalarSizeInBits);
        else if (Val.isNegative())
          Val = APInt::getSignedMinValue(DstScalarSizeInBits);
        else
          Val = APInt::getSignedMaxValue(DstScalarSizeInBits);
      } else {
        // PACKUS: Truncate signed value with unsigned saturation.
        // Source values less than zero are saturated to zero.
        // Source values greater than dst maxuint are saturated to maxuint.
        if (Val.isIntN(DstScalarSizeInBits))
          Val = Val.trunc(DstScalarSizeInBits);
        else if (Val.isNegative())
          Val = APInt::getNullValue(DstScalarSizeInBits);
        else
          Val = APInt::getAllOnesValue(DstScalarSizeInBits);
      }

      Vals.push_back(ConstantInt::get(ResTy->getScalarType(), Val));
    }
  }

  return ConstantVector::get(Vals);
}

// Replace X86-specific intrinsics with generic floor-ceil where applicable.
static Value *simplifyX86round(IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  ConstantInt *Arg = nullptr;
  Intrinsic::ID IntrinsicID = II.getIntrinsicID();

  if (IntrinsicID == Intrinsic::x86_sse41_round_ss ||
      IntrinsicID == Intrinsic::x86_sse41_round_sd)
    Arg = dyn_cast<ConstantInt>(II.getArgOperand(2));
  else if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
           IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd)
    Arg = dyn_cast<ConstantInt>(II.getArgOperand(4));
  else
    Arg = dyn_cast<ConstantInt>(II.getArgOperand(1));
  if (!Arg)
    return nullptr;
  unsigned RoundControl = Arg->getZExtValue();

  Arg = nullptr;
  unsigned SAE = 0;
  if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_512 ||
      IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_512)
    Arg = dyn_cast<ConstantInt>(II.getArgOperand(4));
  else if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
           IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd)
    Arg = dyn_cast<ConstantInt>(II.getArgOperand(5));
  else
    SAE = 4;
  if (!SAE) {
    if (!Arg)
      return nullptr;
    SAE = Arg->getZExtValue();
  }

  if (SAE != 4 || (RoundControl != 2 /*ceil*/ && RoundControl != 1 /*floor*/))
    return nullptr;

  Value *Src, *Dst, *Mask;
  bool IsScalar = false;
  if (IntrinsicID == Intrinsic::x86_sse41_round_ss ||
      IntrinsicID == Intrinsic::x86_sse41_round_sd ||
      IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
      IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd) {
    IsScalar = true;
    if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
        IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd) {
      Mask = II.getArgOperand(3);
      Value *Zero = Constant::getNullValue(Mask->getType());
      Mask = Builder.CreateAnd(Mask, 1);
      Mask = Builder.CreateICmp(ICmpInst::ICMP_NE, Mask, Zero);
      Dst = II.getArgOperand(2);
    } else
      Dst = II.getArgOperand(0);
    Src = Builder.CreateExtractElement(II.getArgOperand(1), (uint64_t)0);
  } else {
    Src = II.getArgOperand(0);
    if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_128 ||
        IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_256 ||
        IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ps_512 ||
        IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_128 ||
        IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_256 ||
        IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_pd_512) {
      Dst = II.getArgOperand(2);
      Mask = II.getArgOperand(3);
    } else {
      Dst = Src;
      Mask = ConstantInt::getAllOnesValue(
          Builder.getIntNTy(Src->getType()->getVectorNumElements()));
    }
  }

  Intrinsic::ID ID = (RoundControl == 2) ? Intrinsic::ceil : Intrinsic::floor;
  Value *Res = Builder.CreateUnaryIntrinsic(ID, Src, &II);
  if (!IsScalar) {
    if (auto *C = dyn_cast<Constant>(Mask))
      if (C->isAllOnesValue())
        return Res;
    auto *MaskTy = VectorType::get(
        Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
    Mask = Builder.CreateBitCast(Mask, MaskTy);
    unsigned Width = Src->getType()->getVectorNumElements();
    if (MaskTy->getVectorNumElements() > Width) {
      uint32_t Indices[4];
      for (unsigned i = 0; i != Width; ++i)
        Indices[i] = i;
      Mask = Builder.CreateShuffleVector(Mask, Mask,
                                         makeArrayRef(Indices, Width));
    }
    return Builder.CreateSelect(Mask, Res, Dst);
  }
  if (IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_ss ||
      IntrinsicID == Intrinsic::x86_avx512_mask_rndscale_sd) {
    Dst = Builder.CreateExtractElement(Dst, (uint64_t)0);
    Res = Builder.CreateSelect(Mask, Res, Dst);
    Dst = II.getArgOperand(0);
  }
  return Builder.CreateInsertElement(Dst, Res, (uint64_t)0);
}

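// Constant-fold the x86 MOVMSK intrinsics: pack the sign bit of each vector
// element into the low bits of the scalar result, e.g.
//   movmsk(<-1.0, +2.0, -3.0, +4.0>) -> 0b0101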
static Value *simplifyX86movmsk(const IntrinsicInst &II) {
  Value *Arg = II.getArgOperand(0);
  Type *ResTy = II.getType();
  Type *ArgTy = Arg->getType();

  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
  if (isa<UndefValue>(Arg))
    return Constant::getNullValue(ResTy);

  // We can't easily peek through x86_mmx types.
  if (!ArgTy->isVectorTy())
    return nullptr;

  auto *C = dyn_cast<Constant>(Arg);
  if (!C)
    return nullptr;

  // Extract signbits of the vector input and pack into integer result.
  APInt Result(ResTy->getPrimitiveSizeInBits(), 0);
  for (unsigned I = 0, E = ArgTy->getVectorNumElements(); I != E; ++I) {
    auto *COp = C->getAggregateElement(I);
    if (!COp)
      return nullptr;
    if (isa<UndefValue>(COp))
      continue;

    auto *CInt = dyn_cast<ConstantInt>(COp);
    auto *CFp = dyn_cast<ConstantFP>(COp);
    if (!CInt && !CFp)
      return nullptr;

    if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
      Result.setBit(I);
  }

  return Constant::getIntegerValue(ResTy, Result);
}

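/// Attempt to simplify SSE4.1 INSERTPS to a shufflevector (or to a zero
/// vector outright) when the immediate control byte is a constant.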
static Value *simplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
  if (!CInt)
    return nullptr;

  VectorType *VecTy = cast<VectorType>(II.getType());
  assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

  // The immediate permute control byte looks like this:
  //    [3:0] - zero mask for each 32-bit lane
  //    [5:4] - select one 32-bit destination lane
  //    [7:6] - select one 32-bit source lane

  uint8_t Imm = CInt->getZExtValue();
  uint8_t ZMask = Imm & 0xf;
  uint8_t DestLane = (Imm >> 4) & 0x3;
  uint8_t SourceLane = (Imm >> 6) & 0x3;

  ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

  // If all zero mask bits are set, this was just a weird way to
  // generate a zero vector.
  if (ZMask == 0xf)
    return ZeroVector;

  // Initialize by passing all of the first source bits through.
  uint32_t ShuffleMask[4] = { 0, 1, 2, 3 };

  // We may replace the second operand with the zero vector.
  Value *V1 = II.getArgOperand(1);

  if (ZMask) {
    // If the zero mask is being used with a single input or the zero mask
    // overrides the destination lane, this is a shuffle with the zero vector.
    if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
        (ZMask & (1 << DestLane))) {
      V1 = ZeroVector;
      // We may still move 32-bits of the first source vector from one lane
      // to another.
      ShuffleMask[DestLane] = SourceLane;
      // The zero mask may override the previous insert operation.
      for (unsigned i = 0; i < 4; ++i)
        if ((ZMask >> i) & 0x1)
          ShuffleMask[i] = i + 4;
    } else {
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
      return nullptr;
    }
  } else {
    // Replace the selected destination lane with the selected source lane.
    ShuffleMask[DestLane] = SourceLane + 4;
  }

  return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {
  auto LowConstantHighUndef = [&](uint64_t Val) {
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  };

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  ConstantInt *CI0 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;

  // Attempt to constant fold.
  if (CILength && CIIndex) {
    // From AMD documentation: "The bit index and field length are each six
    // bits in length other bits of the field are ignored."
    APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
    APInt APLength = CILength->getValue().zextOrTrunc(6);

    unsigned Index = APIndex.getZExtValue();

    // From AMD documentation: "a value of zero in the field length is
    // defined as length of 64".
    unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

    // From AMD documentation: "If the sum of the bit index + length field
    // is greater than 64, the results are undefined".
    unsigned End = Index + Length;

    // Note that both field index and field length are 8-bit quantities.
    // Since variables 'Index' and 'Length' are unsigned values
    // obtained from zero-extending field index and field length
    // respectively, their sum should never wrap around.
    if (End > 64)
      return UndefValue::get(II.getType());

    // If we are extracting whole bytes, we can convert this to a shuffle.
    // Lowering can recognize EXTRQI shuffle masks.
    if ((Length % 8) == 0 && (Index % 8) == 0) {
      // Convert bit indices to byte indices.
      Length /= 8;
      Index /= 8;

      Type *IntTy8 = Type::getInt8Ty(II.getContext());
      Type *IntTy32 = Type::getInt32Ty(II.getContext());
      VectorType *ShufTy = VectorType::get(IntTy8, 16);

      SmallVector<Constant *, 16> ShuffleMask;
      for (int i = 0; i != (int)Length; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
      for (int i = Length; i != 8; ++i)
        ShuffleMask.push_back(
            Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
      for (int i = 8; i != 16; ++i)
        ShuffleMask.push_back(UndefValue::get(IntTy32));

      Value *SV = Builder.CreateShuffleVector(
          Builder.CreateBitCast(Op0, ShufTy),
          ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
      return Builder.CreateBitCast(SV, II.getType());
    }

    // Constant Fold - shift Index'th bit to lowest position and mask off
    // Length bits.
    if (CI0) {
      APInt Elt = CI0->getValue();
      Elt.lshrInPlace(Index);
      Elt = Elt.zextOrTrunc(Length);
      return LowConstantHighUndef(Elt.getZExtValue());
    }

    // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
    if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
      Value *Args[] = {Op0, CILength, CIIndex};
      Module *M = II.getModule();
      Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
      return Builder.CreateCall(F, Args);
    }
  }

  // Constant Fold - extraction from zero is always {zero, undef}.
  if (CI0 && CI0->isZero())
    return LowConstantHighUndef(0);

  return nullptr;
}

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {
  // From AMD documentation: "The bit index and field length are each six bits
  // in length other bits of the field are ignored."
  APIndex = APIndex.zextOrTrunc(6);
  APLength = APLength.zextOrTrunc(6);

  // Attempt to constant fold.
  unsigned Index = APIndex.getZExtValue();

  // From AMD documentation: "a value of zero in the field length is
  // defined as length of 64".
  unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();

  // From AMD documentation: "If the sum of the bit index + length field
  // is greater than 64, the results are undefined".
  unsigned End = Index + Length;

  // Note that both field index and field length are 8-bit quantities.
  // Since variables 'Index' and 'Length' are unsigned values
  // obtained from zero-extending field index and field length
  // respectively, their sum should never wrap around.
  if (End > 64)
    return UndefValue::get(II.getType());

  // If we are inserting whole bytes, we can convert this to a shuffle.
  // Lowering can recognize INSERTQI shuffle masks.
  if ((Length % 8) == 0 && (Index % 8) == 0) {
    // Convert bit indices to byte indices.
    Length /= 8;
    Index /= 8;

    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Type *IntTy32 = Type::getInt32Ty(II.getContext());
    VectorType *ShufTy = VectorType::get(IntTy8, 16);

    SmallVector<Constant *, 16> ShuffleMask;
    for (int i = 0; i != (int)Index; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 0; i != (int)Length; ++i)
      ShuffleMask.push_back(
          Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
    for (int i = Index + Length; i != 8; ++i)
      ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
    for (int i = 8; i != 16; ++i)
      ShuffleMask.push_back(UndefValue::get(IntTy32));

    Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
                                            Builder.CreateBitCast(Op1, ShufTy),
                                            ConstantVector::get(ShuffleMask));
    return Builder.CreateBitCast(SV, II.getType());
  }

  // See if we're dealing with constant values.
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  ConstantInt *CI00 =
      C0 ? dyn_cast_or_null<ConstantInt>(C0->getAggregateElement((unsigned)0))
         : nullptr;
  ConstantInt *CI10 =
      C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
         : nullptr;

  // Constant Fold - insert bottom Length bits starting at the Index'th bit.
  if (CI00 && CI10) {
    APInt V00 = CI00->getValue();
    APInt V10 = CI10->getValue();
    APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
    V00 = V00 & ~Mask;
    V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
    APInt Val = V00 | V10;
    Type *IntTy64 = Type::getInt64Ty(II.getContext());
    Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
                        UndefValue::get(IntTy64)};
    return ConstantVector::get(Args);
  }

  // If we were an INSERTQ call, we'll save demanded elements if we convert to
  // INSERTQI.
  if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
    Type *IntTy8 = Type::getInt8Ty(II.getContext());
    Constant *CILength = ConstantInt::get(IntTy8, Length, false);
    Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);

    Value *Args[] = {Op0, Op1, CILength, CIIndex};
    Module *M = II.getModule();
    Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
    return Builder.CreateCall(F, Args);
  }

  return nullptr;
}

/// Attempt to convert pshufb* to shufflevector if the mask is constant.
static Value *simplifyX86pshufb(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getNumElements();
  assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
         "Unexpected number of elements in shuffle mask!");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  // Each byte in the shuffle control mask forms an index to permute the
  // corresponding byte in the destination operand.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    int8_t Index = cast<ConstantInt>(COp)->getValue().getZExtValue();

    // If the most significant bit (bit[7]) of each byte of the shuffle
    // control mask is set, then zero is written in the result byte.
    // The zero vector is in the right-hand side of the resulting
    // shufflevector.

    // The value of each index for the high 128-bit lane is the least
    // significant 4 bits of the respective shuffle control byte.
    Index = ((Index < 0) ? NumElts : Index & 0x0F) + (I & 0xF0);
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = Constant::getNullValue(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
                                    InstCombiner::BuilderTy &Builder) {
  Constant *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned NumElts = VecTy->getVectorNumElements();
  bool IsPD = VecTy->getScalarType()->isDoubleTy();
  unsigned NumLaneElts = IsPD ? 2 : 4;
  assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[16] = {nullptr};

  // The intrinsics only read one or two bits, clear the rest.
  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    APInt Index = cast<ConstantInt>(COp)->getValue();
    Index = Index.zextOrTrunc(32).getLoBits(2);

    // The PD variants use bit 1 to select the per-lane element index, so
    // shift down to convert to a generic shuffle mask index.
    if (IsPD)
      Index.lshrInPlace(1);

    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    Index += APInt(32, (I / NumLaneElts) * NumLaneElts);

    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, NumElts));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
static Value *simplifyX86vpermv(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  auto *V = dyn_cast<Constant>(II.getArgOperand(1));
  if (!V)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  auto *MaskEltTy = Type::getInt32Ty(II.getContext());
  unsigned Size = VecTy->getNumElements();
  assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
         "Unexpected shuffle mask size");

  // Construct a shuffle mask from constant integers or UNDEFs.
  Constant *Indexes[64] = {nullptr};

  for (unsigned I = 0; I < Size; ++I) {
    Constant *COp = V->getAggregateElement(I);
    if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
      return nullptr;

    if (isa<UndefValue>(COp)) {
      Indexes[I] = UndefValue::get(MaskEltTy);
      continue;
    }

    uint32_t Index = cast<ConstantInt>(COp)->getZExtValue();
    Index &= Size - 1;
    Indexes[I] = ConstantInt::get(MaskEltTy, Index);
  }

  auto ShuffleMask = ConstantVector::get(makeArrayRef(Indexes, Size));
  auto V1 = II.getArgOperand(0);
  auto V2 = UndefValue::get(VecTy);
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Decode XOP integer vector comparison intrinsics.
static Value *simplifyX86vpcom(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder,
                               bool IsSigned) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    uint64_t Imm = CInt->getZExtValue() & 0x7;
    VectorType *VecTy = cast<VectorType>(II.getType());
    CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;

    switch (Imm) {
    case 0x0:
      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
      break;
    case 0x1:
      Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
      break;
    case 0x2:
      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      break;
    case 0x3:
      Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      break;
    case 0x4:
      Pred = ICmpInst::ICMP_EQ; break;
    case 0x5:
      Pred = ICmpInst::ICMP_NE; break;
    case 0x6:
      return ConstantInt::getSigned(VecTy, 0); // FALSE
    case 0x7:
      return ConstantInt::getSigned(VecTy, -1); // TRUE
    }

    if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0),
                                        II.getArgOperand(1)))
      return Builder.CreateSExtOrTrunc(Cmp, VecTy);
  }
  return nullptr;
}

David Majnemer666aa942016-07-14 06:58:42 +00001197static bool maskIsAllOneOrUndef(Value *Mask) {
1198 auto *ConstMask = dyn_cast<Constant>(Mask);
1199 if (!ConstMask)
1200 return false;
1201 if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1202 return true;
1203 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
1204 ++I) {
1205 if (auto *MaskElt = ConstMask->getAggregateElement(I))
1206 if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1207 continue;
1208 return false;
1209 }
1210 return true;
1211}
1212
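// Illustrative example of the fold below: a masked load whose mask is all ones
// (or undef) is just an ordinary vector load, e.g.
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//            <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
// becomes
//   %v = load <4 x i32>, <4 x i32>* %p, align 4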
static Value *simplifyMaskedLoad(const IntrinsicInst &II,
                                 InstCombiner::BuilderTy &Builder) {
  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    Value *LoadPtr = II.getArgOperand(0);
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
    return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
  }

  return nullptr;
}

static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return IC.eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    unsigned Alignment = cast<ConstantInt>(II.getArgOperand(2))->getZExtValue();
    return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
  }

  return nullptr;
}

static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
  // If the mask is all zeros, return the "passthru" argument of the gather.
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
  if (ConstMask && ConstMask->isNullValue())
    return IC.replaceInstUsesWith(II, II.getArgOperand(3));

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
/// launder(strip(%x)) -> launder(%x)
/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
/// strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombiner &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
  // If the mask is all zeros, a scatter does nothing.
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (ConstMask && ConstMask->isNullValue())
    return IC.eraseInstFromFunction(II);

  return nullptr;
}

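// Illustrative examples of the folds below: if the operand is known nonzero,
// the is_zero_undef flag can be flipped to true, e.g.
//   %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)   ; %x known nonzero
// becomes
//   %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
// and a fully-known low bit pattern folds to a constant, e.g. cttz(i32 8) == 3.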
static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  Value *Op0 = II.getArgOperand(0);

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsUndef' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isNullValue() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One())) {
      II.setOperand(1, IC.Builder.getTrue());
      return &II;
    }
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

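// Illustrative example of the fold below: for an i8 operand with one bit known
// set and two bits unknown, ctpop is somewhere in [1, 3], so the call gets
// !range metadata of [1, 4).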
static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Value *Op0 = II.getArgOperand(0);
  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (!IT)
    return nullptr;

  unsigned BitWidth = IT->getBitWidth();
  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();

  // Add range metadata since known bits can't completely reflect what we know.
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Constant *ZeroVec = Constant::getNullValue(II.getType());

  // Special case a zero mask since that's not a ConstantDataVector.
  // This masked load instruction creates a zero vector.
  if (isa<ConstantAggregateZero>(Mask))
    return IC.replaceInstUsesWith(II, ZeroVec);

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return nullptr;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  // The pass-through vector for an x86 masked load is a zero vector.
  CallInst *NewMaskedLoad =
      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
  return IC.replaceInstUsesWith(II, NewMaskedLoad);
}

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getOperand(0);
  Value *Mask = II.getOperand(1);
  Value *Vec = II.getOperand(2);

  // Special case a zero mask since that's not a ConstantDataVector:
  // this masked store instruction does nothing.
  if (isa<ConstantAggregateZero>(Mask)) {
    IC.eraseInstFromFunction(II);
    return true;
  }

  // The SSE2 version is too weird (e.g., unaligned but non-temporal) to do
  // anything else at this level.
  if (II.getIntrinsicID() == Intrinsic::x86_sse2_maskmov_dqu)
    return false;

  auto *ConstMask = dyn_cast<ConstantDataVector>(Mask);
  if (!ConstMask)
    return false;

  // The mask is constant. Convert this x86 intrinsic to the LLVM intrinsic
  // to allow target-independent optimizations.

  // First, cast the x86 intrinsic scalar pointer to a vector pointer to match
  // the LLVM intrinsic definition for the pointer argument.
  unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
  PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
  Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");

  // Second, convert the x86 XMM integer vector mask to a vector of bools based
  // on each element's most significant bit (the sign bit).
  Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

  IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);

  // 'Replace uses' doesn't work for stores. Erase the original masked store.
  IC.eraseInstFromFunction(II);
  return true;
}

// Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
//
// A single NaN input is folded to minnum, so we rely on that folding for
// handling NaNs.
static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
                           const APFloat &Src2) {
  APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);

  APFloat::cmpResult Cmp0 = Max3.compare(Src0);
  assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp0 == APFloat::cmpEqual)
    return maxnum(Src1, Src2);

  APFloat::cmpResult Cmp1 = Max3.compare(Src1);
  assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp1 == APFloat::cmpEqual)
    return maxnum(Src0, Src2);

  return maxnum(Src0, Src1);
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  uint32_t Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if (Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
                                              makeArrayRef(Indexes));
  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

/// Convert a vector load intrinsic into a simple llvm load instruction.
/// This is beneficial when the underlying object being addressed comes
/// from a constant, since we get constant-folding for free.
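///
/// A sketch of the fold (illustrative IR, ARM vld1 form; the exact intrinsic
/// name mangling is an assumption here):
///   %v = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %p, i32 16)
/// becomes
///   %c = bitcast i8* %p to <2 x i64>*
///   %v = load <2 x i64>, <2 x i64>* %c, align 16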
static Value *simplifyNeonVld1(const IntrinsicInst &II,
                               unsigned MemAlign,
                               InstCombiner::BuilderTy &Builder) {
  auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));

  if (!IntrAlign)
    return nullptr;

  unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign ?
                       MemAlign : IntrAlign->getLimitedValue();

  if (!isPowerOf2_32(Alignment))
    return nullptr;

  auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::get(II.getType(), 0));
  return Builder.CreateAlignedLoad(BCastInst, Alignment);
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0) ; &I
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.end(i1 0) ; This one will not be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
static bool removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID,
                                      unsigned EndID, InstCombiner &IC) {
  assert(I.getIntrinsicID() == StartID &&
         "Start intrinsic does not have expected ID");
  BasicBlock::iterator BI(I), BE(I.getParent()->end());
  for (++BI; BI != BE; ++BI) {
    if (auto *E = dyn_cast<IntrinsicInst>(BI)) {
      if (isa<DbgInfoIntrinsic>(E) || E->getIntrinsicID() == StartID)
        continue;
      if (E->getIntrinsicID() == EndID &&
          haveSameOperands(I, *E, E->getNumArgOperands())) {
        IC.eraseInstFromFunction(*E);
        IC.eraseInstFromFunction(I);
        return true;
      }
    }
    break;
  }

  return false;
}

// Convert NVVM intrinsics to target-generic LLVM code where possible.
static Instruction *SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
  // Each NVVM intrinsic we can simplify can be replaced with one of:
  //
  //  * an LLVM intrinsic,
  //  * an LLVM cast operation,
  //  * an LLVM binary operation, or
  //  * ad-hoc LLVM IR for the particular operation.
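  //
  // For example (illustrative): @llvm.nvvm.ceil.d(double %x) maps to the
  // target-generic @llvm.ceil.f64(double %x), and @llvm.nvvm.add.rn.d(%x, %y)
  // maps to a plain fadd.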

  // Some transformations are only valid when the module's
  // flush-denormals-to-zero (ftz) setting is true/false, whereas other
  // transformations are valid regardless of the module's ftz setting.
  enum FtzRequirementTy {
    FTZ_Any,       // Any ftz setting is ok.
    FTZ_MustBeOn,  // Transformation is valid only if ftz is on.
    FTZ_MustBeOff, // Transformation is valid only if ftz is off.
  };
  // Classes of NVVM intrinsics that can't be replaced one-to-one with a
  // target-generic intrinsic, cast op, or binary op but that we can nonetheless
  // simplify.
  enum SpecialCase {
    SPC_Reciprocal,
  };

  // SimplifyAction is a poor-man's variant (plus an additional flag) that
  // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
  struct SimplifyAction {
    // Invariant: At most one of these Optionals has a value.
    Optional<Intrinsic::ID> IID;
    Optional<Instruction::CastOps> CastOp;
    Optional<Instruction::BinaryOps> BinaryOp;
    Optional<SpecialCase> Special;

    FtzRequirementTy FtzRequirement = FTZ_Any;

    SimplifyAction() = default;

    SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq)
        : IID(IID), FtzRequirement(FtzReq) {}

    // Cast operations don't have anything to do with FTZ, so we skip that
    // argument.
    SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}

    SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
        : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}

    SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
        : Special(Special), FtzRequirement(FtzReq) {}
  };

  // Try to generate a SimplifyAction describing how to replace our
  // IntrinsicInstr with target-generic LLVM IR.
  const SimplifyAction Action = [II]() -> SimplifyAction {
    switch (II->getIntrinsicID()) {
    // NVVM intrinsics that map directly to LLVM intrinsics.
    case Intrinsic::nvvm_ceil_d:
      return {Intrinsic::ceil, FTZ_Any};
    case Intrinsic::nvvm_ceil_f:
      return {Intrinsic::ceil, FTZ_MustBeOff};
    case Intrinsic::nvvm_ceil_ftz_f:
      return {Intrinsic::ceil, FTZ_MustBeOn};
    case Intrinsic::nvvm_fabs_d:
      return {Intrinsic::fabs, FTZ_Any};
    case Intrinsic::nvvm_fabs_f:
      return {Intrinsic::fabs, FTZ_MustBeOff};
    case Intrinsic::nvvm_fabs_ftz_f:
      return {Intrinsic::fabs, FTZ_MustBeOn};
    case Intrinsic::nvvm_floor_d:
      return {Intrinsic::floor, FTZ_Any};
    case Intrinsic::nvvm_floor_f:
      return {Intrinsic::floor, FTZ_MustBeOff};
    case Intrinsic::nvvm_floor_ftz_f:
      return {Intrinsic::floor, FTZ_MustBeOn};
    case Intrinsic::nvvm_fma_rn_d:
      return {Intrinsic::fma, FTZ_Any};
    case Intrinsic::nvvm_fma_rn_f:
      return {Intrinsic::fma, FTZ_MustBeOff};
    case Intrinsic::nvvm_fma_rn_ftz_f:
      return {Intrinsic::fma, FTZ_MustBeOn};
    case Intrinsic::nvvm_fmax_d:
      return {Intrinsic::maxnum, FTZ_Any};
    case Intrinsic::nvvm_fmax_f:
      return {Intrinsic::maxnum, FTZ_MustBeOff};
    case Intrinsic::nvvm_fmax_ftz_f:
      return {Intrinsic::maxnum, FTZ_MustBeOn};
    case Intrinsic::nvvm_fmin_d:
      return {Intrinsic::minnum, FTZ_Any};
    case Intrinsic::nvvm_fmin_f:
      return {Intrinsic::minnum, FTZ_MustBeOff};
    case Intrinsic::nvvm_fmin_ftz_f:
      return {Intrinsic::minnum, FTZ_MustBeOn};
    case Intrinsic::nvvm_round_d:
      return {Intrinsic::round, FTZ_Any};
    case Intrinsic::nvvm_round_f:
      return {Intrinsic::round, FTZ_MustBeOff};
    case Intrinsic::nvvm_round_ftz_f:
      return {Intrinsic::round, FTZ_MustBeOn};
    case Intrinsic::nvvm_sqrt_rn_d:
      return {Intrinsic::sqrt, FTZ_Any};
    case Intrinsic::nvvm_sqrt_f:
      // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
      // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
      // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
      // the versions with explicit ftz-ness.
      return {Intrinsic::sqrt, FTZ_Any};
    case Intrinsic::nvvm_sqrt_rn_f:
      return {Intrinsic::sqrt, FTZ_MustBeOff};
    case Intrinsic::nvvm_sqrt_rn_ftz_f:
      return {Intrinsic::sqrt, FTZ_MustBeOn};
    case Intrinsic::nvvm_trunc_d:
      return {Intrinsic::trunc, FTZ_Any};
    case Intrinsic::nvvm_trunc_f:
      return {Intrinsic::trunc, FTZ_MustBeOff};
    case Intrinsic::nvvm_trunc_ftz_f:
      return {Intrinsic::trunc, FTZ_MustBeOn};

    // NVVM intrinsics that map to LLVM cast operations.
    //
    // Note that llvm's target-generic conversion operators correspond to the rz
    // (round to zero) versions of the nvvm conversion intrinsics, even though
    // most everything else here uses the rn (round to nearest even) nvvm ops.
    case Intrinsic::nvvm_d2i_rz:
    case Intrinsic::nvvm_f2i_rz:
    case Intrinsic::nvvm_d2ll_rz:
    case Intrinsic::nvvm_f2ll_rz:
      return {Instruction::FPToSI};
    case Intrinsic::nvvm_d2ui_rz:
    case Intrinsic::nvvm_f2ui_rz:
    case Intrinsic::nvvm_d2ull_rz:
    case Intrinsic::nvvm_f2ull_rz:
      return {Instruction::FPToUI};
    case Intrinsic::nvvm_i2d_rz:
    case Intrinsic::nvvm_i2f_rz:
    case Intrinsic::nvvm_ll2d_rz:
    case Intrinsic::nvvm_ll2f_rz:
      return {Instruction::SIToFP};
    case Intrinsic::nvvm_ui2d_rz:
    case Intrinsic::nvvm_ui2f_rz:
    case Intrinsic::nvvm_ull2d_rz:
    case Intrinsic::nvvm_ull2f_rz:
      return {Instruction::UIToFP};

    // NVVM intrinsics that map to LLVM binary ops.
    case Intrinsic::nvvm_add_rn_d:
      return {Instruction::FAdd, FTZ_Any};
    case Intrinsic::nvvm_add_rn_f:
      return {Instruction::FAdd, FTZ_MustBeOff};
    case Intrinsic::nvvm_add_rn_ftz_f:
      return {Instruction::FAdd, FTZ_MustBeOn};
    case Intrinsic::nvvm_mul_rn_d:
      return {Instruction::FMul, FTZ_Any};
    case Intrinsic::nvvm_mul_rn_f:
      return {Instruction::FMul, FTZ_MustBeOff};
    case Intrinsic::nvvm_mul_rn_ftz_f:
      return {Instruction::FMul, FTZ_MustBeOn};
    case Intrinsic::nvvm_div_rn_d:
      return {Instruction::FDiv, FTZ_Any};
    case Intrinsic::nvvm_div_rn_f:
      return {Instruction::FDiv, FTZ_MustBeOff};
    case Intrinsic::nvvm_div_rn_ftz_f:
      return {Instruction::FDiv, FTZ_MustBeOn};

    // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
    // need special handling.
    //
    // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
    // as well.
    case Intrinsic::nvvm_rcp_rn_d:
      return {SPC_Reciprocal, FTZ_Any};
    case Intrinsic::nvvm_rcp_rn_f:
      return {SPC_Reciprocal, FTZ_MustBeOff};
    case Intrinsic::nvvm_rcp_rn_ftz_f:
      return {SPC_Reciprocal, FTZ_MustBeOn};

    // We do not currently simplify intrinsics that give an approximate answer.
    // These include:
    //
    //   - nvvm_cos_approx_{f,ftz_f}
    //   - nvvm_ex2_approx_{d,f,ftz_f}
    //   - nvvm_lg2_approx_{d,f,ftz_f}
    //   - nvvm_sin_approx_{f,ftz_f}
    //   - nvvm_sqrt_approx_{f,ftz_f}
    //   - nvvm_rsqrt_approx_{d,f,ftz_f}
    //   - nvvm_div_approx_{ftz_d,ftz_f,f}
    //   - nvvm_rcp_approx_ftz_d
    //
    // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
    // means that fastmath is enabled in the intrinsic. Unfortunately only
    // binary operators (currently) have a fastmath bit in SelectionDAG, so this
    // information gets lost and we can't select on it.
    //
    // TODO: div and rcp are lowered to a binary op, so these we could in theory
    // lower them to "fast fdiv".

    default:
      return {};
    }
  }();

  // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
  // can bail out now. (Notice that in the case that IID is not an NVVM
  // intrinsic, we don't have to look up any module metadata, as
  // FtzRequirementTy will be FTZ_Any.)
  if (Action.FtzRequirement != FTZ_Any) {
    bool FtzEnabled =
        II->getFunction()->getFnAttribute("nvptx-f32ftz").getValueAsString() ==
        "true";

    if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
      return nullptr;
  }

  // Simplify to target-generic intrinsic.
  if (Action.IID) {
    SmallVector<Value *, 4> Args(II->arg_operands());
    // All the target-generic intrinsics currently of interest to us have one
    // type argument, equal to that of the nvvm intrinsic's argument.
    Type *Tys[] = {II->getArgOperand(0)->getType()};
    return CallInst::Create(
        Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
  }

  // Simplify to target-generic binary op.
  if (Action.BinaryOp)
    return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
                                  II->getArgOperand(1), II->getName());

  // Simplify to target-generic cast op.
  if (Action.CastOp)
    return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
                            II->getName());

  // All that's left are the special cases.
  if (!Action.Special)
    return nullptr;

  switch (*Action.Special) {
  case SPC_Reciprocal:
    // Simplify reciprocal.
    return BinaryOperator::Create(
        Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
        II->getArgOperand(0), II->getName());
  }
  llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
}

Instruction *InstCombiner::visitVAStartInst(VAStartInst &I) {
  removeTriviallyEmptyRange(I, Intrinsic::vastart, Intrinsic::vaend, *this);
  return nullptr;
}

Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
  removeTriviallyEmptyRange(I, Intrinsic::vacopy, Intrinsic::vaend, *this);
  return nullptr;
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallSite to do the heavy
/// lifting.
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
    return replaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, &TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  if (Instruction *I = SimplifyNVVMIntrinsic(II, *this))
    return I;

  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
                                              unsigned DemandedWidth) {
    APInt UndefElts(Width, 0);
    APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
  };

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize:
    if (ConstantInt *N =
            lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
      return replaceInstUsesWith(CI, N);
    return nullptr;
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
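    // For example (illustrative), with an i32 %x truncated to i16:
    //   bswap.i16(trunc(bswap.i32(%x))) == trunc(%x >> 16)
    // because both sides select the top two bytes of %x in reversed order.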
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder.CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II, *this);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II, *this);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II, *this);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
      return replaceInstUsesWith(*II, SkippedBarrier);
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // 0 and 1 are handled in instsimplify

      // powi(x, -1) -> 1/x
      if (Power->isMinusOne())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
      // powi(x, 2) -> x*x
      if (Power->equalsInt(2))
        return BinaryOperator::CreateFMul(II->getArgOperand(0),
                                          II->getArgOperand(0));
    }
    break;

  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    if (auto *I = foldCttzCtlz(*II, *this))
      return I;
    break;

  case Intrinsic::ctpop:
    if (auto *I = foldCtpop(*II, *this))
      return I;
    break;

  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
    // so only the low bits of the shift amount are demanded if the bitwidth is
    // a power-of-2.
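    // For example (illustrative): for a 32-bit funnel shift, only the low
    // Log2(32) == 5 bits of the shift amount can affect the result.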
    unsigned BitWidth = II->getType()->getScalarSizeInBits();
    if (!isPowerOf2_32(BitWidth))
      break;
    APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
    KnownBits Op2Known(BitWidth);
    if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
      return &CI;
    break;
  }
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      // Canonicalize constants into the RHS.
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }
    LLVM_FALLTHROUGH;

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    OverflowCheckFlavor OCF =
        IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
    assert(OCF != OCF_INVALID && "unexpected!");

    Value *OperationResult = nullptr;
    Constant *OverflowResult = nullptr;
    if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
                              *II, OperationResult, OverflowResult))
      return CreateOverflowTuple(II, OperationResult, OverflowResult);

    break;
  }

  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // Canonicalize constants to the RHS.
    if (isa<ConstantFP>(Arg0) && !isa<ConstantFP>(Arg1)) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }

    Intrinsic::ID IID = II->getIntrinsicID();
    Value *X, *Y;
    if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
        (Arg0->hasOneUse() || Arg1->hasOneUse())) {
      // If both operands are negated, invert the call and negate the result:
      // min(-X, -Y) --> -(max(X, Y))
      // max(-X, -Y) --> -(min(X, Y))
      Intrinsic::ID NewIID;
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;
      default:
        llvm_unreachable("unexpected intrinsic ID");
      }
      Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
      Instruction *FNeg = BinaryOperator::CreateFNeg(NewCall);
      FNeg->copyIRFlags(II);
      return FNeg;
    }

    // m(m(X, C2), C1) -> m(X, C)
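    // For example (illustrative): maxnum(maxnum(X, 2.0), 3.0) folds to
    // maxnum(X, 3.0), because maxnum(2.0, 3.0) == 3.0.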
    const APFloat *C1, *C2;
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
      if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
          ((match(M->getArgOperand(0), m_Value(X)) &&
            match(M->getArgOperand(1), m_APFloat(C2))) ||
           (match(M->getArgOperand(1), m_Value(X)) &&
            match(M->getArgOperand(0), m_APFloat(C2))))) {
        APFloat Res(0.0);
        switch (IID) {
        case Intrinsic::maxnum:
          Res = maxnum(*C1, *C2);
          break;
        case Intrinsic::minnum:
          Res = minnum(*C1, *C2);
          break;
        case Intrinsic::maximum:
          Res = maximum(*C1, *C2);
          break;
        case Intrinsic::minimum:
          Res = minimum(*C1, *C2);
          break;
        default:
          llvm_unreachable("unexpected intrinsic ID");
        }
        Instruction *NewCall = Builder.CreateBinaryIntrinsic(
            IID, X, ConstantFP::get(Arg0->getType(), Res));
        NewCall->copyIRFlags(II);
        return replaceInstUsesWith(*II, NewCall);
      }
    }

    break;
  }
  case Intrinsic::fmuladd: {
    // Canonicalize fast fmuladd to the separate fmul + fadd.
    if (II->isFast()) {
      BuilderTy::FastMathFlagGuard Guard(Builder);
      Builder.setFastMathFlags(II->getFastMathFlags());
      Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
                                      II->getArgOperand(1));
      Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
      Add->takeName(II);
      return replaceInstUsesWith(*II, Add);
    }

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::fma: {
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);

    // Canonicalize constant multiply operand to Src1.
    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
      II->setArgOperand(0, Src1);
      II->setArgOperand(1, Src0);
      std::swap(Src0, Src1);
    }

    // fma fneg(x), fneg(y), z -> fma x, y, z
    Value *X, *Y;
    if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
      II->setArgOperand(0, X);
      II->setArgOperand(1, Y);
      return II;
    }

    // fma fabs(x), fabs(x), z -> fma x, x, z
    if (match(Src0, m_FAbs(m_Value(X))) &&
        match(Src1, m_FAbs(m_Specific(X)))) {
      II->setArgOperand(0, X);
      II->setArgOperand(1, X);
      return II;
    }

    // fma x, 1, z -> fadd x, z
    if (match(Src1, m_FPOne())) {
      auto *FAdd = BinaryOperator::CreateFAdd(Src0, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      return FAdd;
    }

    break;
  }
  case Intrinsic::fabs: {
    Value *Cond;
    Constant *LHS, *RHS;
    if (match(II->getArgOperand(0),
              m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
      CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
      CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
      return SelectInst::Create(Cond, Call0, Call1);
    }

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {
    Value *ExtSrc;
    if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
      // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
      Value *NarrowII =
          Builder.CreateUnaryIntrinsic(II->getIntrinsicID(), ExtSrc, II);
      return new FPExtInst(NarrowII, II->getType());
    }
    break;
  }
  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {
    Value *X;
    Value *Src = II->getArgOperand(0);
    if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
      // cos(-x) -> cos(x)
      // cos(fabs(x)) -> cos(x)
      II->setArgOperand(0, X);
      return II;
    }
    break;
  }
  case Intrinsic::sin: {
    Value *X;
    if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
      // sin(-x) --> -sin(x)
      Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
      Instruction *FNeg = BinaryOperator::CreateFNeg(NewSin);
      FNeg->copyFastMathFlags(II);
      return FNeg;
    }
    break;
  }
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                   &DT) >= 16) {
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                       PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                   &DT) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
  case Intrinsic::ppc_qpx_qvlfs:
    // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                   &DT) >= 16) {
      Type *VTy = VectorType::get(Builder.getFloatTy(),
                                  II->getType()->getVectorNumElements());
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(VTy));
      Value *Load = Builder.CreateLoad(Ptr);
      return new FPExtInst(Load, II->getType());
    }
    break;
  case Intrinsic::ppc_qpx_qvlfd:
    // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
                                   &DT) >= 32) {
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfs:
    // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                   &DT) >= 16) {
      Type *VTy = VectorType::get(Builder.getFloatTy(),
          II->getArgOperand(0)->getType()->getVectorNumElements());
      Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
      Type *OpPtrTy = PointerType::getUnqual(VTy);
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(TOp, Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfd:
    // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
                                   &DT) >= 32) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;

  case Intrinsic::x86_bmi_bextr_32:
  case Intrinsic::x86_bmi_bextr_64:
  case Intrinsic::x86_tbm_bextri_u32:
  case Intrinsic::x86_tbm_bextri_u64:
    // If the RHS is a constant we can try some simplifications.
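    // For example (illustrative): bextr(0x12345678, 0x0804) extracts 8 bits
    // starting at bit 4, i.e. (0x12345678 >> 4) & 0xff == 0x67.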
    if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      uint64_t Shift = C->getZExtValue();
      uint64_t Length = (Shift >> 8) & 0xff;
      Shift &= 0xff;
      unsigned BitWidth = II->getType()->getIntegerBitWidth();
      // If the length is 0 or the shift is out of range, replace with zero.
      if (Length == 0 || Shift >= BitWidth)
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
      // If the LHS is also a constant, we can completely constant fold this.
      if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
        uint64_t Result = InC->getZExtValue() >> Shift;
        if (Length > BitWidth)
          Length = BitWidth;
        Result &= maskTrailingOnes<uint64_t>(Length);
        return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
      }
      // TODO should we turn this into 'and' if shift is 0? Or 'shl' if we
      // are only masking bits that a shift already cleared?
    }
    break;

  case Intrinsic::x86_bmi_bzhi_32:
  case Intrinsic::x86_bmi_bzhi_64:
    // If the RHS is a constant we can try some simplifications.
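    // For example (illustrative): bzhi(0x12345678, 8) zeroes all bits from
    // bit 8 upward, yielding 0x78.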
2321 if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2322 uint64_t Index = C->getZExtValue() & 0xff;
2323 unsigned BitWidth = II->getType()->getIntegerBitWidth();
2324 if (Index >= BitWidth)
2325 return replaceInstUsesWith(CI, II->getArgOperand(0));
2326 if (Index == 0)
2327 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), 0));
2328 // If the LHS is also a constant, we can completely constant fold this.
2329 if (auto *InC = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
2330 uint64_t Result = InC->getZExtValue();
2331 Result &= maskTrailingOnes<uint64_t>(Index);
2332 return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Result));
2333 }
2334 // TODO should we convert this to an AND if the RHS is constant?
2335 }
2336 break;
2337
  case Intrinsic::x86_vcvtph2ps_128:
  case Intrinsic::x86_vcvtph2ps_256: {
    auto Arg = II->getArgOperand(0);
    auto ArgType = cast<VectorType>(Arg->getType());
    auto RetType = cast<VectorType>(II->getType());
    unsigned ArgWidth = ArgType->getNumElements();
    unsigned RetWidth = RetType->getNumElements();
    assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
    assert(ArgType->isIntOrIntVectorTy() &&
           ArgType->getScalarSizeInBits() == 16 &&
           "CVTPH2PS input type should be 16-bit integer vector");
    assert(RetType->getScalarType()->isFloatTy() &&
           "CVTPH2PS output type should be 32-bit float vector");

    // Constant folding: Convert to generic half to single conversion.
    if (isa<ConstantAggregateZero>(Arg))
      return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));

    if (isa<ConstantDataVector>(Arg)) {
      auto VectorHalfAsShorts = Arg;
      if (RetWidth < ArgWidth) {
        SmallVector<uint32_t, 8> SubVecMask;
        for (unsigned i = 0; i != RetWidth; ++i)
          SubVecMask.push_back((int)i);
        VectorHalfAsShorts = Builder.CreateShuffleVector(
            Arg, UndefValue::get(ArgType), SubVecMask);
      }

      auto VectorHalfType =
          VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
      auto VectorHalfs =
          Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
      auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
      return replaceInstUsesWith(*II, VectorFloats);
    }

    // We only use the lowest lanes of the argument.
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    Value *Arg = II->getArgOperand(0);
    unsigned VWidth = Arg->getType()->getVectorNumElements();
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse41_round_ps:
  case Intrinsic::x86_sse41_round_pd:
  case Intrinsic::x86_avx_round_ps_256:
  case Intrinsic::x86_avx_round_pd_256:
  case Intrinsic::x86_avx512_mask_rndscale_ps_128:
  case Intrinsic::x86_avx512_mask_rndscale_ps_256:
  case Intrinsic::x86_avx512_mask_rndscale_ps_512:
  case Intrinsic::x86_avx512_mask_rndscale_pd_128:
  case Intrinsic::x86_avx512_mask_rndscale_pd_256:
  case Intrinsic::x86_avx512_mask_rndscale_pd_512:
  case Intrinsic::x86_avx512_mask_rndscale_ss:
  case Intrinsic::x86_avx512_mask_rndscale_sd:
    if (Value *V = simplifyX86round(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

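  // movmsk packs the sign bit of each input lane into a scalar bitmask, so
  // the helper below can constant fold it when the argument is a constant
  // (or fold an undef argument to zero).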
  case Intrinsic::x86_mmx_pmovmskb:
  case Intrinsic::x86_sse_movmsk_ps:
  case Intrinsic::x86_sse2_movmsk_pd:
  case Intrinsic::x86_sse2_pmovmskb_128:
  case Intrinsic::x86_avx_movmsk_pd_256:
  case Intrinsic::x86_avx_movmsk_ps_256:
  case Intrinsic::x86_avx2_pmovmskb:
    if (Value *V = simplifyX86movmsk(*II))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse_comieq_ss:
  case Intrinsic::x86_sse_comige_ss:
  case Intrinsic::x86_sse_comigt_ss:
  case Intrinsic::x86_sse_comile_ss:
  case Intrinsic::x86_sse_comilt_ss:
  case Intrinsic::x86_sse_comineq_ss:
  case Intrinsic::x86_sse_ucomieq_ss:
  case Intrinsic::x86_sse_ucomige_ss:
  case Intrinsic::x86_sse_ucomigt_ss:
  case Intrinsic::x86_sse_ucomile_ss:
  case Intrinsic::x86_sse_ucomilt_ss:
  case Intrinsic::x86_sse_ucomineq_ss:
  case Intrinsic::x86_sse2_comieq_sd:
  case Intrinsic::x86_sse2_comige_sd:
  case Intrinsic::x86_sse2_comigt_sd:
  case Intrinsic::x86_sse2_comile_sd:
  case Intrinsic::x86_sse2_comilt_sd:
  case Intrinsic::x86_sse2_comineq_sd:
  case Intrinsic::x86_sse2_ucomieq_sd:
  case Intrinsic::x86_sse2_ucomige_sd:
  case Intrinsic::x86_sse2_ucomigt_sd:
  case Intrinsic::x86_sse2_ucomile_sd:
  case Intrinsic::x86_sse2_ucomilt_sd:
  case Intrinsic::x86_sse2_ucomineq_sd:
  case Intrinsic::x86_avx512_vcomi_ss:
  case Intrinsic::x86_avx512_vcomi_sd:
  case Intrinsic::x86_avx512_mask_cmp_ss:
  case Intrinsic::x86_avx512_mask_cmp_sd: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    bool MadeChange = false;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    unsigned VWidth = Arg0->getType()->getVectorNumElements();
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
      II->setArgOperand(0, V);
      MadeChange = true;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, 1)) {
      II->setArgOperand(1, V);
      MadeChange = true;
    }
    if (MadeChange)
      return II;
    break;
  }
  case Intrinsic::x86_avx512_cmp_pd_128:
  case Intrinsic::x86_avx512_cmp_pd_256:
  case Intrinsic::x86_avx512_cmp_pd_512:
  case Intrinsic::x86_avx512_cmp_ps_128:
  case Intrinsic::x86_avx512_cmp_ps_256:
  case Intrinsic::x86_avx512_cmp_ps_512: {
    // Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool Arg0IsZero = match(Arg0, m_PosZeroFP());
    if (Arg0IsZero)
      std::swap(Arg0, Arg1);
    Value *A, *B;
    // This fold requires only the NINF (no +/- infinities) fast-math flag,
    // since inf minus inf is nan.
    // NSZ (no signed zeros) is not needed because zeros of any sign are
    // equal for both compares.
    // NNAN is not needed because nans compare the same for both compares.
    // The compare intrinsic uses the above assumptions and therefore
    // doesn't require additional flags.
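    // For example (illustrative IR):
    //   %s = fsub ninf <16 x float> %a, %b
    //   cmp(%s, zeroinitializer, pred)  ->  cmp(%a, %b, pred)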
    if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
         match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
         cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
      if (Arg0IsZero)
        std::swap(A, B);
      II->setArgOperand(0, A);
      II->setArgOperand(1, B);
      return II;
    }
    break;
  }

  case Intrinsic::x86_avx512_add_ps_512:
  case Intrinsic::x86_avx512_div_ps_512:
  case Intrinsic::x86_avx512_mul_ps_512:
  case Intrinsic::x86_avx512_sub_ps_512:
  case Intrinsic::x86_avx512_add_pd_512:
  case Intrinsic::x86_avx512_div_pd_512:
  case Intrinsic::x86_avx512_mul_pd_512:
  case Intrinsic::x86_avx512_sub_pd_512:
    // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
    // IR operations.
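    // For example (illustrative IR):
    //   @llvm.x86.avx512.add.ps.512(%a, %b, i32 4)  ->  fadd %a, %b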
    if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      if (R->getValue() == 4) {
        Value *Arg0 = II->getArgOperand(0);
        Value *Arg1 = II->getArgOperand(1);

        Value *V;
        switch (II->getIntrinsicID()) {
        default: llvm_unreachable("Case stmts out of sync!");
        case Intrinsic::x86_avx512_add_ps_512:
        case Intrinsic::x86_avx512_add_pd_512:
          V = Builder.CreateFAdd(Arg0, Arg1);
          break;
        case Intrinsic::x86_avx512_sub_ps_512:
        case Intrinsic::x86_avx512_sub_pd_512:
          V = Builder.CreateFSub(Arg0, Arg1);
          break;
        case Intrinsic::x86_avx512_mul_ps_512:
        case Intrinsic::x86_avx512_mul_pd_512:
          V = Builder.CreateFMul(Arg0, Arg1);
          break;
        case Intrinsic::x86_avx512_div_ps_512:
        case Intrinsic::x86_avx512_div_pd_512:
          V = Builder.CreateFDiv(Arg0, Arg1);
          break;
        }

        return replaceInstUsesWith(*II, V);
      }
    }
    break;

  case Intrinsic::x86_avx512_mask_add_ss_round:
  case Intrinsic::x86_avx512_mask_div_ss_round:
  case Intrinsic::x86_avx512_mask_mul_ss_round:
  case Intrinsic::x86_avx512_mask_sub_ss_round:
  case Intrinsic::x86_avx512_mask_add_sd_round:
  case Intrinsic::x86_avx512_mask_div_sd_round:
  case Intrinsic::x86_avx512_mask_mul_sd_round:
  case Intrinsic::x86_avx512_mask_sub_sd_round:
    // If the rounding mode is CUR_DIRECTION(4) we can turn these into regular
    // IR operations.
    if (auto *R = dyn_cast<ConstantInt>(II->getArgOperand(4))) {
      if (R->getValue() == 4) {
        // Extract the low elements as scalars.
        Value *Arg0 = II->getArgOperand(0);
        Value *Arg1 = II->getArgOperand(1);
        Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
        Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);

        Value *V;
        switch (II->getIntrinsicID()) {
        default: llvm_unreachable("Case stmts out of sync!");
        case Intrinsic::x86_avx512_mask_add_ss_round:
        case Intrinsic::x86_avx512_mask_add_sd_round:
          V = Builder.CreateFAdd(LHS, RHS);
          break;
        case Intrinsic::x86_avx512_mask_sub_ss_round:
        case Intrinsic::x86_avx512_mask_sub_sd_round:
          V = Builder.CreateFSub(LHS, RHS);
          break;
        case Intrinsic::x86_avx512_mask_mul_ss_round:
        case Intrinsic::x86_avx512_mask_mul_sd_round:
          V = Builder.CreateFMul(LHS, RHS);
          break;
        case Intrinsic::x86_avx512_mask_div_ss_round:
        case Intrinsic::x86_avx512_mask_div_sd_round:
          V = Builder.CreateFDiv(LHS, RHS);
          break;
        }

        // Handle the masking aspect of the intrinsic.
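        // Only bit 0 of the scalar mask matters for the low lane:
        //   result[0] = mask[0] ? computed[0] : passthru[0]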
        Value *Mask = II->getArgOperand(3);
        auto *C = dyn_cast<ConstantInt>(Mask);
        // We don't need a select if we know the mask bit is a 1.
        if (!C || !C->getValue()[0]) {
          // Cast the mask to an i1 vector and then extract the lowest element.
          auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
              cast<IntegerType>(Mask->getType())->getBitWidth());
          Mask = Builder.CreateBitCast(Mask, MaskTy);
          Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
          // Extract the lowest element from the passthru operand.
          Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
                                                         (uint64_t)0);
          V = Builder.CreateSelect(Mask, V, Passthru);
        }

        // Insert the result back into the original argument 0.
        V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);

        return replaceInstUsesWith(*II, V);
      }
    }
    LLVM_FALLTHROUGH;

  // X86 scalar intrinsics simplified with SimplifyDemandedVectorElts.
  case Intrinsic::x86_avx512_mask_max_ss_round:
  case Intrinsic::x86_avx512_mask_min_ss_round:
  case Intrinsic::x86_avx512_mask_max_sd_round:
  case Intrinsic::x86_avx512_mask_min_sd_round:
  case Intrinsic::x86_sse_cmp_ss:
  case Intrinsic::x86_sse_min_ss:
  case Intrinsic::x86_sse_max_ss:
  case Intrinsic::x86_sse2_cmp_sd:
  case Intrinsic::x86_sse2_min_sd:
  case Intrinsic::x86_sse2_max_sd:
  case Intrinsic::x86_xop_vfrcz_ss:
  case Intrinsic::x86_xop_vfrcz_sd: {
    unsigned VWidth = II->getType()->getVectorNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    }
    break;
  }
  case Intrinsic::x86_sse41_round_ss:
  case Intrinsic::x86_sse41_round_sd: {
    unsigned VWidth = II->getType()->getVectorNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    } else if (Value *V = simplifyX86round(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;
  }

  // Constant fold add/sub with saturation intrinsics.
  case Intrinsic::x86_sse2_padds_b:
  case Intrinsic::x86_sse2_padds_w:
  case Intrinsic::x86_sse2_psubs_b:
  case Intrinsic::x86_sse2_psubs_w:
  case Intrinsic::x86_avx2_padds_b:
  case Intrinsic::x86_avx2_padds_w:
  case Intrinsic::x86_avx2_psubs_b:
  case Intrinsic::x86_avx2_psubs_w:
  case Intrinsic::x86_avx512_padds_b_512:
  case Intrinsic::x86_avx512_padds_w_512:
  case Intrinsic::x86_avx512_psubs_b_512:
  case Intrinsic::x86_avx512_psubs_w_512:
    if (Value *V = simplifyX86AddsSubs(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  // Constant fold ashr( <A x Bi>, Ci ).
  // Constant fold lshr( <A x Bi>, Ci ).
  // Constant fold shl( <A x Bi>, Ci ).
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx512_psrai_q_128:
  case Intrinsic::x86_avx512_psrai_q_256:
  case Intrinsic::x86_avx512_psrai_d_512:
  case Intrinsic::x86_avx512_psrai_q_512:
  case Intrinsic::x86_avx512_psrai_w_512:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w:
  case Intrinsic::x86_avx512_psrli_d_512:
  case Intrinsic::x86_avx512_psrli_q_512:
  case Intrinsic::x86_avx512_psrli_w_512:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_avx512_pslli_d_512:
  case Intrinsic::x86_avx512_pslli_q_512:
  case Intrinsic::x86_avx512_pslli_w_512:
    if (Value *V = simplifyX86immShift(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_avx2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx512_psra_q_128:
  case Intrinsic::x86_avx512_psra_q_256:
  case Intrinsic::x86_avx512_psra_d_512:
  case Intrinsic::x86_avx512_psra_q_512:
  case Intrinsic::x86_avx512_psra_w_512:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx512_psrl_d_512:
  case Intrinsic::x86_avx512_psrl_q_512:
  case Intrinsic::x86_avx512_psrl_w_512:
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx512_psll_d_512:
  case Intrinsic::x86_avx512_psll_q_512:
  case Intrinsic::x86_avx512_psll_w_512: {
    if (Value *V = simplifyX86immShift(*II, Builder))
      return replaceInstUsesWith(*II, V);

    // SSE2/AVX2 use only the first 64 bits of the 128-bit vector
    // operand to compute the shift amount.
    Value *Arg1 = II->getArgOperand(1);
    assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
           "Unexpected packed shift size");
    unsigned VWidth = Arg1->getType()->getVectorNumElements();

    if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
      II->setArgOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_avx2_psllv_d:
  case Intrinsic::x86_avx2_psllv_d_256:
  case Intrinsic::x86_avx2_psllv_q:
  case Intrinsic::x86_avx2_psllv_q_256:
  case Intrinsic::x86_avx512_psllv_d_512:
  case Intrinsic::x86_avx512_psllv_q_512:
  case Intrinsic::x86_avx512_psllv_w_128:
  case Intrinsic::x86_avx512_psllv_w_256:
  case Intrinsic::x86_avx512_psllv_w_512:
  case Intrinsic::x86_avx2_psrav_d:
  case Intrinsic::x86_avx2_psrav_d_256:
  case Intrinsic::x86_avx512_psrav_q_128:
  case Intrinsic::x86_avx512_psrav_q_256:
  case Intrinsic::x86_avx512_psrav_d_512:
  case Intrinsic::x86_avx512_psrav_q_512:
  case Intrinsic::x86_avx512_psrav_w_128:
  case Intrinsic::x86_avx512_psrav_w_256:
  case Intrinsic::x86_avx512_psrav_w_512:
  case Intrinsic::x86_avx2_psrlv_d:
  case Intrinsic::x86_avx2_psrlv_d_256:
  case Intrinsic::x86_avx2_psrlv_q:
  case Intrinsic::x86_avx2_psrlv_q_256:
  case Intrinsic::x86_avx512_psrlv_d_512:
  case Intrinsic::x86_avx512_psrlv_q_512:
  case Intrinsic::x86_avx512_psrlv_w_128:
  case Intrinsic::x86_avx512_psrlv_w_256:
  case Intrinsic::x86_avx512_psrlv_w_512:
    if (Value *V = simplifyX86varShift(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse2_packssdw_128:
  case Intrinsic::x86_sse2_packsswb_128:
  case Intrinsic::x86_avx2_packssdw:
  case Intrinsic::x86_avx2_packsswb:
  case Intrinsic::x86_avx512_packssdw_512:
  case Intrinsic::x86_avx512_packsswb_512:
    if (Value *V = simplifyX86pack(*II, true))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse2_packuswb_128:
  case Intrinsic::x86_sse41_packusdw:
  case Intrinsic::x86_avx2_packusdw:
  case Intrinsic::x86_avx2_packuswb:
  case Intrinsic::x86_avx512_packusdw_512:
  case Intrinsic::x86_avx512_packuswb_512:
    if (Value *V = simplifyX86pack(*II, false))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_pclmulqdq:
  case Intrinsic::x86_pclmulqdq_256:
  case Intrinsic::x86_pclmulqdq_512: {
    if (auto *C = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      unsigned Imm = C->getZExtValue();
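      // PCLMULQDQ's immediate selects one 64-bit half per source: bit 0
      // picks the qword of operand 0 and bit 4 the qword of operand 1, so
      // each operand demands exactly one element of each 128-bit pair.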

      bool MadeChange = false;
      Value *Arg0 = II->getArgOperand(0);
      Value *Arg1 = II->getArgOperand(1);
      unsigned VWidth = Arg0->getType()->getVectorNumElements();

      APInt UndefElts1(VWidth, 0);
      APInt DemandedElts1 = APInt::getSplat(VWidth,
                                            APInt(2, (Imm & 0x01) ? 2 : 1));
      if (Value *V = SimplifyDemandedVectorElts(Arg0, DemandedElts1,
                                                UndefElts1)) {
        II->setArgOperand(0, V);
        MadeChange = true;
      }

      APInt UndefElts2(VWidth, 0);
      APInt DemandedElts2 = APInt::getSplat(VWidth,
                                            APInt(2, (Imm & 0x10) ? 2 : 1));
      if (Value *V = SimplifyDemandedVectorElts(Arg1, DemandedElts2,
                                                UndefElts2)) {
        II->setArgOperand(1, V);
        MadeChange = true;
      }

      // If either operand's demanded elements are all undef, the result is
      // zero.
      if (DemandedElts1.isSubsetOf(UndefElts1) ||
          DemandedElts2.isSubsetOf(UndefElts2))
        return replaceInstUsesWith(*II,
                                   ConstantAggregateZero::get(II->getType()));

      if (MadeChange)
        return II;
    }
    break;
  }

  case Intrinsic::x86_sse41_insertps:
    if (Value *V = simplifyX86insertps(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse4a_extrq: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 16 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    Constant *C1 = dyn_cast<Constant>(Op1);
    ConstantInt *CILength =
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)0))
           : nullptr;
    ConstantInt *CIIndex =
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
           : nullptr;

    // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
      return replaceInstUsesWith(*II, V);

    // EXTRQ only uses the lowest 64 bits of the first 128-bit vector
    // operand and the lowest 16 bits of the second.
    bool MadeChange = false;
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      MadeChange = true;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
      II->setArgOperand(1, V);
      MadeChange = true;
    }
    if (MadeChange)
      return II;
    break;
  }

  case Intrinsic::x86_sse4a_extrqi: {
    // EXTRQI: Extract Length bits starting from Index. Zero-pad the remaining
    // bits of the lower 64 bits. The upper 64 bits are undefined.
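    // For example (illustrative): extrqi(x, 8, 16) returns bits [23:16] of
    // x's low qword in bits [7:0], with bits [63:8] zeroed.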
    Value *Op0 = II->getArgOperand(0);
    unsigned VWidth = Op0->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
           "Unexpected operand size");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));

    // Attempt to simplify to a constant or shuffle vector.
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
      return replaceInstUsesWith(*II, V);

    // EXTRQI only uses the lowest 64 bits of the first 128-bit vector
    // operand.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertq: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth = Op0->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
           Op1->getType()->getVectorNumElements() == 2 &&
           "Unexpected operand size");

    // See if we're dealing with constant values.
    Constant *C1 = dyn_cast<Constant>(Op1);
    ConstantInt *CI11 =
        C1 ? dyn_cast_or_null<ConstantInt>(C1->getAggregateElement((unsigned)1))
           : nullptr;

    // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
    if (CI11) {
      const APInt &V11 = CI11->getValue();
      APInt Len = V11.zextOrTrunc(6);
      APInt Idx = V11.lshr(8).zextOrTrunc(6);
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
        return replaceInstUsesWith(*II, V);
    }

    // INSERTQ only uses the lowest 64 bits of the first 128-bit vector
    // operand.
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertqi: {
    // INSERTQI: Extract the lowest Length bits from the lower half of the
    // second source and insert them over the first source starting at bit
    // Index. The upper 64 bits are undefined.
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
    assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
           Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
           VWidth1 == 2 && "Unexpected operand sizes");

    // See if we're dealing with constant values.
    ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
    ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));

    // Attempt to simplify to a constant or shuffle vector.
    if (CILength && CIIndex) {
      APInt Len = CILength->getValue().zextOrTrunc(6);
      APInt Idx = CIIndex->getValue().zextOrTrunc(6);
      if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
        return replaceInstUsesWith(*II, V);
    }

    // INSERTQI only uses the lowest 64 bits of the first two 128-bit vector
    // operands.
    bool MadeChange = false;
    if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
      II->setArgOperand(0, V);
      MadeChange = true;
    }
    if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
      II->setArgOperand(1, V);
      MadeChange = true;
    }
    if (MadeChange)
      return II;
    break;
  }

  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // fold (blend A, A, Mask) -> A
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    Value *Mask = II->getArgOperand(2);
    if (Op0 == Op1)
      return replaceInstUsesWith(CI, Op0);

    // Zero Mask - select 1st argument.
    if (isa<ConstantAggregateZero>(Mask))
      return replaceInstUsesWith(CI, Op0);

    // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
    if (auto *ConstantMask = dyn_cast<ConstantDataVector>(Mask)) {
      Constant *NewSelector = getNegativeIsTrueBoolVec(ConstantMask);
      return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
    }

    // Convert to a vector select if we can bypass casts and find a boolean
    // vector condition value.
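    // For example (illustrative IR):
    //   %m = sext <4 x i1> %b to <4 x i32>
    //   blendvps(%x, %y, bitcast %m)  ->  select <4 x i1> %b, %y, %x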
    Value *BoolVec;
    Mask = peekThroughBitcast(Mask);
    if (match(Mask, m_SExt(m_Value(BoolVec))) &&
        BoolVec->getType()->isVectorTy() &&
        BoolVec->getType()->getScalarSizeInBits() == 1) {
      assert(Mask->getType()->getPrimitiveSizeInBits() ==
                 II->getType()->getPrimitiveSizeInBits() &&
             "Not expecting mask and operands with different sizes");

      unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
      unsigned NumOperandElts = II->getType()->getVectorNumElements();
      if (NumMaskElts == NumOperandElts)
        return SelectInst::Create(BoolVec, Op1, Op0);

      // If the mask has fewer elements than the operands, each mask bit maps
      // to multiple elements of the operands. Bitcast back and forth.
      if (NumMaskElts < NumOperandElts) {
        Value *CastOp0 = Builder.CreateBitCast(Op0, Mask->getType());
        Value *CastOp1 = Builder.CreateBitCast(Op1, Mask->getType());
        Value *Sel = Builder.CreateSelect(BoolVec, CastOp1, CastOp0);
        return new BitCastInst(Sel, II->getType());
      }
    }

    break;
  }

  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
  case Intrinsic::x86_avx512_pshuf_b_512:
    if (Value *V = simplifyX86pshufb(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx512_vpermilvar_ps_512:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256:
  case Intrinsic::x86_avx512_vpermilvar_pd_512:
    if (Value *V = simplifyX86vpermilvar(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx2_permd:
  case Intrinsic::x86_avx2_permps:
  case Intrinsic::x86_avx512_permvar_df_256:
  case Intrinsic::x86_avx512_permvar_df_512:
  case Intrinsic::x86_avx512_permvar_di_256:
  case Intrinsic::x86_avx512_permvar_di_512:
  case Intrinsic::x86_avx512_permvar_hi_128:
  case Intrinsic::x86_avx512_permvar_hi_256:
  case Intrinsic::x86_avx512_permvar_hi_512:
  case Intrinsic::x86_avx512_permvar_qi_128:
  case Intrinsic::x86_avx512_permvar_qi_256:
  case Intrinsic::x86_avx512_permvar_qi_512:
  case Intrinsic::x86_avx512_permvar_sf_512:
  case Intrinsic::x86_avx512_permvar_si_512:
    if (Value *V = simplifyX86vpermv(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_avx_maskload_ps:
  case Intrinsic::x86_avx_maskload_pd:
  case Intrinsic::x86_avx_maskload_ps_256:
  case Intrinsic::x86_avx_maskload_pd_256:
  case Intrinsic::x86_avx2_maskload_d:
  case Intrinsic::x86_avx2_maskload_q:
  case Intrinsic::x86_avx2_maskload_d_256:
  case Intrinsic::x86_avx2_maskload_q_256:
    if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
      return I;
    break;

  case Intrinsic::x86_sse2_maskmov_dqu:
  case Intrinsic::x86_avx_maskstore_ps:
  case Intrinsic::x86_avx_maskstore_pd:
  case Intrinsic::x86_avx_maskstore_ps_256:
  case Intrinsic::x86_avx_maskstore_pd_256:
  case Intrinsic::x86_avx2_maskstore_d:
  case Intrinsic::x86_avx2_maskstore_q:
  case Intrinsic::x86_avx2_maskstore_d_256:
  case Intrinsic::x86_avx2_maskstore_q_256:
    if (simplifyX86MaskedStore(*II, *this))
      return nullptr;
    break;

  case Intrinsic::x86_xop_vpcomb:
  case Intrinsic::x86_xop_vpcomd:
  case Intrinsic::x86_xop_vpcomq:
  case Intrinsic::x86_xop_vpcomw:
    if (Value *V = simplifyX86vpcom(*II, Builder, true))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_xop_vpcomub:
  case Intrinsic::x86_xop_vpcomud:
  case Intrinsic::x86_xop_vpcomuq:
  case Intrinsic::x86_xop_vpcomuw:
    if (Value *V = simplifyX86vpcom(*II, Builder, false))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
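    // For example (illustrative): on little endian a mask element of 0 is
    // remapped to byte 31, and the two sources swap roles when extracting,
    // matching the loop below.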
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
                                           Mask->getType());
        Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
                                           Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
                Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                             Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                               Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0),
                                          DL, II, &AC, &DT);
    if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder))
      return replaceInstUsesWith(*II, V);
    break;
  }

  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign =
        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
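    // These NEON intrinsics carry their alignment as the trailing i32
    // argument; if the pointer is known to be more aligned than the
    // intrinsic claims, bump the operand so codegen can use it.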
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    if (Value *V = simplifyNeonTbl1(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
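    // For example (illustrative): smull(%x, splat(1)) is just a widening of
    // %x, so it lowers to 'sext %x' (zext for the unsigned variants).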
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {
    Value *DataArg = II->getArgOperand(0);
    Value *KeyArg = II->getArgOperand(1);

    // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
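    // (AESE/AESD start with AddRoundKey, i.e. data ^ key, so
    // aese(xor(a, b), 0) computes the same value as aese(a, b).)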
    Value *Data, *Key;
    if (match(KeyArg, m_ZeroInt()) &&
        match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
      II->setArgOperand(0, Data);
      II->setArgOperand(1, Key);
      return II;
    }
    break;
  }
  case Intrinsic::amdgcn_rcp: {
    Value *Src = II->getArgOperand(0);

    // TODO: Move to ConstantFolding/InstSimplify?
    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, Src);

    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::amdgcn_rsq: {
    Value *Src = II->getArgOperand(0);

    // TODO: Move to ConstantFolding/InstSimplify?
    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, Src);
    break;
  }
  case Intrinsic::amdgcn_frexp_mant:
  case Intrinsic::amdgcn_frexp_exp: {
    Value *Src = II->getArgOperand(0);
    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      int Exp;
      APFloat Significand = frexp(C->getValueAPF(), Exp,
                                  APFloat::rmNearestTiesToEven);

      if (II->getIntrinsicID() == Intrinsic::amdgcn_frexp_mant) {
        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
                                                       Significand));
      }

      // Match instruction special case behavior.
      if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
        Exp = 0;

      return replaceInstUsesWith(CI, ConstantInt::get(II->getType(), Exp));
    }

    if (isa<UndefValue>(Src))
      return replaceInstUsesWith(CI, UndefValue::get(II->getType()));

    break;
  }
  case Intrinsic::amdgcn_class: {
    enum {
      S_NAN = 1 << 0,        // Signaling NaN
      Q_NAN = 1 << 1,        // Quiet NaN
      N_INFINITY = 1 << 2,   // Negative infinity
      N_NORMAL = 1 << 3,     // Negative normal
      N_SUBNORMAL = 1 << 4,  // Negative subnormal
      N_ZERO = 1 << 5,       // Negative zero
      P_ZERO = 1 << 6,       // Positive zero
      P_SUBNORMAL = 1 << 7,  // Positive subnormal
      P_NORMAL = 1 << 8,     // Positive normal
      P_INFINITY = 1 << 9    // Positive infinity
    };

    const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
        N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL | P_NORMAL | P_INFINITY;

    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
    if (!CMask) {
      if (isa<UndefValue>(Src0))
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

      if (isa<UndefValue>(Src1))
        return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));
      break;
    }

    uint32_t Mask = CMask->getZExtValue();

    // If all tests are made, it doesn't matter what the value is.
    if ((Mask & FullMask) == FullMask)
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), true));

    if ((Mask & FullMask) == 0)
      return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), false));

    if (Mask == (S_NAN | Q_NAN)) {
      // Equivalent of isnan. Replace with standard fcmp.
      Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
      FCmp->takeName(II);
      return replaceInstUsesWith(*II, FCmp);
    }

    if (Mask == (N_ZERO | P_ZERO)) {
      // Equivalent of == 0.
      Value *FCmp = Builder.CreateFCmpOEQ(
          Src0, ConstantFP::get(Src0->getType(), 0.0));

      FCmp->takeName(II);
      return replaceInstUsesWith(*II, FCmp);
    }

    // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
    if (((Mask & S_NAN) || (Mask & Q_NAN)) && isKnownNeverNaN(Src0, &TLI)) {
      II->setArgOperand(1, ConstantInt::get(Src1->getType(),
                                            Mask & ~(S_NAN | Q_NAN)));
      return II;
    }

    const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
    if (!CVal) {
      if (isa<UndefValue>(Src0))
        return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

      // Clamp mask to used bits
      if ((Mask & FullMask) != Mask) {
        CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
            { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) });

        NewCall->takeName(II);
        return replaceInstUsesWith(*II, NewCall);
      }

      break;
    }

    const APFloat &Val = CVal->getValueAPF();

    bool Result =
        ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
        ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
        ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
        ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
        ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
        ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
        ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
        ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
        ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
        ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());

    return replaceInstUsesWith(*II, ConstantInt::get(II->getType(), Result));
  }
  case Intrinsic::amdgcn_cvt_pkrtz: {
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
        const fltSemantics &HalfSem
            = II->getType()->getScalarType()->getFltSemantics();
        bool LosesInfo;
        APFloat Val0 = C0->getValueAPF();
        APFloat Val1 = C1->getValueAPF();
        Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
        Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);

        Constant *Folded = ConstantVector::get({
            ConstantFP::get(II->getContext(), Val0),
            ConstantFP::get(II->getContext(), Val1) });
        return replaceInstUsesWith(*II, Folded);
      }
    }

    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
      return replaceInstUsesWith(*II, UndefValue::get(II->getType()));

    break;
  }
Marek Olsak13e47412018-01-31 20:18:04 +00003446 case Intrinsic::amdgcn_cvt_pknorm_i16:
3447 case Intrinsic::amdgcn_cvt_pknorm_u16:
3448 case Intrinsic::amdgcn_cvt_pk_i16:
3449 case Intrinsic::amdgcn_cvt_pk_u16: {
3450 Value *Src0 = II->getArgOperand(0);
3451 Value *Src1 = II->getArgOperand(1);
3452
3453 if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1))
3454 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
3455
3456 break;
3457 }
Matt Arsenaultf5262252017-02-22 23:04:58 +00003458 case Intrinsic::amdgcn_ubfe:
3459 case Intrinsic::amdgcn_sbfe: {
3460 // Decompose simple cases into standard shifts.
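    // Illustrative sketch of the decomposition below, assuming i32 operands:
    //   %r = call i32 @llvm.amdgcn.ubfe.i32(i32 %x, i32 8, i32 8)
    // becomes
    //   %shl = shl i32 %x, 16     ; IntSize - Offset - Width
    //   %r   = lshr i32 %shl, 24  ; IntSize - Width (ashr for sbfe)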
3461 Value *Src = II->getArgOperand(0);
3462 if (isa<UndefValue>(Src))
3463 return replaceInstUsesWith(*II, Src);
3464
3465 unsigned Width;
3466 Type *Ty = II->getType();
3467 unsigned IntSize = Ty->getIntegerBitWidth();
3468
3469 ConstantInt *CWidth = dyn_cast<ConstantInt>(II->getArgOperand(2));
3470 if (CWidth) {
3471 Width = CWidth->getZExtValue();
3472 if ((Width & (IntSize - 1)) == 0)
3473 return replaceInstUsesWith(*II, ConstantInt::getNullValue(Ty));
3474
3475 if (Width >= IntSize) {
3476 // Hardware ignores high bits, so remove those.
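        // (Illustrative: a width of 33 on an i32 bfe behaves as a width of 1.)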
3477 II->setArgOperand(2, ConstantInt::get(CWidth->getType(),
3478 Width & (IntSize - 1)));
3479 return II;
3480 }
3481 }
3482
3483 unsigned Offset;
3484 ConstantInt *COffset = dyn_cast<ConstantInt>(II->getArgOperand(1));
3485 if (COffset) {
3486 Offset = COffset->getZExtValue();
3487 if (Offset >= IntSize) {
3488 II->setArgOperand(1, ConstantInt::get(COffset->getType(),
3489 Offset & (IntSize - 1)));
3490 return II;
3491 }
3492 }
3493
3494 bool Signed = II->getIntrinsicID() == Intrinsic::amdgcn_sbfe;
3495
Matt Arsenaultf5262252017-02-22 23:04:58 +00003496 if (!CWidth || !COffset)
3497 break;
3498
Tom Stellard28d66212018-11-08 17:57:57 +00003499    // The case of Width == 0 is handled above, which makes this transformation
3500    // safe. If Width == 0, the ashr and lshr instructions would become poison
3501    // values, since the shift amount would be equal to the bit size.
3502 assert(Width != 0);
3503
Matt Arsenaultf5262252017-02-22 23:04:58 +00003504 // TODO: This allows folding to undef when the hardware has specific
3505 // behavior?
3506 if (Offset + Width < IntSize) {
Craig Topperbb4069e2017-07-07 23:16:26 +00003507 Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
3508 Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
3509 : Builder.CreateLShr(Shl, IntSize - Width);
Matt Arsenaultf5262252017-02-22 23:04:58 +00003510 RightShift->takeName(II);
3511 return replaceInstUsesWith(*II, RightShift);
3512 }
3513
Craig Topperbb4069e2017-07-07 23:16:26 +00003514 Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
3515 : Builder.CreateLShr(Src, Offset);
Matt Arsenaultf5262252017-02-22 23:04:58 +00003516
3517 RightShift->takeName(II);
3518 return replaceInstUsesWith(*II, RightShift);
3519 }
Matt Arsenaultd4bca1e2017-02-23 00:44:03 +00003520 case Intrinsic::amdgcn_exp:
3521 case Intrinsic::amdgcn_exp_compr: {
3522 ConstantInt *En = dyn_cast<ConstantInt>(II->getArgOperand(1));
3523 if (!En) // Illegal.
3524 break;
3525
3526 unsigned EnBits = En->getZExtValue();
3527 if (EnBits == 0xf)
3528 break; // All inputs enabled.
3529
3530 bool IsCompr = II->getIntrinsicID() == Intrinsic::amdgcn_exp_compr;
3531 bool Changed = false;
3532 for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
3533 if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
3534 (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
3535 Value *Src = II->getArgOperand(I + 2);
3536 if (!isa<UndefValue>(Src)) {
3537 II->setArgOperand(I + 2, UndefValue::get(Src->getType()));
3538 Changed = true;
3539 }
3540 }
3541 }
3542
3543 if (Changed)
3544 return II;
3545
3546 break;
Matt Arsenaultcdb468c2017-02-27 23:08:49 +00003547 }
3548 case Intrinsic::amdgcn_fmed3: {
3549 // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
3550 // for the shader.
3551
3552 Value *Src0 = II->getArgOperand(0);
3553 Value *Src1 = II->getArgOperand(1);
3554 Value *Src2 = II->getArgOperand(2);
3555
Matt Arsenault24ce89b2018-07-05 17:05:36 +00003556 // Checking for NaN before canonicalization provides better fidelity when
3557 // mapping other operations onto fmed3 since the order of operands is
3558 // unchanged.
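    // e.g. (illustrative) fmed3(qnan, %a, %b) --> minnum(%a, %b): once one
    // operand is known to be NaN, the median reduces to the minimum (or, for
    // a NaN in the last operand, the maximum) of the remaining two.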
3559 CallInst *NewCall = nullptr;
3560 if (match(Src0, m_NaN()) || isa<UndefValue>(Src0)) {
3561 NewCall = Builder.CreateMinNum(Src1, Src2);
3562 } else if (match(Src1, m_NaN()) || isa<UndefValue>(Src1)) {
3563 NewCall = Builder.CreateMinNum(Src0, Src2);
3564 } else if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
3565 NewCall = Builder.CreateMaxNum(Src0, Src1);
3566 }
3567
3568 if (NewCall) {
3569 NewCall->copyFastMathFlags(II);
3570 NewCall->takeName(II);
3571 return replaceInstUsesWith(*II, NewCall);
3572 }
3573
Matt Arsenaultcdb468c2017-02-27 23:08:49 +00003574 bool Swap = false;
3575 // Canonicalize constants to RHS operands.
3576 //
3577 // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
3578 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3579 std::swap(Src0, Src1);
3580 Swap = true;
3581 }
3582
3583 if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
3584 std::swap(Src1, Src2);
3585 Swap = true;
3586 }
3587
3588 if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
3589 std::swap(Src0, Src1);
3590 Swap = true;
3591 }
3592
3593 if (Swap) {
3594 II->setArgOperand(0, Src0);
3595 II->setArgOperand(1, Src1);
3596 II->setArgOperand(2, Src2);
3597 return II;
3598 }
3599
Matt Arsenaultcdb468c2017-02-27 23:08:49 +00003600 if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
3601 if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
3602 if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
3603 APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
3604 C2->getValueAPF());
3605 return replaceInstUsesWith(*II,
Craig Topperbb4069e2017-07-07 23:16:26 +00003606 ConstantFP::get(Builder.getContext(), Result));
Matt Arsenaultcdb468c2017-02-27 23:08:49 +00003607 }
3608 }
3609 }
3610
3611 break;
Matt Arsenaultd4bca1e2017-02-23 00:44:03 +00003612 }
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003613 case Intrinsic::amdgcn_icmp:
3614 case Intrinsic::amdgcn_fcmp: {
3615 const ConstantInt *CC = dyn_cast<ConstantInt>(II->getArgOperand(2));
3616 if (!CC)
3617 break;
3618
3619 // Guard against invalid arguments.
3620 int64_t CCVal = CC->getZExtValue();
3621 bool IsInteger = II->getIntrinsicID() == Intrinsic::amdgcn_icmp;
3622 if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
3623 CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
3624 (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
3625 CCVal > CmpInst::LAST_FCMP_PREDICATE)))
3626 break;
3627
3628 Value *Src0 = II->getArgOperand(0);
3629 Value *Src1 = II->getArgOperand(1);
3630
3631 if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
3632 if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
3633 Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
Nicolai Haehnle9c661852017-04-24 17:08:43 +00003634 if (CCmp->isNullValue()) {
3635 return replaceInstUsesWith(
3636 *II, ConstantExpr::getSExt(CCmp, II->getType()));
3637 }
3638
3639 // The result of V_ICMP/V_FCMP assembly instructions (which this
3640 // intrinsic exposes) is one bit per thread, masked with the EXEC
3641 // register (which contains the bitmask of live threads). So a
3642 // comparison that always returns true is the same as a read of the
3643 // EXEC register.
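        // Illustrative sketch (assuming a wave64 i64 mask type, with 32
        // encoding ICMP_EQ):
        //   %m = call i64 @llvm.amdgcn.icmp.i32(i32 0, i32 0, i32 32)
        // becomes
        //   %m = call i64 @llvm.read_register.i64(metadata !"exec")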
3644 Value *NewF = Intrinsic::getDeclaration(
3645 II->getModule(), Intrinsic::read_register, II->getType());
3646 Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
3647 MDNode *MD = MDNode::get(II->getContext(), MDArgs);
3648 Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
Craig Topperbb4069e2017-07-07 23:16:26 +00003649 CallInst *NewCall = Builder.CreateCall(NewF, Args);
Nicolai Haehnle9c661852017-04-24 17:08:43 +00003650 NewCall->addAttribute(AttributeList::FunctionIndex,
3651 Attribute::Convergent);
3652 NewCall->takeName(II);
3653 return replaceInstUsesWith(*II, NewCall);
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003654 }
3655
3656 // Canonicalize constants to RHS.
3657 CmpInst::Predicate SwapPred
3658 = CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
3659 II->setArgOperand(0, Src1);
3660 II->setArgOperand(1, Src0);
3661 II->setArgOperand(2, ConstantInt::get(CC->getType(),
3662 static_cast<int>(SwapPred)));
3663 return II;
3664 }
3665
3666 if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
3667 break;
3668
3669 // Canonicalize compare eq with true value to compare != 0
3670 // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
3671 // -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
3672 // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
3673 // -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
3674 Value *ExtSrc;
3675 if (CCVal == CmpInst::ICMP_EQ &&
3676 ((match(Src1, m_One()) && match(Src0, m_ZExt(m_Value(ExtSrc)))) ||
3677 (match(Src1, m_AllOnes()) && match(Src0, m_SExt(m_Value(ExtSrc))))) &&
3678 ExtSrc->getType()->isIntegerTy(1)) {
3679 II->setArgOperand(1, ConstantInt::getNullValue(Src1->getType()));
3680 II->setArgOperand(2, ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
3681 return II;
3682 }
3683
3684 CmpInst::Predicate SrcPred;
3685 Value *SrcLHS;
3686 Value *SrcRHS;
3687
3688 // Fold compare eq/ne with 0 from a compare result as the predicate to the
3689 // intrinsic. The typical use is a wave vote function in the library, which
3690 // will be fed from a user code condition compared with 0. Fold in the
3691 // redundant compare.
3692
3693 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
3694 // -> llvm.amdgcn.[if]cmp(a, b, pred)
3695 //
3696 // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
3697 // -> llvm.amdgcn.[if]cmp(a, b, inv pred)
3698 if (match(Src1, m_Zero()) &&
3699 match(Src0,
3700 m_ZExtOrSExt(m_Cmp(SrcPred, m_Value(SrcLHS), m_Value(SrcRHS))))) {
3701 if (CCVal == CmpInst::ICMP_EQ)
3702 SrcPred = CmpInst::getInversePredicate(SrcPred);
3703
3704 Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred) ?
3705 Intrinsic::amdgcn_fcmp : Intrinsic::amdgcn_icmp;
3706
Matt Arsenault9a389fb2018-08-15 21:14:25 +00003707 Type *Ty = SrcLHS->getType();
3708 if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
3709 // Promote to next legal integer type.
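        // e.g. (illustrative) an i8 equality compare is widened to i16 with
        // zext (sext for signed predicates):
        //   icmp eq i8 %a, %b  -->  icmp eq i16 (zext %a), (zext %b)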
3710 unsigned Width = CmpType->getBitWidth();
3711 unsigned NewWidth = Width;
3712 if (Width <= 16)
3713 NewWidth = 16;
3714 else if (Width <= 32)
3715 NewWidth = 32;
3716 else if (Width <= 64)
3717 NewWidth = 64;
3718 else if (Width > 64)
3719 break; // Can't handle this.
3720
3721 if (Width != NewWidth) {
3722 IntegerType *CmpTy = Builder.getIntNTy(NewWidth);
3723 if (CmpInst::isSigned(SrcPred)) {
3724 SrcLHS = Builder.CreateSExt(SrcLHS, CmpTy);
3725 SrcRHS = Builder.CreateSExt(SrcRHS, CmpTy);
3726 } else {
3727 SrcLHS = Builder.CreateZExt(SrcLHS, CmpTy);
3728 SrcRHS = Builder.CreateZExt(SrcRHS, CmpTy);
3729 }
3730 }
3731 } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
3732 break;
3733
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003734 Value *NewF = Intrinsic::getDeclaration(II->getModule(), NewIID,
3735 SrcLHS->getType());
3736 Value *Args[] = { SrcLHS, SrcRHS,
3737 ConstantInt::get(CC->getType(), SrcPred) };
Craig Topperbb4069e2017-07-07 23:16:26 +00003738 CallInst *NewCall = Builder.CreateCall(NewF, Args);
Matt Arsenaultd81f5572017-03-13 18:14:02 +00003739 NewCall->takeName(II);
3740 return replaceInstUsesWith(*II, NewCall);
3741 }
3742
3743 break;
3744 }
Marek Olsak2114fc32017-10-24 10:26:59 +00003745 case Intrinsic::amdgcn_wqm_vote: {
3746 // wqm_vote is identity when the argument is constant.
3747 if (!isa<Constant>(II->getArgOperand(0)))
3748 break;
3749
3750 return replaceInstUsesWith(*II, II->getArgOperand(0));
3751 }
Marek Olsakce76ea02017-10-24 10:27:13 +00003752 case Intrinsic::amdgcn_kill: {
3753 const ConstantInt *C = dyn_cast<ConstantInt>(II->getArgOperand(0));
3754 if (!C || !C->getZExtValue())
3755 break;
3756
3757 // amdgcn.kill(i1 1) is a no-op
3758 return eraseInstFromFunction(CI);
3759 }
Stanislav Mekhanoshin0e132dc2018-05-22 08:04:33 +00003760 case Intrinsic::amdgcn_update_dpp: {
3761 Value *Old = II->getArgOperand(0);
3762
3763 auto BC = dyn_cast<ConstantInt>(II->getArgOperand(5));
3764 auto RM = dyn_cast<ConstantInt>(II->getArgOperand(3));
3765 auto BM = dyn_cast<ConstantInt>(II->getArgOperand(4));
3766 if (!BC || !RM || !BM ||
3767 BC->isZeroValue() ||
3768 RM->getZExtValue() != 0xF ||
3769 BM->getZExtValue() != 0xF ||
3770 isa<UndefValue>(Old))
3771 break;
3772
3773    // If bound_ctrl = 1 and row mask = bank mask = 0xf, no lane ever falls
    // back to the old value, so the old-value operand can be replaced with undef.
3774 II->setOperand(0, UndefValue::get(Old->getType()));
3775 return II;
3776 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003777 case Intrinsic::stackrestore: {
3778 // If the save is right next to the restore, remove the restore. This can
3779 // happen when variable allocas are DCE'd.
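    // Illustrative sketch (names made up):
    //   %sp = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %sp) ; removable: nothing in between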
Gabor Greif589a0b92010-06-24 12:58:35 +00003780 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003781 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
Vedant Kumarf01827f2018-06-19 23:42:17 +00003782 // Skip over debug info.
3783 if (SS->getNextNonDebugInstruction() == II) {
Sanjay Patel4b198802016-02-01 22:23:39 +00003784 return eraseInstFromFunction(CI);
Davide Italiano189c2cf2018-06-08 20:42:36 +00003785 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003786 }
3787 }
Jim Grosbach7815f562012-02-03 00:07:04 +00003788
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003789 // Scan down this block to see if there is another stack restore in the
3790 // same block without an intervening call/alloca.
Duncan P. N. Exon Smith9f8aaf22015-10-13 16:59:33 +00003791 BasicBlock::iterator BI(II);
Chandler Carruthedb12a82018-10-15 10:04:59 +00003792 Instruction *TI = II->getParent()->getTerminator();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003793 bool CannotRemove = false;
3794 for (++BI; &*BI != TI; ++BI) {
Nuno Lopes55fff832012-06-21 15:45:28 +00003795 if (isa<AllocaInst>(BI)) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003796 CannotRemove = true;
3797 break;
3798 }
3799 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3800 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
3801 // If there is a stackrestore below this one, remove this one.
3802 if (II->getIntrinsicID() == Intrinsic::stackrestore)
Sanjay Patel4b198802016-02-01 22:23:39 +00003803 return eraseInstFromFunction(CI);
Reid Kleckner892ae2e2016-02-27 00:53:54 +00003804
3805 // Bail if we cross over an intrinsic with side effects, such as
3806 // llvm.stacksave, llvm.read_register, or llvm.setjmp.
3807 if (II->mayHaveSideEffects()) {
3808 CannotRemove = true;
3809 break;
3810 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003811 } else {
3812 // If we found a non-intrinsic call, we can't remove the stack
3813 // restore.
3814 CannotRemove = true;
3815 break;
3816 }
3817 }
3818 }
Jim Grosbach7815f562012-02-03 00:07:04 +00003819
Bill Wendlingf891bf82011-07-31 06:30:59 +00003820 // If the stack restore is in a return, resume, or unwind block and if there
3821 // are no allocas or calls between the restore and the return, nuke the
3822 // restore.
Bill Wendlingd5d95b02012-02-06 21:16:41 +00003823 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
Sanjay Patel4b198802016-02-01 22:23:39 +00003824 return eraseInstFromFunction(CI);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003825 break;
3826 }
Vitaly Bukaf0500b62016-07-28 22:50:48 +00003827 case Intrinsic::lifetime_start:
Vitaly Buka0ab23cf2016-07-28 22:59:03 +00003828    // ASan needs to poison memory to detect invalid accesses, which are
3829    // possible even for an empty lifetime range.
Evgeniy Stepanovc667c1f2017-12-09 00:21:41 +00003830 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3831 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
Vitaly Buka0ab23cf2016-07-28 22:59:03 +00003832 break;
3833
Arnaud A. de Grandmaison333ef382016-05-10 09:24:49 +00003834 if (removeTriviallyEmptyRange(*II, Intrinsic::lifetime_start,
3835 Intrinsic::lifetime_end, *this))
3836 return nullptr;
Arnaud A. de Grandmaison849f3bf2015-10-01 14:54:31 +00003837 break;
Hal Finkelf5867a72014-07-25 21:45:17 +00003838 case Intrinsic::assume: {
David Majnemerfcc58112016-04-08 16:37:12 +00003839 Value *IIOperand = II->getArgOperand(0);
Sanjay Patel825a4fa2018-06-20 13:22:26 +00003840 // Remove an assume if it is followed by an identical assume.
3841 // TODO: Do we need this? Unless there are conflicting assumptions, the
3842 // computeKnownBits(IIOperand) below here eliminates redundant assumes.
3843 Instruction *Next = II->getNextNonDebugInstruction();
3844 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
David Majnemerfcc58112016-04-08 16:37:12 +00003845 return eraseInstFromFunction(CI);
3846
Hal Finkelf5867a72014-07-25 21:45:17 +00003847 // Canonicalize assume(a && b) -> assume(a); assume(b);
Hal Finkel74c2f352014-09-07 12:44:26 +00003848 // Note: New assumption intrinsics created here are registered by
3849 // the InstCombineIRInserter object.
David Majnemerfcc58112016-04-08 16:37:12 +00003850 Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
Hal Finkelf5867a72014-07-25 21:45:17 +00003851 if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
Craig Topperbb4069e2017-07-07 23:16:26 +00003852 Builder.CreateCall(AssumeIntrinsic, A, II->getName());
3853 Builder.CreateCall(AssumeIntrinsic, B, II->getName());
Sanjay Patel4b198802016-02-01 22:23:39 +00003854 return eraseInstFromFunction(*II);
Hal Finkelf5867a72014-07-25 21:45:17 +00003855 }
3856 // assume(!(a || b)) -> assume(!a); assume(!b);
3857 if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
Craig Topperbb4069e2017-07-07 23:16:26 +00003858 Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(A), II->getName());
3859 Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(B), II->getName());
Sanjay Patel4b198802016-02-01 22:23:39 +00003860 return eraseInstFromFunction(*II);
Hal Finkelf5867a72014-07-25 21:45:17 +00003861 }
Hal Finkel04a15612014-10-04 21:27:06 +00003862
Philip Reames66c6de62014-11-11 23:33:19 +00003863 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3864 // (if assume is valid at the load)
Sanjay Patelf0d1e7732017-01-03 22:25:31 +00003865 CmpInst::Predicate Pred;
3866 Instruction *LHS;
3867 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3868 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3869 LHS->getType()->isPointerTy() &&
3870 isValidAssumeForContext(II, LHS, &DT)) {
3871 MDNode *MD = MDNode::get(II->getContext(), None);
3872 LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3873 return eraseInstFromFunction(*II);
3874
Chandler Carruth24969102015-02-10 08:07:32 +00003875 // TODO: apply nonnull return attributes to calls and invokes
Philip Reames66c6de62014-11-11 23:33:19 +00003876 // TODO: apply range metadata for range check patterns?
3877 }
Sanjay Patelf0d1e7732017-01-03 22:25:31 +00003878
Hal Finkel04a15612014-10-04 21:27:06 +00003879 // If there is a dominating assume with the same condition as this one,
3880 // then this one is redundant, and should be removed.
Craig Topperb45eabc2017-04-26 16:39:58 +00003881 KnownBits Known(1);
3882 computeKnownBits(IIOperand, Known, 0, II);
Craig Topperf0aeee02017-05-05 17:36:09 +00003883 if (Known.isAllOnes())
Sanjay Patel4b198802016-02-01 22:23:39 +00003884 return eraseInstFromFunction(*II);
Hal Finkel04a15612014-10-04 21:27:06 +00003885
Hal Finkel8a9a7832017-01-11 13:24:24 +00003886 // Update the cache of affected values for this assumption (we might be
3887 // here because we just simplified the condition).
3888 AC.updateAffectedValues(II);
Hal Finkelf5867a72014-07-25 21:45:17 +00003889 break;
3890 }
Philip Reames9db26ff2014-12-29 23:27:30 +00003891 case Intrinsic::experimental_gc_relocate: {
3892 // Translate facts known about a pointer before relocating into
3893 // facts about the relocate value, while being careful to
3894 // preserve relocation semantics.
Manuel Jacob83eefa62016-01-05 04:03:00 +00003895 Value *DerivedPtr = cast<GCRelocateInst>(II)->getDerivedPtr();
Philip Reames9db26ff2014-12-29 23:27:30 +00003896
3897    // Remove the relocation if it is unused; note that this check is
3898    // required to prevent the cases below from looping forever.
3899 if (II->use_empty())
Sanjay Patel4b198802016-02-01 22:23:39 +00003900 return eraseInstFromFunction(*II);
Philip Reames9db26ff2014-12-29 23:27:30 +00003901
3902 // Undef is undef, even after relocation.
3903 // TODO: provide a hook for this in GCStrategy. This is clearly legal for
3904 // most practical collectors, but there was discussion in the review thread
3905 // about whether it was legal for all possible collectors.
Philip Reamesea4d8e82016-02-09 21:09:22 +00003906 if (isa<UndefValue>(DerivedPtr))
3907 // Use undef of gc_relocate's type to replace it.
3908 return replaceInstUsesWith(*II, UndefValue::get(II->getType()));
Philip Reames9db26ff2014-12-29 23:27:30 +00003909
Philip Reamesea4d8e82016-02-09 21:09:22 +00003910 if (auto *PT = dyn_cast<PointerType>(II->getType())) {
3911 // The relocation of null will be null for most any collector.
3912 // TODO: provide a hook for this in GCStrategy. There might be some
3913 // weird collector this property does not hold for.
3914 if (isa<ConstantPointerNull>(DerivedPtr))
3915 // Use null-pointer of gc_relocate's type to replace it.
3916 return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
Simon Pilgrimc0c56e72016-04-24 17:00:34 +00003917
Philip Reamesea4d8e82016-02-09 21:09:22 +00003918 // isKnownNonNull -> nonnull attribute
Philip Reamesb8d8db32018-11-12 20:00:53 +00003919 if (!II->hasRetAttr(Attribute::NonNull) &&
3920 isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT)) {
Reid Klecknerb5180542017-03-21 16:57:19 +00003921 II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
Philip Reamesb8d8db32018-11-12 20:00:53 +00003922 return II;
3923 }
Ramkumar Ramachandra8fcb4982015-02-14 19:37:54 +00003924 }
Philip Reames9db26ff2014-12-29 23:27:30 +00003925
3926 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3927 // Canonicalize on the type from the uses to the defs
Ramkumar Ramachandra8fcb4982015-02-14 19:37:54 +00003928
Philip Reames9db26ff2014-12-29 23:27:30 +00003929 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
Philip Reamesea4d8e82016-02-09 21:09:22 +00003930 break;
Philip Reames9db26ff2014-12-29 23:27:30 +00003931 }
Artur Pilipenkoe812ca02017-01-25 14:12:12 +00003932
3933 case Intrinsic::experimental_guard: {
Philip Reames79e917d2018-05-09 22:56:32 +00003934 // Is this guard followed by another guard? We scan forward over a small
3935 // fixed window of instructions to handle common cases with conditions
3936 // computed between guards.
Sanjoy Dase0e57952017-02-01 16:34:55 +00003937 Instruction *NextInst = II->getNextNode();
Philip Reames913a7792018-05-10 00:05:29 +00003938 for (unsigned i = 0; i < GuardWideningWindow; i++) {
Philip Reames79e917d2018-05-09 22:56:32 +00003939 // Note: Using context-free form to avoid compile time blow up
3940 if (!isSafeToSpeculativelyExecute(NextInst))
3941 break;
3942 NextInst = NextInst->getNextNode();
3943 }
Sanjoy Dase0e57952017-02-01 16:34:55 +00003944 Value *NextCond = nullptr;
3945 if (match(NextInst,
3946 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
3947 Value *CurrCond = II->getArgOperand(0);
Artur Pilipenkoe812ca02017-01-25 14:12:12 +00003948
Simon Pilgrim68168d12017-03-30 12:59:53 +00003949      // Remove a guard that is immediately preceded by an identical guard.
Sanjoy Dase0e57952017-02-01 16:34:55 +00003950 if (CurrCond == NextCond)
3951 return eraseInstFromFunction(*NextInst);
3952
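      // e.g. (illustrative) guard(%a); %t = add i32 %x, 1; guard(%b)
      // becomes %t = add i32 %x, 1; guard(and i1 %a, %b): the speculatable
      // window is hoisted above the first guard and the second is erased.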
3953 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
Philip Reames79e917d2018-05-09 22:56:32 +00003954 Instruction* MoveI = II->getNextNode();
3955 while (MoveI != NextInst) {
3956 auto *Temp = MoveI;
3957 MoveI = MoveI->getNextNode();
3958 Temp->moveBefore(II);
3959 }
Craig Topperbb4069e2017-07-07 23:16:26 +00003960 II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
Sanjoy Dase0e57952017-02-01 16:34:55 +00003961 return eraseInstFromFunction(*NextInst);
3962 }
Artur Pilipenkoe812ca02017-01-25 14:12:12 +00003963 break;
3964 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003965 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003966 return visitCallSite(II);
3967}
3968
Davide Italianoaec46172017-01-31 18:09:05 +00003969// Fence instruction simplification
3970Instruction *InstCombiner::visitFenceInst(FenceInst &FI) {
3971 // Remove identical consecutive fences.
Vedant Kumarf01827f2018-06-19 23:42:17 +00003972 Instruction *Next = FI.getNextNonDebugInstruction();
Tim Northover9b800602018-06-06 12:46:02 +00003973 if (auto *NFI = dyn_cast<FenceInst>(Next))
Davide Italianoaec46172017-01-31 18:09:05 +00003974 if (FI.isIdenticalTo(NFI))
3975 return eraseInstFromFunction(FI);
3976 return nullptr;
3977}
3978
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003979// InvokeInst simplification
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003980Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
3981 return visitCallSite(&II);
3982}
3983
Sanjay Patelcd4377c2016-01-20 22:24:38 +00003984/// If this cast does not affect the value passed through the varargs area, we
3985/// can eliminate the use of the cast.
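/// e.g. (illustrative) for a vararg call
///   %c = bitcast i32* %p to i8*
///   call void (i32, ...) @f(i32 1, i8* %c)
/// the bitcast is lossless, so %p can be passed directly in place of %c.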
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003986static bool isSafeToEliminateVarargsCast(const CallSite CS,
Mehdi Aminia28d91d2015-03-10 02:37:25 +00003987 const DataLayout &DL,
3988 const CastInst *const CI,
Chris Lattner7a9e47a2010-01-05 07:32:13 +00003989 const int ix) {
3990 if (!CI->isLosslessCast())
3991 return false;
3992
Philip Reames1a1bdb22014-12-02 18:50:36 +00003993 // If this is a GC intrinsic, avoid munging types. We need types for
3994 // statepoint reconstruction in SelectionDAG.
3995 // TODO: This is probably something which should be expanded to all
3996 // intrinsics since the entire point of intrinsics is that
3997 // they are understandable by the optimizer.
3998 if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
3999 return false;
4000
Reid Kleckner26af2ca2014-01-28 02:38:36 +00004001 // The size of ByVal or InAlloca arguments is derived from the type, so we
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004002 // can't change to a type with a different size. If the size were
4003 // passed explicitly we could avoid this check.
Reid Kleckner26af2ca2014-01-28 02:38:36 +00004004 if (!CS.isByValOrInAllocaArgument(ix))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004005 return true;
4006
Jim Grosbach7815f562012-02-03 00:07:04 +00004007 Type* SrcTy =
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004008 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
Chris Lattner229907c2011-07-18 04:54:35 +00004009 Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004010 if (!SrcTy->isSized() || !DstTy->isSized())
4011 return false;
Mehdi Aminia28d91d2015-03-10 02:37:25 +00004012 if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004013 return false;
4014 return true;
4015}
4016
Mehdi Aminia28d91d2015-03-10 02:37:25 +00004017Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
Craig Topperf40110f2014-04-25 05:29:35 +00004018 if (!CI->getCalledFunction()) return nullptr;
Eric Christophera7fb58f2010-03-06 10:50:38 +00004019
Chandler Carruthba4c5172015-01-21 11:23:40 +00004020 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
Sanjay Patel4b198802016-02-01 22:23:39 +00004021 replaceInstUsesWith(*From, With);
Chandler Carruthba4c5172015-01-21 11:23:40 +00004022 };
Amara Emerson54f60252018-10-11 14:51:11 +00004023 auto InstCombineErase = [this](Instruction *I) {
4024 eraseInstFromFunction(*I);
4025 };
4026 LibCallSimplifier Simplifier(DL, &TLI, ORE, InstCombineRAUW,
4027 InstCombineErase);
Chandler Carruthba4c5172015-01-21 11:23:40 +00004028 if (Value *With = Simplifier.optimizeCall(CI)) {
Meador Ingee3f2b262012-11-30 04:05:06 +00004029 ++NumSimplified;
Sanjay Patel4b198802016-02-01 22:23:39 +00004030 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
Meador Ingee3f2b262012-11-30 04:05:06 +00004031 }
Meador Ingedf796f82012-10-13 16:45:24 +00004032
Craig Topperf40110f2014-04-25 05:29:35 +00004033 return nullptr;
Eric Christophera7fb58f2010-03-06 10:50:38 +00004034}
4035
Sanjay Patel6038d3e2016-01-29 23:27:03 +00004036static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
Duncan Sandsa0984362011-09-06 13:37:06 +00004037 // Strip off at most one level of pointer casts, looking for an alloca. This
4038 // is good enough in practice and simpler than handling any number of casts.
4039 Value *Underlying = TrampMem->stripPointerCasts();
4040 if (Underlying != TrampMem &&
Chandler Carruthcdf47882014-03-09 03:16:01 +00004041 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
Craig Topperf40110f2014-04-25 05:29:35 +00004042 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004043 if (!isa<AllocaInst>(Underlying))
Craig Topperf40110f2014-04-25 05:29:35 +00004044 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004045
Craig Topperf40110f2014-04-25 05:29:35 +00004046 IntrinsicInst *InitTrampoline = nullptr;
Chandler Carruthcdf47882014-03-09 03:16:01 +00004047 for (User *U : TrampMem->users()) {
4048 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
Duncan Sandsa0984362011-09-06 13:37:06 +00004049 if (!II)
Craig Topperf40110f2014-04-25 05:29:35 +00004050 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004051 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
4052 if (InitTrampoline)
4053 // More than one init_trampoline writes to this value. Give up.
Craig Topperf40110f2014-04-25 05:29:35 +00004054 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004055 InitTrampoline = II;
4056 continue;
4057 }
4058 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
4059 // Allow any number of calls to adjust.trampoline.
4060 continue;
Craig Topperf40110f2014-04-25 05:29:35 +00004061 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004062 }
4063
4064 // No call to init.trampoline found.
4065 if (!InitTrampoline)
Craig Topperf40110f2014-04-25 05:29:35 +00004066 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004067
4068 // Check that the alloca is being used in the expected way.
4069 if (InitTrampoline->getOperand(0) != TrampMem)
Craig Topperf40110f2014-04-25 05:29:35 +00004070 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004071
4072 return InitTrampoline;
4073}
4074
Sanjay Patel6038d3e2016-01-29 23:27:03 +00004075static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
Duncan Sandsa0984362011-09-06 13:37:06 +00004076 Value *TrampMem) {
4077    // Visit all the previous instructions in the basic block, and try to find
4078    // an init.trampoline that has a direct path to the adjust.trampoline.
Duncan P. N. Exon Smith9f8aaf22015-10-13 16:59:33 +00004079 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
4080 E = AdjustTramp->getParent()->begin();
4081 I != E;) {
4082 Instruction *Inst = &*--I;
Duncan Sandsa0984362011-09-06 13:37:06 +00004083    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
4084 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
4085 II->getOperand(0) == TrampMem)
4086 return II;
4087 if (Inst->mayWriteToMemory())
Craig Topperf40110f2014-04-25 05:29:35 +00004088 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004089 }
Craig Topperf40110f2014-04-25 05:29:35 +00004090 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004091}
4092
4093// Given a call to llvm.adjust.trampoline, find and return the corresponding
4094// call to llvm.init.trampoline if the call to the trampoline can be optimized
4095// to a direct call to a function. Otherwise return NULL.
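// Illustrative sketch (names made up):
//   call void @llvm.init.trampoline(i8* %mem,
//                                   i8* bitcast (void (i8*)* @f to i8*),
//                                   i8* %nest)
//   %p = call i8* @llvm.adjust.trampoline(i8* %mem)
// A call through %p can then become a direct call to @f.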
Sanjay Patel6038d3e2016-01-29 23:27:03 +00004096static IntrinsicInst *findInitTrampoline(Value *Callee) {
Duncan Sandsa0984362011-09-06 13:37:06 +00004097 Callee = Callee->stripPointerCasts();
4098 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
4099 if (!AdjustTramp ||
4100 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
Craig Topperf40110f2014-04-25 05:29:35 +00004101 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004102
4103 Value *TrampMem = AdjustTramp->getOperand(0);
4104
Sanjay Patel6038d3e2016-01-29 23:27:03 +00004105 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
Duncan Sandsa0984362011-09-06 13:37:06 +00004106 return IT;
Sanjay Patel6038d3e2016-01-29 23:27:03 +00004107 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
Duncan Sandsa0984362011-09-06 13:37:06 +00004108 return IT;
Craig Topperf40110f2014-04-25 05:29:35 +00004109 return nullptr;
Duncan Sandsa0984362011-09-06 13:37:06 +00004110}
4111
Sanjay Patelcd4377c2016-01-20 22:24:38 +00004112/// Improvements for call and invoke instructions.
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004113Instruction *InstCombiner::visitCallSite(CallSite CS) {
Justin Bogner99798402016-08-05 01:06:44 +00004114 if (isAllocLikeFn(CS.getInstruction(), &TLI))
Nuno Lopes95cc4f32012-07-09 18:38:20 +00004115 return visitAllocSite(*CS.getInstruction());
Nuno Lopesdc6085e2012-06-21 21:25:05 +00004116
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004117 bool Changed = false;
4118
Philip Reamesc25df112015-06-16 20:24:25 +00004119 // Mark any parameters that are known to be non-null with the nonnull
4120 // attribute. This is helpful for inlining calls to functions with null
4121 // checks on their arguments.
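  // e.g. (illustrative) call void @f(i8* %p) becomes
  //      call void @f(i8* nonnull %p) when %p is provably non-null here.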
Reid Kleckner5fbdd172017-05-31 19:23:09 +00004122 SmallVector<unsigned, 4> ArgNos;
Philip Reamesc25df112015-06-16 20:24:25 +00004123 unsigned ArgNo = 0;
Akira Hatanaka237916b2015-12-02 06:58:49 +00004124
Philip Reamesc25df112015-06-16 20:24:25 +00004125 for (Value *V : CS.args()) {
Sanjay Patelf9f5d3c2016-01-29 23:14:58 +00004126 if (V->getType()->isPointerTy() &&
Reid Klecknerfb502d22017-04-14 20:19:02 +00004127 !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
Nuno Lopes404f1062017-09-09 18:23:11 +00004128 isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
Reid Kleckner5fbdd172017-05-31 19:23:09 +00004129 ArgNos.push_back(ArgNo);
Philip Reamesc25df112015-06-16 20:24:25 +00004130 ArgNo++;
4131 }
Akira Hatanaka237916b2015-12-02 06:58:49 +00004132
Philip Reamesc25df112015-06-16 20:24:25 +00004133 assert(ArgNo == CS.arg_size() && "sanity check");
4134
Reid Kleckner5fbdd172017-05-31 19:23:09 +00004135 if (!ArgNos.empty()) {
Reid Klecknerb5180542017-03-21 16:57:19 +00004136 AttributeList AS = CS.getAttributes();
Akira Hatanaka237916b2015-12-02 06:58:49 +00004137 LLVMContext &Ctx = CS.getInstruction()->getContext();
Reid Kleckner5fbdd172017-05-31 19:23:09 +00004138 AS = AS.addParamAttribute(Ctx, ArgNos,
4139 Attribute::get(Ctx, Attribute::NonNull));
Akira Hatanaka237916b2015-12-02 06:58:49 +00004140 CS.setAttributes(AS);
4141 Changed = true;
4142 }
4143
Chris Lattner73989652010-12-20 08:25:06 +00004144 // If the callee is a pointer to a function, attempt to move any casts to the
4145 // arguments of the call/invoke.
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004146 Value *Callee = CS.getCalledValue();
Chris Lattner73989652010-12-20 08:25:06 +00004147 if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
Craig Topperf40110f2014-04-25 05:29:35 +00004148 return nullptr;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004149
Justin Lebar9d943972016-03-14 20:18:54 +00004150 if (Function *CalleeF = dyn_cast<Function>(Callee)) {
4151 // Remove the convergent attr on calls when the callee is not convergent.
Matt Arsenault802ebcb2016-06-20 19:04:44 +00004152 if (CS.isConvergent() && !CalleeF->isConvergent() &&
4153 !CalleeF->isIntrinsic()) {
Nicola Zaghend34e60c2018-05-14 12:53:11 +00004154 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr "
4155 << CS.getInstruction() << "\n");
Justin Lebar9d943972016-03-14 20:18:54 +00004156 CS.setNotConvergent();
4157 return CS.getInstruction();
4158 }
4159
Chris Lattner846a52e2010-02-01 18:11:34 +00004160 // If the call and callee calling conventions don't match, this call must
4161 // be unreachable, as the call is undefined.
4162 if (CalleeF->getCallingConv() != CS.getCallingConv() &&
4163 // Only do this for calls to a function with a body. A prototype may
4164 // not actually end up matching the implementation's calling conv for a
4165 // variety of reasons (e.g. it may be written in assembly).
4166 !CalleeF->isDeclaration()) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004167 Instruction *OldCall = CS.getInstruction();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004168 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
Jim Grosbach7815f562012-02-03 00:07:04 +00004169 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004170 OldCall);
Chad Rosiere28ae302012-12-13 00:18:46 +00004171 // If OldCall does not return void then replaceAllUsesWith undef.
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004172 // This allows ValueHandlers and custom metadata to adjust itself.
4173 if (!OldCall->getType()->isVoidTy())
Sanjay Patel4b198802016-02-01 22:23:39 +00004174 replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
Chris Lattner2cecedf2010-02-01 18:04:58 +00004175 if (isa<CallInst>(OldCall))
Sanjay Patel4b198802016-02-01 22:23:39 +00004176 return eraseInstFromFunction(*OldCall);
Jim Grosbach7815f562012-02-03 00:07:04 +00004177
Chris Lattner2cecedf2010-02-01 18:04:58 +00004178 // We cannot remove an invoke, because it would change the CFG, just
4179 // change the callee to a null pointer.
Gabor Greiffebf6ab2010-03-20 21:00:25 +00004180 cast<InvokeInst>(OldCall)->setCalledFunction(
Chris Lattner2cecedf2010-02-01 18:04:58 +00004181 Constant::getNullValue(CalleeF->getType()));
Craig Topperf40110f2014-04-25 05:29:35 +00004182 return nullptr;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004183 }
Justin Lebar9d943972016-03-14 20:18:54 +00004184 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004185
Manoj Gupta77eeac32018-07-09 22:27:23 +00004186 if ((isa<ConstantPointerNull>(Callee) &&
4187 !NullPointerIsDefined(CS.getInstruction()->getFunction())) ||
4188 isa<UndefValue>(Callee)) {
Gabor Greif589a0b92010-06-24 12:58:35 +00004189 // If CS does not return void then replaceAllUsesWith undef.
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004190 // This allows ValueHandlers and custom metadata to adjust itself.
4191 if (!CS.getInstruction()->getType()->isVoidTy())
Sanjay Patel4b198802016-02-01 22:23:39 +00004192 replaceInstUsesWith(*CS.getInstruction(),
Eli Friedmanb9ed18f2011-05-18 00:32:01 +00004193 UndefValue::get(CS.getInstruction()->getType()));
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004194
Nuno Lopes771e7bd2012-06-21 23:52:14 +00004195 if (isa<InvokeInst>(CS.getInstruction())) {
4196 // Can't remove an invoke because we cannot change the CFG.
Craig Topperf40110f2014-04-25 05:29:35 +00004197 return nullptr;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004198 }
Nuno Lopes771e7bd2012-06-21 23:52:14 +00004199
4200    // This instruction is not reachable; just remove it. We insert a store
4201    // through an undef pointer to record that this code is unreachable,
4202    // since we can't modify the CFG here.
4203 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
4204 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
4205 CS.getInstruction());
4206
Sanjay Patel4b198802016-02-01 22:23:39 +00004207 return eraseInstFromFunction(*CS.getInstruction());
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004208 }
4209
Sanjay Patel6038d3e2016-01-29 23:27:03 +00004210 if (IntrinsicInst *II = findInitTrampoline(Callee))
Duncan Sandsa0984362011-09-06 13:37:06 +00004211 return transformCallThroughTrampoline(CS, II);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004212
Chris Lattner229907c2011-07-18 04:54:35 +00004213 PointerType *PTy = cast<PointerType>(Callee->getType());
4214 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004215 if (FTy->isVarArg()) {
Eli Friedman7534b4682011-11-29 01:18:23 +00004216 int ix = FTy->getNumParams();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004217 // See if we can optimize any arguments passed through the varargs area of
4218 // the call.
Matt Arsenault5d2e85f2013-06-28 00:25:40 +00004219 for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004220 E = CS.arg_end(); I != E; ++I, ++ix) {
4221 CastInst *CI = dyn_cast<CastInst>(*I);
Mehdi Aminia28d91d2015-03-10 02:37:25 +00004222 if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004223 *I = CI->getOperand(0);
4224 Changed = true;
4225 }
4226 }
4227 }
4228
4229 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
4230 // Inline asm calls cannot throw - mark them 'nounwind'.
4231 CS.setDoesNotThrow();
4232 Changed = true;
4233 }
4234
Micah Villmowcdfe20b2012-10-08 16:38:25 +00004235 // Try to optimize the call if possible, we require DataLayout for most of
Eric Christophera7fb58f2010-03-06 10:50:38 +00004236 // this. None of these calls are seen as possibly dead so go ahead and
4237 // delete the instruction now.
4238 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
Mehdi Aminia28d91d2015-03-10 02:37:25 +00004239 Instruction *I = tryOptimizeCall(CI);
Eric Christopher1810d772010-03-06 10:59:25 +00004240 // If we changed something return the result, etc. Otherwise let
4241 // the fallthrough check.
Sanjay Patel4b198802016-02-01 22:23:39 +00004242 if (I) return eraseInstFromFunction(*I);
Eric Christophera7fb58f2010-03-06 10:50:38 +00004243 }
4244
Craig Topperf40110f2014-04-25 05:29:35 +00004245 return Changed ? CS.getInstruction() : nullptr;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004246}
4247
Sanjay Patelcd4377c2016-01-20 22:24:38 +00004248/// If the callee is a constexpr cast of a function, attempt to move the cast to
4249/// the arguments of the call/invoke.
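/// e.g. (illustrative)
///   call void bitcast (void (i8*)* @f to void (i32*)*)(i32* %p)
/// becomes
///   %c = bitcast i32* %p to i8*
///   call void @f(i8* %c)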
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004250bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Sanjay Patele3c335c2016-08-11 15:21:21 +00004251 auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
Craig Topperf40110f2014-04-25 05:29:35 +00004252 if (!Callee)
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004253 return false;
Sanjay Patel38ae83d2016-08-11 15:23:56 +00004254
Reid Kleckner298ffc62018-04-02 22:49:44 +00004255 // If this is a call to a thunk function, don't remove the cast. Thunks are
4256 // used to transparently forward all incoming parameters and outgoing return
4257 // values, so it's important to leave the cast in place.
David Majnemer4c0a6e92015-01-21 22:32:04 +00004258 if (Callee->hasFnAttribute("thunk"))
4259 return false;
Sanjay Patel38ae83d2016-08-11 15:23:56 +00004260
Reid Kleckner298ffc62018-04-02 22:49:44 +00004261 // If this is a musttail call, the callee's prototype must match the caller's
4262 // prototype with the exception of pointee types. The code below doesn't
4263 // implement that, so we can't do this transform.
4264 // TODO: Do the transform if it only requires adding pointer casts.
4265 if (CS.isMustTailCall())
4266 return false;
4267
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004268 Instruction *Caller = CS.getInstruction();
Reid Klecknerb5180542017-03-21 16:57:19 +00004269 const AttributeList &CallerPAL = CS.getAttributes();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004270
4271 // Okay, this is a cast from a function to a different type. Unless doing so
4272 // would cause a type conversion of one of our arguments, change this call to
4273 // be a direct call with arguments casted to the appropriate types.
Chris Lattner229907c2011-07-18 04:54:35 +00004274 FunctionType *FT = Callee->getFunctionType();
4275 Type *OldRetTy = Caller->getType();
4276 Type *NewRetTy = FT->getReturnType();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004277
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004278 // Check to see if we are changing the return type...
4279 if (OldRetTy != NewRetTy) {
Nick Lewyckya6a17d72014-01-18 22:47:12 +00004280
4281 if (NewRetTy->isStructTy())
4282 return false; // TODO: Handle multiple return values.
4283
David Majnemer9b6b8222015-01-06 08:41:31 +00004284 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
Matt Arsenaulte6952f22013-09-17 21:10:14 +00004285 if (Callee->isDeclaration())
4286 return false; // Cannot transform this return value.
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004287
Matt Arsenaulte6952f22013-09-17 21:10:14 +00004288 if (!Caller->use_empty() &&
4289 // void -> non-void is handled specially
4290 !NewRetTy->isVoidTy())
Frederic Rissc1892e22014-10-23 04:08:42 +00004291 return false; // Cannot transform this return value.
Matt Arsenaulte6952f22013-09-17 21:10:14 +00004292 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004293
4294 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
Reid Klecknerb5180542017-03-21 16:57:19 +00004295 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
Pete Cooper2777d8872015-05-06 23:19:56 +00004296 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004297 return false; // Attribute not compatible with transformed value.
4298 }
4299
4300 // If the callsite is an invoke instruction, and the return value is used by
4301 // a PHI node in a successor, we cannot change the return type of the call
4302 // because there is no place to put the cast instruction (without breaking
4303 // the critical edge). Bail out in this case.
4304 if (!Caller->use_empty())
4305 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
Chandler Carruthcdf47882014-03-09 03:16:01 +00004306 for (User *U : II->users())
4307 if (PHINode *PN = dyn_cast<PHINode>(U))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004308 if (PN->getParent() == II->getNormalDest() ||
4309 PN->getParent() == II->getUnwindDest())
4310 return false;
4311 }
4312
Matt Arsenault5d2e85f2013-06-28 00:25:40 +00004313 unsigned NumActualArgs = CS.arg_size();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004314 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4315
David Majnemer9b6b8222015-01-06 08:41:31 +00004316 // Prevent us turning:
4317 // declare void @takes_i32_inalloca(i32* inalloca)
4318 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4319 //
4320 // into:
4321 // call void @takes_i32_inalloca(i32* null)
David Majnemerd61a6fd2015-03-11 18:03:05 +00004322 //
4323 // Similarly, avoid folding away bitcasts of byval calls.
4324 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4325 Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
David Majnemer9b6b8222015-01-06 08:41:31 +00004326 return false;
4327
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004328 CallSite::arg_iterator AI = CS.arg_begin();
4329 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
Chris Lattner229907c2011-07-18 04:54:35 +00004330 Type *ParamTy = FT->getParamType(i);
4331 Type *ActTy = (*AI)->getType();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004332
David Majnemer9b6b8222015-01-06 08:41:31 +00004333 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004334 return false; // Cannot transform this parameter value.
4335
Reid Klecknerf021fab2017-04-13 23:12:13 +00004336 if (AttrBuilder(CallerPAL.getParamAttributes(i))
4337 .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004338 return false; // Attribute not compatible with transformed value.
Jim Grosbach7815f562012-02-03 00:07:04 +00004339
Reid Kleckner26af2ca2014-01-28 02:38:36 +00004340 if (CS.isInAllocaArgument(i))
4341 return false; // Cannot transform to and from inalloca.
4342
Chris Lattner27ca8eb2010-12-20 08:36:38 +00004343 // If the parameter is passed as a byval argument, then we have to have a
4344 // sized type and the sized type has to have the same size as the old type.
Reid Klecknerf021fab2017-04-13 23:12:13 +00004345 if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
Chris Lattner229907c2011-07-18 04:54:35 +00004346 PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
Mehdi Aminia28d91d2015-03-10 02:37:25 +00004347 if (!ParamPTy || !ParamPTy->getElementType()->isSized())
Chris Lattner27ca8eb2010-12-20 08:36:38 +00004348 return false;
Jim Grosbach7815f562012-02-03 00:07:04 +00004349
Matt Arsenaultfa252722013-09-27 22:18:51 +00004350 Type *CurElTy = ActTy->getPointerElementType();
Mehdi Aminia28d91d2015-03-10 02:37:25 +00004351 if (DL.getTypeAllocSize(CurElTy) !=
4352 DL.getTypeAllocSize(ParamPTy->getElementType()))
Chris Lattner27ca8eb2010-12-20 08:36:38 +00004353 return false;
4354 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004355 }
4356
Chris Lattneradf38b32011-02-24 05:10:56 +00004357 if (Callee->isDeclaration()) {
4358 // Do not delete arguments unless we have a function body.
4359 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4360 return false;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004361
Chris Lattneradf38b32011-02-24 05:10:56 +00004362 // If the callee is just a declaration, don't change the varargsness of the
4363 // call. We don't want to introduce a varargs call where one doesn't
4364 // already exist.
Chris Lattner229907c2011-07-18 04:54:35 +00004365 PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
Chris Lattneradf38b32011-02-24 05:10:56 +00004366 if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
4367 return false;
Jim Grosbache84ae7b2012-02-03 00:00:55 +00004368
4369 // If both the callee and the cast type are varargs, we still have to make
4370 // sure the number of fixed parameters are the same or we have the same
4371 // ABI issues as if we introduce a varargs call.
Jim Grosbach1df8cdc2012-02-03 00:26:07 +00004372 if (FT->isVarArg() &&
4373 cast<FunctionType>(APTy->getElementType())->isVarArg() &&
4374 FT->getNumParams() !=
Jim Grosbache84ae7b2012-02-03 00:00:55 +00004375 cast<FunctionType>(APTy->getElementType())->getNumParams())
4376 return false;
Chris Lattneradf38b32011-02-24 05:10:56 +00004377 }
Jim Grosbach7815f562012-02-03 00:07:04 +00004378
Jim Grosbach0ab54182012-02-03 00:00:50 +00004379 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
Reid Kleckneraa0cec72017-04-19 23:17:47 +00004380 !CallerPAL.isEmpty()) {
Jim Grosbach0ab54182012-02-03 00:00:50 +00004381 // In this case we have more arguments than the new function type, but we
4382 // won't be dropping them. Check that these extra arguments have attributes
4383 // that are compatible with being a vararg call argument.
Reid Kleckneraa0cec72017-04-19 23:17:47 +00004384 unsigned SRetIdx;
4385 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4386 SRetIdx > FT->getNumParams())
4387 return false;
4388 }
Jim Grosbach7815f562012-02-03 00:07:04 +00004389
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004390 // Okay, we decided that this is a safe thing to do: go ahead and start
Chris Lattneradf38b32011-02-24 05:10:56 +00004391 // inserting cast instructions as necessary.
Reid Klecknerc3fae792017-04-13 18:11:03 +00004392 SmallVector<Value *, 8> Args;
4393 SmallVector<AttributeSet, 8> ArgAttrs;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004394 Args.reserve(NumActualArgs);
Reid Klecknerc3fae792017-04-13 18:11:03 +00004395 ArgAttrs.reserve(NumActualArgs);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004396
4397 // Get any return attributes.
Reid Klecknerb5180542017-03-21 16:57:19 +00004398 AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004399
4400 // If the return value is not being used, the type may not be compatible
4401 // with the existing attributes. Wipe out any problematic attributes.
Pete Cooper2777d8872015-05-06 23:19:56 +00004402 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004403
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004404 AI = CS.arg_begin();
4405 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
Chris Lattner229907c2011-07-18 04:54:35 +00004406 Type *ParamTy = FT->getParamType(i);
Matt Arsenaultcacbb232013-07-30 20:45:05 +00004407
Reid Klecknerc3fae792017-04-13 18:11:03 +00004408 Value *NewArg = *AI;
4409 if ((*AI)->getType() != ParamTy)
Craig Topperbb4069e2017-07-07 23:16:26 +00004410 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
Reid Klecknerc3fae792017-04-13 18:11:03 +00004411 Args.push_back(NewArg);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004412
4413 // Add any parameter attributes.
Reid Klecknerf021fab2017-04-13 23:12:13 +00004414 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004415 }
4416
4417 // If the function takes more arguments than the call was taking, add them
4418 // now.
Reid Klecknerc3fae792017-04-13 18:11:03 +00004419 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004420 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
Reid Klecknerc3fae792017-04-13 18:11:03 +00004421 ArgAttrs.push_back(AttributeSet());
4422 }
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004423
4424    // If the call site passes more arguments than the callee accepts, keep the
    // extras only when the callee is varargs (they are promoted below);
    // otherwise the extra arguments are simply dropped.
4425 if (FT->getNumParams() < NumActualArgs) {
Nick Lewycky90053a12012-12-26 22:00:35 +00004426 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4427 if (FT->isVarArg()) {
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004428 // Add all of the arguments in their promoted form to the arg list.
4429 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
Chris Lattner229907c2011-07-18 04:54:35 +00004430 Type *PTy = getPromotedType((*AI)->getType());
Reid Klecknerc3fae792017-04-13 18:11:03 +00004431 Value *NewArg = *AI;
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004432 if (PTy != (*AI)->getType()) {
4433 // Must promote to pass through va_arg area!
4434 Instruction::CastOps opcode =
4435 CastInst::getCastOpcode(*AI, false, PTy, false);
Craig Topperbb4069e2017-07-07 23:16:26 +00004436 NewArg = Builder.CreateCast(opcode, *AI, PTy);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004437 }
Reid Klecknerc3fae792017-04-13 18:11:03 +00004438 Args.push_back(NewArg);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004439
4440 // Add any parameter attributes.
Reid Klecknerf021fab2017-04-13 23:12:13 +00004441 ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004442 }
4443 }
4444 }
4445
Reid Klecknerc2cb5602017-04-12 00:38:00 +00004446 AttributeSet FnAttrs = CallerPAL.getFnAttributes();
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004447
4448 if (NewRetTy->isVoidTy())
4449 Caller->setName(""); // Void type should not have a name.
4450
Reid Klecknerc3fae792017-04-13 18:11:03 +00004451 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4452 "missing argument attributes");
4453 LLVMContext &Ctx = Callee->getContext();
4454 AttributeList NewCallerPAL = AttributeList::get(
4455 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
Chris Lattner7a9e47a2010-01-05 07:32:13 +00004456
Sanjoy Das76293462015-11-25 00:42:19 +00004457 SmallVector<OperandBundleDef, 1> OpBundles;
Sanjoy Dasc521c7b2015-11-25 00:42:24 +00004458 CS.getOperandBundlesAsDefs(OpBundles);
Sanjoy Das76293462015-11-25 00:42:19 +00004459
  CallSite NewCS;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                 II->getUnwindDest(), Args, OpBundles);
  } else {
    NewCS = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCS.getInstruction())
        ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
  }
  NewCS->takeName(Caller);
  NewCS.setCallingConv(CS.getCallingConv());
  NewCS.setAttributes(NewCallerPAL);

  // Preserve the weight metadata for the new call instruction. The metadata
  // is used by SamplePGO to check the call site's hotness.
  uint64_t W;
  if (Caller->extractProfTotalWeight(W))
    NewCS->setProfWeight(W);

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCS.getInstruction();
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-PHI instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call: just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

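  // Rewire users of the old instruction to the (possibly casted) new value.
  // Even when the result is unused, value handles may still be watching the
  // old instruction, so they are notified before it is erased.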
  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by an init_trampoline /
/// adjust_trampoline intrinsic pair into a direct call to the underlying
/// function.
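/// The trampoline intrinsics pair a target function with a 'nest' chain
/// value, so the replacement direct call must splice that chain value into
/// the argument list at the target's 'nest' parameter, if it has one.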
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  AttributeList Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =
      cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(CS.arg_size() + 1);
      NewArgAttrs.reserve(CS.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

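      // The do/while below deliberately iterates one position past the last
      // argument so that a nest parameter sitting at index arg_size() is
      // still appended before the final break.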
      {
        unsigned ArgNo = 0;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
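      // Same append-aware iteration as for the arguments above, this time
      // over the parameter types.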
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy)
              ? NestF
              : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
                             Attrs.getRetAttributes(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      CS.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee, II->getNormalDest(),
                                       II->getUnwindDest(), NewArgs,
                                       OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Caller)->getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Caller->getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = NestF->getType() == PTy
                            ? NestF
                            : ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}