1 //===- InstCombineCalls.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLFunctionalExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/Loads.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/Analysis/VectorUtils.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Constant.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GlobalVariable.h"
40 #include "llvm/IR/InlineAsm.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Intrinsics.h"
46 #include "llvm/IR/IntrinsicsAArch64.h"
47 #include "llvm/IR/IntrinsicsAMDGPU.h"
48 #include "llvm/IR/IntrinsicsARM.h"
49 #include "llvm/IR/IntrinsicsHexagon.h"
50 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/IR/Metadata.h"
52 #include "llvm/IR/PatternMatch.h"
53 #include "llvm/IR/Statepoint.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/User.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/IR/ValueHandle.h"
58 #include "llvm/Support/AtomicOrdering.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/KnownBits.h"
65 #include "llvm/Support/MathExtras.h"
66 #include "llvm/Support/raw_ostream.h"
67 #include "llvm/Transforms/InstCombine/InstCombiner.h"
68 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
69 #include "llvm/Transforms/Utils/Local.h"
70 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
71 #include <algorithm>
72 #include <cassert>
73 #include <cstdint>
74 #include <utility>
75 #include <vector>
76 
77 #define DEBUG_TYPE "instcombine"
78 #include "llvm/Transforms/Utils/InstructionWorklist.h"
79 
80 using namespace llvm;
81 using namespace PatternMatch;
82 
83 STATISTIC(NumSimplified, "Number of library calls simplified");
84 
85 static cl::opt<unsigned> GuardWideningWindow(
86     "instcombine-guard-widening-window",
87     cl::init(3),
88     cl::desc("How wide an instruction window to bypass looking for "
89              "another guard"));
90 
91 namespace llvm {
92 /// enable preservation of attributes in assume like:
93 /// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
94 extern cl::opt<bool> EnableKnowledgeRetention;
95 } // namespace llvm
96 
97 /// Return the specified type promoted as it would be to pass though a va_arg
98 /// area.
99 static Type *getPromotedType(Type *Ty) {
100   if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
101     if (ITy->getBitWidth() < 32)
102       return Type::getInt32Ty(Ty->getContext());
103   }
104   return Ty;
105 }
106 
107 /// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
108 /// TODO: This should probably be integrated with visitAllocSites, but that
109 /// requires a deeper change to allow either unread or unwritten objects.
110 static bool hasUndefSource(AnyMemTransferInst *MI) {
111   auto *Src = MI->getRawSource();
112   while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
113     if (!Src->hasOneUse())
114       return false;
115     Src = cast<Instruction>(Src)->getOperand(0);
116   }
117   return isa<AllocaInst>(Src) && Src->hasOneUse();
118 }
119 
120 Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
121   Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
122   MaybeAlign CopyDstAlign = MI->getDestAlign();
123   if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
124     MI->setDestAlignment(DstAlign);
125     return MI;
126   }
127 
128   Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
129   MaybeAlign CopySrcAlign = MI->getSourceAlign();
130   if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
131     MI->setSourceAlignment(SrcAlign);
132     return MI;
133   }
134 
135   // If we have a store to a location which is known constant, we can conclude
136   // that the store must be storing the constant value (else the memory
137   // wouldn't be constant), and this must be a noop.
138   if (AA->pointsToConstantMemory(MI->getDest())) {
139     // Set the size of the copy to 0, it will be deleted on the next iteration.
140     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
141     return MI;
142   }
143 
144   // If the source is provably undef, the memcpy/memmove doesn't do anything
145   // (unless the transfer is volatile).
146   if (hasUndefSource(MI) && !MI->isVolatile()) {
147     // Set the size of the copy to 0, it will be deleted on the next iteration.
148     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
149     return MI;
150   }
151 
152   // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
153   // load/store.
154   ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
155   if (!MemOpLength) return nullptr;
156 
157   // Source and destination pointer types are always "i8*" for intrinsic.  See
158   // if the size is something we can handle with a single primitive load/store.
159   // A single load+store correctly handles overlapping memory in the memmove
160   // case.
161   uint64_t Size = MemOpLength->getLimitedValue();
162   assert(Size && "0-sized memory transferring should be removed already.");
163 
164   if (Size > 8 || (Size&(Size-1)))
165     return nullptr;  // If not 1/2/4/8 bytes, exit.
166 
167   // If it is an atomic and alignment is less than the size then we will
168   // introduce the unaligned memory access which will be later transformed
169   // into libcall in CodeGen. This is not evident performance gain so disable
170   // it now.
171   if (isa<AtomicMemTransferInst>(MI))
172     if (*CopyDstAlign < Size || *CopySrcAlign < Size)
173       return nullptr;
174 
175   // Use an integer load+store unless we can find something better.
176   unsigned SrcAddrSp =
177     cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
178   unsigned DstAddrSp =
179     cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
180 
181   IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
182   Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
183   Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
184 
185   // If the memcpy has metadata describing the members, see if we can get the
186   // TBAA tag describing our copy.
187   MDNode *CopyMD = nullptr;
188   if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
189     CopyMD = M;
190   } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
191     if (M->getNumOperands() == 3 && M->getOperand(0) &&
192         mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
193         mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
194         M->getOperand(1) &&
195         mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
196         mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
197         Size &&
198         M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
199       CopyMD = cast<MDNode>(M->getOperand(2));
200   }
201 
202   Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
203   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
204   LoadInst *L = Builder.CreateLoad(IntType, Src);
205   // Alignment from the mem intrinsic will be better, so use it.
206   L->setAlignment(*CopySrcAlign);
207   if (CopyMD)
208     L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
209   MDNode *LoopMemParallelMD =
210     MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
211   if (LoopMemParallelMD)
212     L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
213   MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
214   if (AccessGroupMD)
215     L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
216 
217   StoreInst *S = Builder.CreateStore(L, Dest);
218   // Alignment from the mem intrinsic will be better, so use it.
219   S->setAlignment(*CopyDstAlign);
220   if (CopyMD)
221     S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
222   if (LoopMemParallelMD)
223     S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
224   if (AccessGroupMD)
225     S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
226 
227   if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
228     // non-atomics can be volatile
229     L->setVolatile(MT->isVolatile());
230     S->setVolatile(MT->isVolatile());
231   }
232   if (isa<AtomicMemTransferInst>(MI)) {
233     // atomics have to be unordered
234     L->setOrdering(AtomicOrdering::Unordered);
235     S->setOrdering(AtomicOrdering::Unordered);
236   }
237 
238   // Set the size of the copy to 0, it will be deleted on the next iteration.
239   MI->setLength(Constant::getNullValue(MemOpLength->getType()));
240   return MI;
241 }
242 
243 Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
244   const Align KnownAlignment =
245       getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
246   MaybeAlign MemSetAlign = MI->getDestAlign();
247   if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
248     MI->setDestAlignment(KnownAlignment);
249     return MI;
250   }
251 
252   // If we have a store to a location which is known constant, we can conclude
253   // that the store must be storing the constant value (else the memory
254   // wouldn't be constant), and this must be a noop.
255   if (AA->pointsToConstantMemory(MI->getDest())) {
256     // Set the size of the copy to 0, it will be deleted on the next iteration.
257     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
258     return MI;
259   }
260 
261   // Remove memset with an undef value.
262   // FIXME: This is technically incorrect because it might overwrite a poison
263   // value. Change to PoisonValue once #52930 is resolved.
264   if (isa<UndefValue>(MI->getValue())) {
265     // Set the size of the copy to 0, it will be deleted on the next iteration.
266     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
267     return MI;
268   }
269 
270   // Extract the length and alignment and fill if they are constant.
271   ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
272   ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
273   if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
274     return nullptr;
275   const uint64_t Len = LenC->getLimitedValue();
276   assert(Len && "0-sized memory setting should be removed already.");
277   const Align Alignment = assumeAligned(MI->getDestAlignment());
278 
279   // If it is an atomic and alignment is less than the size then we will
280   // introduce the unaligned memory access which will be later transformed
281   // into libcall in CodeGen. This is not evident performance gain so disable
282   // it now.
283   if (isa<AtomicMemSetInst>(MI))
284     if (Alignment < Len)
285       return nullptr;
286 
287   // memset(s,c,n) -> store s, c (for n=1,2,4,8)
288   if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
289     Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
290 
291     Value *Dest = MI->getDest();
292     unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
293     Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
294     Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
295 
296     // Extract the fill value and store.
297     uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
298     StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
299                                        MI->isVolatile());
300     S->setAlignment(Alignment);
301     if (isa<AtomicMemSetInst>(MI))
302       S->setOrdering(AtomicOrdering::Unordered);
303 
304     // Set the size of the copy to 0, it will be deleted on the next iteration.
305     MI->setLength(Constant::getNullValue(LenC->getType()));
306     return MI;
307   }
308 
309   return nullptr;
310 }
311 
312 // TODO, Obvious Missing Transforms:
313 // * Narrow width by halfs excluding zero/undef lanes
314 Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
315   Value *LoadPtr = II.getArgOperand(0);
316   const Align Alignment =
317       cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
318 
319   // If the mask is all ones or undefs, this is a plain vector load of the 1st
320   // argument.
321   if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
322     LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
323                                             "unmaskedload");
324     L->copyMetadata(II);
325     return L;
326   }
327 
328   // If we can unconditionally load from this address, replace with a
329   // load/select idiom. TODO: use DT for context sensitive query
330   if (isDereferenceablePointer(LoadPtr, II.getType(),
331                                II.getModule()->getDataLayout(), &II, nullptr)) {
332     LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
333                                              "unmaskedload");
334     LI->copyMetadata(II);
335     return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
336   }
337 
338   return nullptr;
339 }
340 
341 // TODO, Obvious Missing Transforms:
342 // * Single constant active lane -> store
343 // * Narrow width by halfs excluding zero/undef lanes
344 Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
345   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
346   if (!ConstMask)
347     return nullptr;
348 
349   // If the mask is all zeros, this instruction does nothing.
350   if (ConstMask->isNullValue())
351     return eraseInstFromFunction(II);
352 
353   // If the mask is all ones, this is a plain vector store of the 1st argument.
354   if (ConstMask->isAllOnesValue()) {
355     Value *StorePtr = II.getArgOperand(1);
356     Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
357     StoreInst *S =
358         new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
359     S->copyMetadata(II);
360     return S;
361   }
362 
363   if (isa<ScalableVectorType>(ConstMask->getType()))
364     return nullptr;
365 
366   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
367   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
368   APInt UndefElts(DemandedElts.getBitWidth(), 0);
369   if (Value *V =
370           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
371     return replaceOperand(II, 0, V);
372 
373   return nullptr;
374 }
375 
376 // TODO, Obvious Missing Transforms:
377 // * Single constant active lane load -> load
378 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
379 // * Adjacent vector addresses -> masked.load
380 // * Narrow width by halfs excluding zero/undef lanes
381 // * Vector incrementing address -> vector masked load
382 Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
383   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
384   if (!ConstMask)
385     return nullptr;
386 
387   // Vector splat address w/known mask -> scalar load
388   // Fold the gather to load the source vector first lane
389   // because it is reloading the same value each time
390   if (ConstMask->isAllOnesValue())
391     if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
392       auto *VecTy = cast<VectorType>(II.getType());
393       const Align Alignment =
394           cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
395       LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
396                                               Alignment, "load.scalar");
397       Value *Shuf =
398           Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
399       return replaceInstUsesWith(II, cast<Instruction>(Shuf));
400     }
401 
402   return nullptr;
403 }
404 
405 // TODO, Obvious Missing Transforms:
406 // * Single constant active lane -> store
407 // * Adjacent vector addresses -> masked.store
408 // * Narrow store width by halfs excluding zero/undef lanes
409 // * Vector incrementing address -> vector masked store
410 Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
411   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
412   if (!ConstMask)
413     return nullptr;
414 
415   // If the mask is all zeros, a scatter does nothing.
416   if (ConstMask->isNullValue())
417     return eraseInstFromFunction(II);
418 
419   // Vector splat address -> scalar store
420   if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
421     // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
422     if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
423       Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
424       StoreInst *S =
425           new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
426       S->copyMetadata(II);
427       return S;
428     }
429     // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
430     // lastlane), ptr
431     if (ConstMask->isAllOnesValue()) {
432       Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
433       VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
434       ElementCount VF = WideLoadTy->getElementCount();
435       Constant *EC =
436           ConstantInt::get(Builder.getInt32Ty(), VF.getKnownMinValue());
437       Value *RunTimeVF = VF.isScalable() ? Builder.CreateVScale(EC) : EC;
438       Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
439       Value *Extract =
440           Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
441       StoreInst *S =
442           new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
443       S->copyMetadata(II);
444       return S;
445     }
446   }
447   if (isa<ScalableVectorType>(ConstMask->getType()))
448     return nullptr;
449 
450   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
451   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
452   APInt UndefElts(DemandedElts.getBitWidth(), 0);
453   if (Value *V =
454           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
455     return replaceOperand(II, 0, V);
456   if (Value *V =
457           SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
458     return replaceOperand(II, 1, V);
459 
460   return nullptr;
461 }
462 
463 /// This function transforms launder.invariant.group and strip.invariant.group
464 /// like:
465 /// launder(launder(%x)) -> launder(%x)       (the result is not the argument)
466 /// launder(strip(%x)) -> launder(%x)
467 /// strip(strip(%x)) -> strip(%x)             (the result is not the argument)
468 /// strip(launder(%x)) -> strip(%x)
469 /// This is legal because it preserves the most recent information about
470 /// the presence or absence of invariant.group.
471 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
472                                                     InstCombinerImpl &IC) {
473   auto *Arg = II.getArgOperand(0);
474   auto *StrippedArg = Arg->stripPointerCasts();
475   auto *StrippedInvariantGroupsArg = StrippedArg;
476   while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
477     if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
478         Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
479       break;
480     StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
481   }
482   if (StrippedArg == StrippedInvariantGroupsArg)
483     return nullptr; // No launders/strips to remove.
484 
485   Value *Result = nullptr;
486 
487   if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
488     Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
489   else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
490     Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
491   else
492     llvm_unreachable(
493         "simplifyInvariantGroupIntrinsic only handles launder and strip");
494   if (Result->getType()->getPointerAddressSpace() !=
495       II.getType()->getPointerAddressSpace())
496     Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
497   if (Result->getType() != II.getType())
498     Result = IC.Builder.CreateBitCast(Result, II.getType());
499 
500   return cast<Instruction>(Result);
501 }
502 
503 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
504   assert((II.getIntrinsicID() == Intrinsic::cttz ||
505           II.getIntrinsicID() == Intrinsic::ctlz) &&
506          "Expected cttz or ctlz intrinsic");
507   bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
508   Value *Op0 = II.getArgOperand(0);
509   Value *Op1 = II.getArgOperand(1);
510   Value *X;
511   // ctlz(bitreverse(x)) -> cttz(x)
512   // cttz(bitreverse(x)) -> ctlz(x)
513   if (match(Op0, m_BitReverse(m_Value(X)))) {
514     Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
515     Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
516     return CallInst::Create(F, {X, II.getArgOperand(1)});
517   }
518 
519   if (II.getType()->isIntOrIntVectorTy(1)) {
520     // ctlz/cttz i1 Op0 --> not Op0
521     if (match(Op1, m_Zero()))
522       return BinaryOperator::CreateNot(Op0);
523     // If zero is poison, then the input can be assumed to be "true", so the
524     // instruction simplifies to "false".
525     assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
526     return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
527   }
528 
529   // If the operand is a select with constant arm(s), try to hoist ctlz/cttz.
530   if (auto *Sel = dyn_cast<SelectInst>(Op0))
531     if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
532       return R;
533 
534   if (IsTZ) {
535     // cttz(-x) -> cttz(x)
536     if (match(Op0, m_Neg(m_Value(X))))
537       return IC.replaceOperand(II, 0, X);
538 
539     // cttz(sext(x)) -> cttz(zext(x))
540     if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
541       auto *Zext = IC.Builder.CreateZExt(X, II.getType());
542       auto *CttzZext =
543           IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
544       return IC.replaceInstUsesWith(II, CttzZext);
545     }
546 
547     // Zext doesn't change the number of trailing zeros, so narrow:
548     // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
549     if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
550       auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
551                                                     IC.Builder.getTrue());
552       auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
553       return IC.replaceInstUsesWith(II, ZextCttz);
554     }
555 
556     // cttz(abs(x)) -> cttz(x)
557     // cttz(nabs(x)) -> cttz(x)
558     Value *Y;
559     SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
560     if (SPF == SPF_ABS || SPF == SPF_NABS)
561       return IC.replaceOperand(II, 0, X);
562 
563     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
564       return IC.replaceOperand(II, 0, X);
565   }
566 
567   KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
568 
569   // Create a mask for bits above (ctlz) or below (cttz) the first known one.
570   unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
571                                 : Known.countMaxLeadingZeros();
572   unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
573                                 : Known.countMinLeadingZeros();
574 
575   // If all bits above (ctlz) or below (cttz) the first known one are known
576   // zero, this value is constant.
577   // FIXME: This should be in InstSimplify because we're replacing an
578   // instruction with a constant.
579   if (PossibleZeros == DefiniteZeros) {
580     auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
581     return IC.replaceInstUsesWith(II, C);
582   }
583 
584   // If the input to cttz/ctlz is known to be non-zero,
585   // then change the 'ZeroIsPoison' parameter to 'true'
586   // because we know the zero behavior can't affect the result.
587   if (!Known.One.isZero() ||
588       isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
589                      &IC.getDominatorTree())) {
590     if (!match(II.getArgOperand(1), m_One()))
591       return IC.replaceOperand(II, 1, IC.Builder.getTrue());
592   }
593 
594   // Add range metadata since known bits can't completely reflect what we know.
595   // TODO: Handle splat vectors.
596   auto *IT = dyn_cast<IntegerType>(Op0->getType());
597   if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
598     Metadata *LowAndHigh[] = {
599         ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
600         ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
601     II.setMetadata(LLVMContext::MD_range,
602                    MDNode::get(II.getContext(), LowAndHigh));
603     return &II;
604   }
605 
606   return nullptr;
607 }
608 
609 static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
610   assert(II.getIntrinsicID() == Intrinsic::ctpop &&
611          "Expected ctpop intrinsic");
612   Type *Ty = II.getType();
613   unsigned BitWidth = Ty->getScalarSizeInBits();
614   Value *Op0 = II.getArgOperand(0);
615   Value *X, *Y;
616 
617   // ctpop(bitreverse(x)) -> ctpop(x)
618   // ctpop(bswap(x)) -> ctpop(x)
619   if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
620     return IC.replaceOperand(II, 0, X);
621 
622   // ctpop(rot(x)) -> ctpop(x)
623   if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
624        match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
625       X == Y)
626     return IC.replaceOperand(II, 0, X);
627 
628   // ctpop(x | -x) -> bitwidth - cttz(x, false)
629   if (Op0->hasOneUse() &&
630       match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
631     Function *F =
632         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
633     auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
634     auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
635     return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
636   }
637 
638   // ctpop(~x & (x - 1)) -> cttz(x, false)
639   if (match(Op0,
640             m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
641     Function *F =
642         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
643     return CallInst::Create(F, {X, IC.Builder.getFalse()});
644   }
645 
646   // Zext doesn't change the number of set bits, so narrow:
647   // ctpop (zext X) --> zext (ctpop X)
648   if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
649     Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
650     return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
651   }
652 
653   // If the operand is a select with constant arm(s), try to hoist ctpop.
654   if (auto *Sel = dyn_cast<SelectInst>(Op0))
655     if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
656       return R;
657 
658   KnownBits Known(BitWidth);
659   IC.computeKnownBits(Op0, Known, 0, &II);
660 
661   // If all bits are zero except for exactly one fixed bit, then the result
662   // must be 0 or 1, and we can get that answer by shifting to LSB:
663   // ctpop (X & 32) --> (X & 32) >> 5
664   if ((~Known.Zero).isPowerOf2())
665     return BinaryOperator::CreateLShr(
666         Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));
667 
668   // FIXME: Try to simplify vectors of integers.
669   auto *IT = dyn_cast<IntegerType>(Ty);
670   if (!IT)
671     return nullptr;
672 
673   // Add range metadata since known bits can't completely reflect what we know.
674   unsigned MinCount = Known.countMinPopulation();
675   unsigned MaxCount = Known.countMaxPopulation();
676   if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
677     Metadata *LowAndHigh[] = {
678         ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
679         ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
680     II.setMetadata(LLVMContext::MD_range,
681                    MDNode::get(II.getContext(), LowAndHigh));
682     return &II;
683   }
684 
685   return nullptr;
686 }
687 
688 /// Convert a table lookup to shufflevector if the mask is constant.
689 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
690 /// which case we could lower the shufflevector with rev64 instructions
691 /// as it's actually a byte reverse.
692 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
693                                InstCombiner::BuilderTy &Builder) {
694   // Bail out if the mask is not a constant.
695   auto *C = dyn_cast<Constant>(II.getArgOperand(1));
696   if (!C)
697     return nullptr;
698 
699   auto *VecTy = cast<FixedVectorType>(II.getType());
700   unsigned NumElts = VecTy->getNumElements();
701 
702   // Only perform this transformation for <8 x i8> vector types.
703   if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
704     return nullptr;
705 
706   int Indexes[8];
707 
708   for (unsigned I = 0; I < NumElts; ++I) {
709     Constant *COp = C->getAggregateElement(I);
710 
711     if (!COp || !isa<ConstantInt>(COp))
712       return nullptr;
713 
714     Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
715 
716     // Make sure the mask indices are in range.
717     if ((unsigned)Indexes[I] >= NumElts)
718       return nullptr;
719   }
720 
721   auto *V1 = II.getArgOperand(0);
722   auto *V2 = Constant::getNullValue(V1->getType());
723   return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
724 }
725 
726 // Returns true iff the 2 intrinsics have the same operands, limiting the
727 // comparison to the first NumOperands.
728 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
729                              unsigned NumOperands) {
730   assert(I.arg_size() >= NumOperands && "Not enough operands");
731   assert(E.arg_size() >= NumOperands && "Not enough operands");
732   for (unsigned i = 0; i < NumOperands; i++)
733     if (I.getArgOperand(i) != E.getArgOperand(i))
734       return false;
735   return true;
736 }
737 
738 // Remove trivially empty start/end intrinsic ranges, i.e. a start
739 // immediately followed by an end (ignoring debuginfo or other
740 // start/end intrinsics in between). As this handles only the most trivial
741 // cases, tracking the nesting level is not needed:
742 //
743 //   call @llvm.foo.start(i1 0)
744 //   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
745 //   call @llvm.foo.end(i1 0)
746 //   call @llvm.foo.end(i1 0) ; &I
747 static bool
748 removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
749                           std::function<bool(const IntrinsicInst &)> IsStart) {
750   // We start from the end intrinsic and scan backwards, so that InstCombine
751   // has already processed (and potentially removed) all the instructions
752   // before the end intrinsic.
753   BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
754   for (; BI != BE; ++BI) {
755     if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
756       if (I->isDebugOrPseudoInst() ||
757           I->getIntrinsicID() == EndI.getIntrinsicID())
758         continue;
759       if (IsStart(*I)) {
760         if (haveSameOperands(EndI, *I, EndI.arg_size())) {
761           IC.eraseInstFromFunction(*I);
762           IC.eraseInstFromFunction(EndI);
763           return true;
764         }
765         // Skip start intrinsics that don't pair with this end intrinsic.
766         continue;
767       }
768     }
769     break;
770   }
771 
772   return false;
773 }
774 
775 Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
776   removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
777     return I.getIntrinsicID() == Intrinsic::vastart ||
778            I.getIntrinsicID() == Intrinsic::vacopy;
779   });
780   return nullptr;
781 }
782 
783 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
784   assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
785   Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
786   if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
787     Call.setArgOperand(0, Arg1);
788     Call.setArgOperand(1, Arg0);
789     return &Call;
790   }
791   return nullptr;
792 }
793 
794 /// Creates a result tuple for an overflow intrinsic \p II with a given
795 /// \p Result and a constant \p Overflow value.
796 static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
797                                         Constant *Overflow) {
798   Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
799   StructType *ST = cast<StructType>(II->getType());
800   Constant *Struct = ConstantStruct::get(ST, V);
801   return InsertValueInst::Create(Struct, Result, 0);
802 }
803 
804 Instruction *
805 InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
806   WithOverflowInst *WO = cast<WithOverflowInst>(II);
807   Value *OperationResult = nullptr;
808   Constant *OverflowResult = nullptr;
809   if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
810                             WO->getRHS(), *WO, OperationResult, OverflowResult))
811     return createOverflowTuple(WO, OperationResult, OverflowResult);
812   return nullptr;
813 }
814 
815 static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
816                                    const DataLayout &DL, AssumptionCache *AC,
817                                    DominatorTree *DT) {
818   KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
819   if (Known.isNonNegative())
820     return false;
821   if (Known.isNegative())
822     return true;
823 
824   Value *X, *Y;
825   if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
826     return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, CxtI, DL);
827 
828   return isImpliedByDomCondition(
829       ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
830 }
831 
832 /// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
833 /// can trigger other combines.
834 static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
835                                        InstCombiner::BuilderTy &Builder) {
836   Intrinsic::ID MinMaxID = II->getIntrinsicID();
837   assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
838           MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
839          "Expected a min or max intrinsic");
840 
841   // TODO: Match vectors with undef elements, but undef may not propagate.
842   Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
843   Value *X;
844   const APInt *C0, *C1;
845   if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
846       !match(Op1, m_APInt(C1)))
847     return nullptr;
848 
849   // Check for necessary no-wrap and overflow constraints.
850   bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
851   auto *Add = cast<BinaryOperator>(Op0);
852   if ((IsSigned && !Add->hasNoSignedWrap()) ||
853       (!IsSigned && !Add->hasNoUnsignedWrap()))
854     return nullptr;
855 
856   // If the constant difference overflows, then instsimplify should reduce the
857   // min/max to the add or C1.
858   bool Overflow;
859   APInt CDiff =
860       IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
861   assert(!Overflow && "Expected simplify of min/max");
862 
863   // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
864   // Note: the "mismatched" no-overflow setting does not propagate.
865   Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
866   Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
867   return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
868                   : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
869 }
870 /// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
871 Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
872   Type *Ty = MinMax1.getType();
873 
874   // We are looking for a tree of:
875   // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
876   // Where the min and max could be reversed
877   Instruction *MinMax2;
878   BinaryOperator *AddSub;
879   const APInt *MinValue, *MaxValue;
880   if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
881     if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
882       return nullptr;
883   } else if (match(&MinMax1,
884                    m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
885     if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
886       return nullptr;
887   } else
888     return nullptr;
889 
890   // Check that the constants clamp a saturate, and that the new type would be
891   // sensible to convert to.
892   if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
893     return nullptr;
894   // In what bitwidth can this be treated as saturating arithmetics?
895   unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
896   // FIXME: This isn't quite right for vectors, but using the scalar type is a
897   // good first approximation for what should be done there.
898   if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
899     return nullptr;
900 
901   // Also make sure that the inner min/max and the add/sub have one use.
902   if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
903     return nullptr;
904 
905   // Create the new type (which can be a vector type)
906   Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);
907 
908   Intrinsic::ID IntrinsicID;
909   if (AddSub->getOpcode() == Instruction::Add)
910     IntrinsicID = Intrinsic::sadd_sat;
911   else if (AddSub->getOpcode() == Instruction::Sub)
912     IntrinsicID = Intrinsic::ssub_sat;
913   else
914     return nullptr;
915 
916   // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
917   // is usually achieved via a sext from a smaller type.
918   if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
919           NewBitWidth ||
920       ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
921     return nullptr;
922 
923   // Finally create and return the sat intrinsic, truncated to the new type
924   Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
925   Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
926   Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
927   Value *Sat = Builder.CreateCall(F, {AT, BT});
928   return CastInst::Create(Instruction::SExt, Sat, Ty);
929 }
930 
931 
932 /// If we have a clamp pattern like max (min X, 42), 41 -- where the output
933 /// can only be one of two possible constant values -- turn that into a select
934 /// of constants.
935 static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
936                                         InstCombiner::BuilderTy &Builder) {
937   Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
938   Value *X;
939   const APInt *C0, *C1;
940   if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
941     return nullptr;
942 
943   CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
944   switch (II->getIntrinsicID()) {
945   case Intrinsic::smax:
946     if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
947       Pred = ICmpInst::ICMP_SGT;
948     break;
949   case Intrinsic::smin:
950     if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
951       Pred = ICmpInst::ICMP_SLT;
952     break;
953   case Intrinsic::umax:
954     if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
955       Pred = ICmpInst::ICMP_UGT;
956     break;
957   case Intrinsic::umin:
958     if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
959       Pred = ICmpInst::ICMP_ULT;
960     break;
961   default:
962     llvm_unreachable("Expected min/max intrinsic");
963   }
964   if (Pred == CmpInst::BAD_ICMP_PREDICATE)
965     return nullptr;
966 
967   // max (min X, 42), 41 --> X > 41 ? 42 : 41
968   // min (max X, 42), 43 --> X < 43 ? 42 : 43
969   Value *Cmp = Builder.CreateICmp(Pred, X, I1);
970   return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
971 }
972 
973 /// If this min/max has a constant operand and an operand that is a matching
974 /// min/max with a constant operand, constant-fold the 2 constant operands.
975 static Instruction *reassociateMinMaxWithConstants(IntrinsicInst *II) {
976   Intrinsic::ID MinMaxID = II->getIntrinsicID();
977   auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
978   if (!LHS || LHS->getIntrinsicID() != MinMaxID)
979     return nullptr;
980 
981   Constant *C0, *C1;
982   if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
983       !match(II->getArgOperand(1), m_ImmConstant(C1)))
984     return nullptr;
985 
986   // max (max X, C0), C1 --> max X, (max C0, C1) --> max X, NewC
987   ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
988   Constant *CondC = ConstantExpr::getICmp(Pred, C0, C1);
989   Constant *NewC = ConstantExpr::getSelect(CondC, C0, C1);
990 
991   Module *Mod = II->getModule();
992   Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
993   return CallInst::Create(MinMax, {LHS->getArgOperand(0), NewC});
994 }
995 
996 /// If this min/max has a matching min/max operand with a constant, try to push
997 /// the constant operand into this instruction. This can enable more folds.
998 static Instruction *
999 reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
1000                                        InstCombiner::BuilderTy &Builder) {
1001   // Match and capture a min/max operand candidate.
1002   Value *X, *Y;
1003   Constant *C;
1004   Instruction *Inner;
1005   if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
1006                                   m_Instruction(Inner),
1007                                   m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
1008                               m_Value(Y))))
1009     return nullptr;
1010 
1011   // The inner op must match. Check for constants to avoid infinite loops.
1012   Intrinsic::ID MinMaxID = II->getIntrinsicID();
1013   auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
1014   if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
1015       match(X, m_ImmConstant()) || match(Y, m_ImmConstant()))
1016     return nullptr;
1017 
1018   // max (max X, C), Y --> max (max X, Y), C
1019   Function *MinMax =
1020       Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
1021   Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
1022   NewInner->takeName(Inner);
1023   return CallInst::Create(MinMax, {NewInner, C});
1024 }
1025 
1026 /// Reduce a sequence of min/max intrinsics with a common operand.
1027 static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
1028   // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
1029   auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
1030   auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
1031   Intrinsic::ID MinMaxID = II->getIntrinsicID();
1032   if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
1033       RHS->getIntrinsicID() != MinMaxID ||
1034       (!LHS->hasOneUse() && !RHS->hasOneUse()))
1035     return nullptr;
1036 
1037   Value *A = LHS->getArgOperand(0);
1038   Value *B = LHS->getArgOperand(1);
1039   Value *C = RHS->getArgOperand(0);
1040   Value *D = RHS->getArgOperand(1);
1041 
1042   // Look for a common operand.
1043   Value *MinMaxOp = nullptr;
1044   Value *ThirdOp = nullptr;
1045   if (LHS->hasOneUse()) {
1046     // If the LHS is only used in this chain and the RHS is used outside of it,
1047     // reuse the RHS min/max because that will eliminate the LHS.
1048     if (D == A || C == A) {
1049       // min(min(a, b), min(c, a)) --> min(min(c, a), b)
1050       // min(min(a, b), min(a, d)) --> min(min(a, d), b)
1051       MinMaxOp = RHS;
1052       ThirdOp = B;
1053     } else if (D == B || C == B) {
1054       // min(min(a, b), min(c, b)) --> min(min(c, b), a)
1055       // min(min(a, b), min(b, d)) --> min(min(b, d), a)
1056       MinMaxOp = RHS;
1057       ThirdOp = A;
1058     }
1059   } else {
1060     assert(RHS->hasOneUse() && "Expected one-use operand");
1061     // Reuse the LHS. This will eliminate the RHS.
1062     if (D == A || D == B) {
1063       // min(min(a, b), min(c, a)) --> min(min(a, b), c)
1064       // min(min(a, b), min(c, b)) --> min(min(a, b), c)
1065       MinMaxOp = LHS;
1066       ThirdOp = C;
1067     } else if (C == A || C == B) {
1068       // min(min(a, b), min(b, d)) --> min(min(a, b), d)
1069       // min(min(a, b), min(c, b)) --> min(min(a, b), d)
1070       MinMaxOp = LHS;
1071       ThirdOp = D;
1072     }
1073   }
1074 
1075   if (!MinMaxOp || !ThirdOp)
1076     return nullptr;
1077 
1078   Module *Mod = II->getModule();
1079   Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
1080   return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
1081 }
1082 
1083 /// If all arguments of the intrinsic are unary shuffles with the same mask,
1084 /// try to shuffle after the intrinsic.
1085 static Instruction *
1086 foldShuffledIntrinsicOperands(IntrinsicInst *II,
1087                               InstCombiner::BuilderTy &Builder) {
1088   // TODO: This should be extended to handle other intrinsics like fshl, ctpop,
1089   //       etc. Use llvm::isTriviallyVectorizable() and related to determine
1090   //       which intrinsics are safe to shuffle?
1091   switch (II->getIntrinsicID()) {
1092   case Intrinsic::smax:
1093   case Intrinsic::smin:
1094   case Intrinsic::umax:
1095   case Intrinsic::umin:
1096   case Intrinsic::fma:
1097   case Intrinsic::fshl:
1098   case Intrinsic::fshr:
1099     break;
1100   default:
1101     return nullptr;
1102   }
1103 
1104   Value *X;
1105   ArrayRef<int> Mask;
1106   if (!match(II->getArgOperand(0),
1107              m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))
1108     return nullptr;
1109 
1110   // At least 1 operand must have 1 use because we are creating 2 instructions.
1111   if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
1112     return nullptr;
1113 
1114   // See if all arguments are shuffled with the same mask.
1115   SmallVector<Value *, 4> NewArgs(II->arg_size());
1116   NewArgs[0] = X;
1117   Type *SrcTy = X->getType();
1118   for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
1119     if (!match(II->getArgOperand(i),
1120                m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
1121         X->getType() != SrcTy)
1122       return nullptr;
1123     NewArgs[i] = X;
1124   }
1125 
1126   // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
1127   Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
1128   Value *NewIntrinsic =
1129       Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
1130   return new ShuffleVectorInst(NewIntrinsic, Mask);
1131 }
1132 
1133 /// CallInst simplification. This mostly only handles folding of intrinsic
1134 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1135 /// lifting.
1136 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
1137   // Don't try to simplify calls without uses. It will not do anything useful,
1138   // but will result in the following folds being skipped.
1139   if (!CI.use_empty())
1140     if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
1141       return replaceInstUsesWith(CI, V);
1142 
1143   if (isFreeCall(&CI, &TLI))
1144     return visitFree(CI);
1145 
1146   // If the caller function (i.e. us, the function that contains this CallInst)
1147   // is nounwind, mark the call as nounwind, even if the callee isn't.
1148   if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1149     CI.setDoesNotThrow();
1150     return &CI;
1151   }
1152 
1153   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1154   if (!II) return visitCallBase(CI);
1155 
1156   // For atomic unordered mem intrinsics if len is not a positive or
1157   // not a multiple of element size then behavior is undefined.
1158   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
1159     if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
1160       if (NumBytes->getSExtValue() < 0 ||
1161           (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
1162         CreateNonTerminatorUnreachable(AMI);
1163         assert(AMI->getType()->isVoidTy() &&
1164                "non void atomic unordered mem intrinsic");
1165         return eraseInstFromFunction(*AMI);
1166       }
1167 
1168   // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1169   // instead of in visitCallBase.
1170   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1171     bool Changed = false;
1172 
1173     // memmove/cpy/set of zero bytes is a noop.
1174     if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1175       if (NumBytes->isNullValue())
1176         return eraseInstFromFunction(CI);
1177     }
1178 
1179     // No other transformations apply to volatile transfers.
1180     if (auto *M = dyn_cast<MemIntrinsic>(MI))
1181       if (M->isVolatile())
1182         return nullptr;
1183 
1184     // If we have a memmove and the source operation is a constant global,
1185     // then the source and dest pointers can't alias, so we can change this
1186     // into a call to memcpy.
1187     if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1188       if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1189         if (GVSrc->isConstant()) {
1190           Module *M = CI.getModule();
1191           Intrinsic::ID MemCpyID =
1192               isa<AtomicMemMoveInst>(MMI)
1193                   ? Intrinsic::memcpy_element_unordered_atomic
1194                   : Intrinsic::memcpy;
1195           Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1196                            CI.getArgOperand(1)->getType(),
1197                            CI.getArgOperand(2)->getType() };
1198           CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1199           Changed = true;
1200         }
1201     }
1202 
1203     if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1204       // memmove(x,x,size) -> noop.
1205       if (MTI->getSource() == MTI->getDest())
1206         return eraseInstFromFunction(CI);
1207     }
1208 
1209     // If we can determine a pointer alignment that is bigger than currently
1210     // set, update the alignment.
1211     if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1212       if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1213         return I;
1214     } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1215       if (Instruction *I = SimplifyAnyMemSet(MSI))
1216         return I;
1217     }
1218 
1219     if (Changed) return II;
1220   }
1221 
1222   // For fixed width vector result intrinsics, use the generic demanded vector
1223   // support.
1224   if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
1225     auto VWidth = IIFVTy->getNumElements();
1226     APInt UndefElts(VWidth, 0);
1227     APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
1228     if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1229       if (V != II)
1230         return replaceInstUsesWith(*II, V);
1231       return II;
1232     }
1233   }
1234 
1235   if (II->isCommutative()) {
1236     if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
1237       return NewCall;
1238   }
1239 
1240   // Unused constrained FP intrinsic calls may have declared side effect, which
1241   // prevents it from being removed. In some cases however the side effect is
1242   // actually absent. To detect this case, call SimplifyConstrainedFPCall. If it
1243   // returns a replacement, the call may be removed.
1244   if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
1245     if (SimplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
1246       return eraseInstFromFunction(CI);
1247   }
1248 
1249   Intrinsic::ID IID = II->getIntrinsicID();
1250   switch (IID) {
1251   case Intrinsic::objectsize:
1252     if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false))
1253       return replaceInstUsesWith(CI, V);
1254     return nullptr;
1255   case Intrinsic::abs: {
1256     Value *IIOperand = II->getArgOperand(0);
1257     bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
1258 
1259     // abs(-x) -> abs(x)
1260     // TODO: Copy nsw if it was present on the neg?
1261     Value *X;
1262     if (match(IIOperand, m_Neg(m_Value(X))))
1263       return replaceOperand(*II, 0, X);
1264     if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
1265       return replaceOperand(*II, 0, X);
1266     if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
1267       return replaceOperand(*II, 0, X);
1268 
1269     if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
1270       // abs(x) -> x if x >= 0
1271       if (!*Sign)
1272         return replaceInstUsesWith(*II, IIOperand);
1273 
1274       // abs(x) -> -x if x < 0
1275       if (IntMinIsPoison)
1276         return BinaryOperator::CreateNSWNeg(IIOperand);
1277       return BinaryOperator::CreateNeg(IIOperand);
1278     }
1279 
1280     // abs (sext X) --> zext (abs X*)
1281     // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
1282     if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
1283       Value *NarrowAbs =
1284           Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
1285       return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
1286     }
1287 
1288     // Match a complicated way to check if a number is odd/even:
1289     // abs (srem X, 2) --> and X, 1
1290     const APInt *C;
1291     if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
1292       return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
1293 
1294     break;
1295   }
1296   case Intrinsic::umin: {
1297     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1298     // umin(x, 1) == zext(x != 0)
1299     if (match(I1, m_One())) {
1300       Value *Zero = Constant::getNullValue(I0->getType());
1301       Value *Cmp = Builder.CreateICmpNE(I0, Zero);
1302       return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
1303     }
1304     LLVM_FALLTHROUGH;
1305   }
1306   case Intrinsic::umax: {
1307     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1308     Value *X, *Y;
1309     if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
1310         (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
1311       Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
1312       return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
1313     }
1314     Constant *C;
1315     if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
1316         I0->hasOneUse()) {
1317       Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
1318       if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) {
1319         Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
1320         return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
1321       }
1322     }
1323     // If both operands of unsigned min/max are sign-extended, it is still ok
1324     // to narrow the operation.
1325     LLVM_FALLTHROUGH;
1326   }
1327   case Intrinsic::smax:
1328   case Intrinsic::smin: {
1329     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1330     Value *X, *Y;
1331     if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
1332         (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
1333       Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
1334       return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
1335     }
1336 
1337     Constant *C;
1338     if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
1339         I0->hasOneUse()) {
1340       Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
1341       if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) {
1342         Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
1343         return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
1344       }
1345     }
1346 
1347     if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1348       // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
1349       // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
1350       // TODO: Canonicalize neg after min/max if I1 is constant.
1351       if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
1352           (I0->hasOneUse() || I1->hasOneUse())) {
1353         Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1354         Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
1355         return BinaryOperator::CreateNSWNeg(InvMaxMin);
1356       }
1357     }
1358 
1359     // If we can eliminate ~A and Y is free to invert:
1360     // max ~A, Y --> ~(min A, ~Y)
1361     //
1362     // Examples:
1363     // max ~A, ~Y --> ~(min A, Y)
1364     // max ~A, C --> ~(min A, ~C)
1365     // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z))
1366     auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
1367       Value *A;
1368       if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
1369           !isFreeToInvert(A, A->hasOneUse()) &&
1370           isFreeToInvert(Y, Y->hasOneUse())) {
1371         Value *NotY = Builder.CreateNot(Y);
1372         Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1373         Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
1374         return BinaryOperator::CreateNot(InvMaxMin);
1375       }
1376       return nullptr;
1377     };
1378 
1379     if (Instruction *I = moveNotAfterMinMax(I0, I1))
1380       return I;
1381     if (Instruction *I = moveNotAfterMinMax(I1, I0))
1382       return I;
1383 
1384     if (Instruction *I = moveAddAfterMinMax(II, Builder))
1385       return I;
1386 
1387     // smax(X, -X) --> abs(X)
1388     // smin(X, -X) --> -abs(X)
1389     // umax(X, -X) --> -abs(X)
1390     // umin(X, -X) --> abs(X)
1391     if (isKnownNegation(I0, I1)) {
1392       // We can choose either operand as the input to abs(), but if we can
1393       // eliminate the only use of a value, that's better for subsequent
1394       // transforms/analysis.
1395       if (I0->hasOneUse() && !I1->hasOneUse())
1396         std::swap(I0, I1);
1397 
1398       // This is some variant of abs(). See if we can propagate 'nsw' to the abs
1399       // operation and potentially its negation.
1400       bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
1401       Value *Abs = Builder.CreateBinaryIntrinsic(
1402           Intrinsic::abs, I0,
1403           ConstantInt::getBool(II->getContext(), IntMinIsPoison));
1404 
1405       // We don't have a "nabs" intrinsic, so negate if needed based on the
1406       // max/min operation.
1407       if (IID == Intrinsic::smin || IID == Intrinsic::umax)
1408         Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison);
1409       return replaceInstUsesWith(CI, Abs);
1410     }
1411 
1412     if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
1413       return Sel;
1414 
1415     if (Instruction *SAdd = matchSAddSubSat(*II))
1416       return SAdd;
1417 
1418     if (match(I1, m_ImmConstant()))
1419       if (auto *Sel = dyn_cast<SelectInst>(I0))
1420         if (Instruction *R = FoldOpIntoSelect(*II, Sel))
1421           return R;
1422 
1423     if (Instruction *NewMinMax = reassociateMinMaxWithConstants(II))
1424       return NewMinMax;
1425 
1426     if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder))
1427       return R;
1428 
1429     if (Instruction *NewMinMax = factorizeMinMaxTree(II))
1430        return NewMinMax;
1431 
1432     break;
1433   }
1434   case Intrinsic::bswap: {
1435     Value *IIOperand = II->getArgOperand(0);
1436     Value *X = nullptr;
1437 
1438     // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as
1439     // inverse-shift-of-bswap:
1440     // bswap (shl X, C) --> lshr (bswap X), C
1441     // bswap (lshr X, C) --> shl (bswap X), C
1442     // TODO: Use knownbits to allow variable shift and non-splat vector match.
1443     BinaryOperator *BO;
1444     if (match(IIOperand, m_OneUse(m_BinOp(BO)))) {
1445       const APInt *C;
1446       if (match(BO, m_LogicalShift(m_Value(X), m_APIntAllowUndef(C))) &&
1447           (*C & 7) == 0) {
1448         Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
1449         BinaryOperator::BinaryOps InverseShift =
1450             BO->getOpcode() == Instruction::Shl ? Instruction::LShr
1451                                                 : Instruction::Shl;
1452         return BinaryOperator::Create(InverseShift, NewSwap, BO->getOperand(1));
1453       }
1454     }
1455 
1456     KnownBits Known = computeKnownBits(IIOperand, 0, II);
1457     uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
1458     uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);
1459     unsigned BW = Known.getBitWidth();
1460 
1461     // bswap(x) -> shift(x) if x has exactly one "active byte"
1462     if (BW - LZ - TZ == 8) {
1463       assert(LZ != TZ && "active byte cannot be in the middle");
1464       if (LZ > TZ)  // -> shl(x) if the "active byte" is in the low part of x
1465         return BinaryOperator::CreateNUWShl(
1466             IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
1467       // -> lshr(x) if the "active byte" is in the high part of x
1468       return BinaryOperator::CreateExactLShr(
1469             IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
1470     }
1471 
1472     // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1473     if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1474       unsigned C = X->getType()->getScalarSizeInBits() - BW;
1475       Value *CV = ConstantInt::get(X->getType(), C);
1476       Value *V = Builder.CreateLShr(X, CV);
1477       return new TruncInst(V, IIOperand->getType());
1478     }
1479     break;
1480   }
1481   case Intrinsic::masked_load:
1482     if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1483       return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1484     break;
1485   case Intrinsic::masked_store:
1486     return simplifyMaskedStore(*II);
1487   case Intrinsic::masked_gather:
1488     return simplifyMaskedGather(*II);
1489   case Intrinsic::masked_scatter:
1490     return simplifyMaskedScatter(*II);
1491   case Intrinsic::launder_invariant_group:
1492   case Intrinsic::strip_invariant_group:
1493     if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1494       return replaceInstUsesWith(*II, SkippedBarrier);
1495     break;
1496   case Intrinsic::powi:
1497     if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1498       // 0 and 1 are handled in instsimplify
1499       // powi(x, -1) -> 1/x
1500       if (Power->isMinusOne())
1501         return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
1502                                              II->getArgOperand(0), II);
1503       // powi(x, 2) -> x*x
1504       if (Power->equalsInt(2))
1505         return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
1506                                              II->getArgOperand(0), II);
1507 
1508       if (!Power->getValue()[0]) {
1509         Value *X;
1510         // If power is even:
1511         // powi(-x, p) -> powi(x, p)
1512         // powi(fabs(x), p) -> powi(x, p)
1513         // powi(copysign(x, y), p) -> powi(x, p)
1514         if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) ||
1515             match(II->getArgOperand(0), m_FAbs(m_Value(X))) ||
1516             match(II->getArgOperand(0),
1517                   m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value())))
1518           return replaceOperand(*II, 0, X);
1519       }
1520     }
1521     break;
1522 
1523   case Intrinsic::cttz:
1524   case Intrinsic::ctlz:
1525     if (auto *I = foldCttzCtlz(*II, *this))
1526       return I;
1527     break;
1528 
1529   case Intrinsic::ctpop:
1530     if (auto *I = foldCtpop(*II, *this))
1531       return I;
1532     break;
1533 
1534   case Intrinsic::fshl:
1535   case Intrinsic::fshr: {
1536     Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1537     Type *Ty = II->getType();
1538     unsigned BitWidth = Ty->getScalarSizeInBits();
1539     Constant *ShAmtC;
1540     if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC)) &&
1541         !ShAmtC->containsConstantExpression()) {
1542       // Canonicalize a shift amount constant operand to modulo the bit-width.
1543       Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1544       Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
1545       if (ModuloC != ShAmtC)
1546         return replaceOperand(*II, 2, ModuloC);
1547 
1548       assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1549                  ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1550              "Shift amount expected to be modulo bitwidth");
1551 
1552       // Canonicalize funnel shift right by constant to funnel shift left. This
1553       // is not entirely arbitrary. For historical reasons, the backend may
1554       // recognize rotate left patterns but miss rotate right patterns.
1555       if (IID == Intrinsic::fshr) {
1556         // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1557         Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1558         Module *Mod = II->getModule();
1559         Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1560         return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1561       }
1562       assert(IID == Intrinsic::fshl &&
1563              "All funnel shifts by simple constants should go left");
1564 
1565       // fshl(X, 0, C) --> shl X, C
1566       // fshl(X, undef, C) --> shl X, C
1567       if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1568         return BinaryOperator::CreateShl(Op0, ShAmtC);
1569 
1570       // fshl(0, X, C) --> lshr X, (BW-C)
1571       // fshl(undef, X, C) --> lshr X, (BW-C)
1572       if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1573         return BinaryOperator::CreateLShr(Op1,
1574                                           ConstantExpr::getSub(WidthC, ShAmtC));
1575 
1576       // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1577       if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1578         Module *Mod = II->getModule();
1579         Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1580         return CallInst::Create(Bswap, { Op0 });
1581       }
1582     }
1583 
1584     // Left or right might be masked.
1585     if (SimplifyDemandedInstructionBits(*II))
1586       return &CI;
1587 
1588     // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
1589     // so only the low bits of the shift amount are demanded if the bitwidth is
1590     // a power-of-2.
1591     if (!isPowerOf2_32(BitWidth))
1592       break;
1593     APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
1594     KnownBits Op2Known(BitWidth);
1595     if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
1596       return &CI;
1597     break;
1598   }
1599   case Intrinsic::uadd_with_overflow:
1600   case Intrinsic::sadd_with_overflow: {
1601     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1602       return I;
1603 
1604     // Given 2 constant operands whose sum does not overflow:
1605     // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
1606     // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
1607     Value *X;
1608     const APInt *C0, *C1;
1609     Value *Arg0 = II->getArgOperand(0);
1610     Value *Arg1 = II->getArgOperand(1);
1611     bool IsSigned = IID == Intrinsic::sadd_with_overflow;
1612     bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
1613                              : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
1614     if (HasNWAdd && match(Arg1, m_APInt(C1))) {
1615       bool Overflow;
1616       APInt NewC =
1617           IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
1618       if (!Overflow)
1619         return replaceInstUsesWith(
1620             *II, Builder.CreateBinaryIntrinsic(
1621                      IID, X, ConstantInt::get(Arg1->getType(), NewC)));
1622     }
1623     break;
1624   }
1625 
1626   case Intrinsic::umul_with_overflow:
1627   case Intrinsic::smul_with_overflow:
1628   case Intrinsic::usub_with_overflow:
1629     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1630       return I;
1631     break;
1632 
1633   case Intrinsic::ssub_with_overflow: {
1634     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1635       return I;
1636 
1637     Constant *C;
1638     Value *Arg0 = II->getArgOperand(0);
1639     Value *Arg1 = II->getArgOperand(1);
1640     // Given a constant C that is not the minimum signed value
1641     // for an integer of a given bit width:
1642     //
1643     // ssubo X, C -> saddo X, -C
1644     if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
1645       Value *NegVal = ConstantExpr::getNeg(C);
1646       // Build a saddo call that is equivalent to the discovered
1647       // ssubo call.
1648       return replaceInstUsesWith(
1649           *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
1650                                              Arg0, NegVal));
1651     }
1652 
1653     break;
1654   }
1655 
1656   case Intrinsic::uadd_sat:
1657   case Intrinsic::sadd_sat:
1658   case Intrinsic::usub_sat:
1659   case Intrinsic::ssub_sat: {
1660     SaturatingInst *SI = cast<SaturatingInst>(II);
1661     Type *Ty = SI->getType();
1662     Value *Arg0 = SI->getLHS();
1663     Value *Arg1 = SI->getRHS();
1664 
1665     // Make use of known overflow information.
1666     OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
1667                                         Arg0, Arg1, SI);
1668     switch (OR) {
1669       case OverflowResult::MayOverflow:
1670         break;
1671       case OverflowResult::NeverOverflows:
1672         if (SI->isSigned())
1673           return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
1674         else
1675           return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
1676       case OverflowResult::AlwaysOverflowsLow: {
1677         unsigned BitWidth = Ty->getScalarSizeInBits();
1678         APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
1679         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
1680       }
1681       case OverflowResult::AlwaysOverflowsHigh: {
1682         unsigned BitWidth = Ty->getScalarSizeInBits();
1683         APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
1684         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
1685       }
1686     }
1687 
1688     // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
1689     Constant *C;
1690     if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
1691         C->isNotMinSignedValue()) {
1692       Value *NegVal = ConstantExpr::getNeg(C);
1693       return replaceInstUsesWith(
1694           *II, Builder.CreateBinaryIntrinsic(
1695               Intrinsic::sadd_sat, Arg0, NegVal));
1696     }
1697 
1698     // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
1699     // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
1700     // if Val and Val2 have the same sign
1701     if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
1702       Value *X;
1703       const APInt *Val, *Val2;
1704       APInt NewVal;
1705       bool IsUnsigned =
1706           IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
1707       if (Other->getIntrinsicID() == IID &&
1708           match(Arg1, m_APInt(Val)) &&
1709           match(Other->getArgOperand(0), m_Value(X)) &&
1710           match(Other->getArgOperand(1), m_APInt(Val2))) {
1711         if (IsUnsigned)
1712           NewVal = Val->uadd_sat(*Val2);
1713         else if (Val->isNonNegative() == Val2->isNonNegative()) {
1714           bool Overflow;
1715           NewVal = Val->sadd_ov(*Val2, Overflow);
1716           if (Overflow) {
1717             // Both adds together may add more than SignedMaxValue
1718             // without saturating the final result.
1719             break;
1720           }
1721         } else {
1722           // Cannot fold saturated addition with different signs.
1723           break;
1724         }
1725 
1726         return replaceInstUsesWith(
1727             *II, Builder.CreateBinaryIntrinsic(
1728                      IID, X, ConstantInt::get(II->getType(), NewVal)));
1729       }
1730     }
1731     break;
1732   }
1733 
1734   case Intrinsic::minnum:
1735   case Intrinsic::maxnum:
1736   case Intrinsic::minimum:
1737   case Intrinsic::maximum: {
1738     Value *Arg0 = II->getArgOperand(0);
1739     Value *Arg1 = II->getArgOperand(1);
1740     Value *X, *Y;
1741     if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
1742         (Arg0->hasOneUse() || Arg1->hasOneUse())) {
1743       // If both operands are negated, invert the call and negate the result:
1744       // min(-X, -Y) --> -(max(X, Y))
1745       // max(-X, -Y) --> -(min(X, Y))
1746       Intrinsic::ID NewIID;
1747       switch (IID) {
1748       case Intrinsic::maxnum:
1749         NewIID = Intrinsic::minnum;
1750         break;
1751       case Intrinsic::minnum:
1752         NewIID = Intrinsic::maxnum;
1753         break;
1754       case Intrinsic::maximum:
1755         NewIID = Intrinsic::minimum;
1756         break;
1757       case Intrinsic::minimum:
1758         NewIID = Intrinsic::maximum;
1759         break;
1760       default:
1761         llvm_unreachable("unexpected intrinsic ID");
1762       }
1763       Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
1764       Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
1765       FNeg->copyIRFlags(II);
1766       return FNeg;
1767     }
1768 
1769     // m(m(X, C2), C1) -> m(X, C)
1770     const APFloat *C1, *C2;
1771     if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
1772       if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
1773           ((match(M->getArgOperand(0), m_Value(X)) &&
1774             match(M->getArgOperand(1), m_APFloat(C2))) ||
1775            (match(M->getArgOperand(1), m_Value(X)) &&
1776             match(M->getArgOperand(0), m_APFloat(C2))))) {
1777         APFloat Res(0.0);
1778         switch (IID) {
1779         case Intrinsic::maxnum:
1780           Res = maxnum(*C1, *C2);
1781           break;
1782         case Intrinsic::minnum:
1783           Res = minnum(*C1, *C2);
1784           break;
1785         case Intrinsic::maximum:
1786           Res = maximum(*C1, *C2);
1787           break;
1788         case Intrinsic::minimum:
1789           Res = minimum(*C1, *C2);
1790           break;
1791         default:
1792           llvm_unreachable("unexpected intrinsic ID");
1793         }
1794         Instruction *NewCall = Builder.CreateBinaryIntrinsic(
1795             IID, X, ConstantFP::get(Arg0->getType(), Res), II);
1796         // TODO: Conservatively intersecting FMF. If Res == C2, the transform
1797         //       was a simplification (so Arg0 and its original flags could
1798         //       propagate?)
1799         NewCall->andIRFlags(M);
1800         return replaceInstUsesWith(*II, NewCall);
1801       }
1802     }
1803 
1804     // m((fpext X), (fpext Y)) -> fpext (m(X, Y))
1805     if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) &&
1806         match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) &&
1807         X->getType() == Y->getType()) {
1808       Value *NewCall =
1809           Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());
1810       return new FPExtInst(NewCall, II->getType());
1811     }
1812 
1813     // max X, -X --> fabs X
1814     // min X, -X --> -(fabs X)
1815     // TODO: Remove one-use limitation? That is obviously better for max.
1816     //       It would be an extra instruction for min (fnabs), but that is
1817     //       still likely better for analysis and codegen.
1818     if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) ||
1819         (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) {
1820       Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);
1821       if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
1822         R = Builder.CreateFNegFMF(R, II);
1823       return replaceInstUsesWith(*II, R);
1824     }
1825 
1826     break;
1827   }
1828   case Intrinsic::fmuladd: {
1829     // Canonicalize fast fmuladd to the separate fmul + fadd.
1830     if (II->isFast()) {
1831       BuilderTy::FastMathFlagGuard Guard(Builder);
1832       Builder.setFastMathFlags(II->getFastMathFlags());
1833       Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
1834                                       II->getArgOperand(1));
1835       Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
1836       Add->takeName(II);
1837       return replaceInstUsesWith(*II, Add);
1838     }
1839 
1840     // Try to simplify the underlying FMul.
1841     if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
1842                                     II->getFastMathFlags(),
1843                                     SQ.getWithInstruction(II))) {
1844       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1845       FAdd->copyFastMathFlags(II);
1846       return FAdd;
1847     }
1848 
1849     LLVM_FALLTHROUGH;
1850   }
1851   case Intrinsic::fma: {
1852     // fma fneg(x), fneg(y), z -> fma x, y, z
1853     Value *Src0 = II->getArgOperand(0);
1854     Value *Src1 = II->getArgOperand(1);
1855     Value *X, *Y;
1856     if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
1857       replaceOperand(*II, 0, X);
1858       replaceOperand(*II, 1, Y);
1859       return II;
1860     }
1861 
1862     // fma fabs(x), fabs(x), z -> fma x, x, z
1863     if (match(Src0, m_FAbs(m_Value(X))) &&
1864         match(Src1, m_FAbs(m_Specific(X)))) {
1865       replaceOperand(*II, 0, X);
1866       replaceOperand(*II, 1, X);
1867       return II;
1868     }
1869 
1870     // Try to simplify the underlying FMul. We can only apply simplifications
1871     // that do not require rounding.
1872     if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
1873                                    II->getFastMathFlags(),
1874                                    SQ.getWithInstruction(II))) {
1875       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1876       FAdd->copyFastMathFlags(II);
1877       return FAdd;
1878     }
1879 
1880     // fma x, y, 0 -> fmul x, y
1881     // This is always valid for -0.0, but requires nsz for +0.0 as
1882     // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
1883     if (match(II->getArgOperand(2), m_NegZeroFP()) ||
1884         (match(II->getArgOperand(2), m_PosZeroFP()) &&
1885          II->getFastMathFlags().noSignedZeros()))
1886       return BinaryOperator::CreateFMulFMF(Src0, Src1, II);
1887 
1888     break;
1889   }
1890   case Intrinsic::copysign: {
1891     Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
1892     if (SignBitMustBeZero(Sign, &TLI)) {
1893       // If we know that the sign argument is positive, reduce to FABS:
1894       // copysign Mag, +Sign --> fabs Mag
1895       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1896       return replaceInstUsesWith(*II, Fabs);
1897     }
1898     // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
1899     const APFloat *C;
1900     if (match(Sign, m_APFloat(C)) && C->isNegative()) {
1901       // If we know that the sign argument is negative, reduce to FNABS:
1902       // copysign Mag, -Sign --> fneg (fabs Mag)
1903       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1904       return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
1905     }
1906 
1907     // Propagate sign argument through nested calls:
1908     // copysign Mag, (copysign ?, X) --> copysign Mag, X
1909     Value *X;
1910     if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
1911       return replaceOperand(*II, 1, X);
1912 
1913     // Peek through changes of magnitude's sign-bit. This call rewrites those:
1914     // copysign (fabs X), Sign --> copysign X, Sign
1915     // copysign (fneg X), Sign --> copysign X, Sign
1916     if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
1917       return replaceOperand(*II, 0, X);
1918 
1919     break;
1920   }
1921   case Intrinsic::fabs: {
1922     Value *Cond, *TVal, *FVal;
1923     if (match(II->getArgOperand(0),
1924               m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
1925       // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
1926       if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
1927         CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
1928         CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
1929         return SelectInst::Create(Cond, AbsT, AbsF);
1930       }
1931       // fabs (select Cond, -FVal, FVal) --> fabs FVal
1932       if (match(TVal, m_FNeg(m_Specific(FVal))))
1933         return replaceOperand(*II, 0, FVal);
1934       // fabs (select Cond, TVal, -TVal) --> fabs TVal
1935       if (match(FVal, m_FNeg(m_Specific(TVal))))
1936         return replaceOperand(*II, 0, TVal);
1937     }
1938 
1939     LLVM_FALLTHROUGH;
1940   }
1941   case Intrinsic::ceil:
1942   case Intrinsic::floor:
1943   case Intrinsic::round:
1944   case Intrinsic::roundeven:
1945   case Intrinsic::nearbyint:
1946   case Intrinsic::rint:
1947   case Intrinsic::trunc: {
1948     Value *ExtSrc;
1949     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
1950       // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
1951       Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
1952       return new FPExtInst(NarrowII, II->getType());
1953     }
1954     break;
1955   }
1956   case Intrinsic::cos:
1957   case Intrinsic::amdgcn_cos: {
1958     Value *X;
1959     Value *Src = II->getArgOperand(0);
1960     if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
1961       // cos(-x) -> cos(x)
1962       // cos(fabs(x)) -> cos(x)
1963       return replaceOperand(*II, 0, X);
1964     }
1965     break;
1966   }
1967   case Intrinsic::sin: {
1968     Value *X;
1969     if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
1970       // sin(-x) --> -sin(x)
1971       Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
1972       Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
1973       FNeg->copyFastMathFlags(II);
1974       return FNeg;
1975     }
1976     break;
1977   }
1978 
1979   case Intrinsic::arm_neon_vtbl1:
1980   case Intrinsic::aarch64_neon_tbl1:
1981     if (Value *V = simplifyNeonTbl1(*II, Builder))
1982       return replaceInstUsesWith(*II, V);
1983     break;
1984 
1985   case Intrinsic::arm_neon_vmulls:
1986   case Intrinsic::arm_neon_vmullu:
1987   case Intrinsic::aarch64_neon_smull:
1988   case Intrinsic::aarch64_neon_umull: {
1989     Value *Arg0 = II->getArgOperand(0);
1990     Value *Arg1 = II->getArgOperand(1);
1991 
1992     // Handle mul by zero first:
1993     if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
1994       return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
1995     }
1996 
1997     // Check for constant LHS & RHS - in this case we just simplify.
1998     bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
1999                  IID == Intrinsic::aarch64_neon_umull);
2000     VectorType *NewVT = cast<VectorType>(II->getType());
2001     if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
2002       if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
2003         CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
2004         CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
2005 
2006         return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
2007       }
2008 
2009       // Couldn't simplify - canonicalize constant to the RHS.
2010       std::swap(Arg0, Arg1);
2011     }
2012 
2013     // Handle mul by one:
2014     if (Constant *CV1 = dyn_cast<Constant>(Arg1))
2015       if (ConstantInt *Splat =
2016               dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
2017         if (Splat->isOne())
2018           return CastInst::CreateIntegerCast(Arg0, II->getType(),
2019                                              /*isSigned=*/!Zext);
2020 
2021     break;
2022   }
2023   case Intrinsic::arm_neon_aesd:
2024   case Intrinsic::arm_neon_aese:
2025   case Intrinsic::aarch64_crypto_aesd:
2026   case Intrinsic::aarch64_crypto_aese: {
2027     Value *DataArg = II->getArgOperand(0);
2028     Value *KeyArg  = II->getArgOperand(1);
2029 
2030     // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
2031     Value *Data, *Key;
2032     if (match(KeyArg, m_ZeroInt()) &&
2033         match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
2034       replaceOperand(*II, 0, Data);
2035       replaceOperand(*II, 1, Key);
2036       return II;
2037     }
2038     break;
2039   }
2040   case Intrinsic::hexagon_V6_vandvrt:
2041   case Intrinsic::hexagon_V6_vandvrt_128B: {
2042     // Simplify Q -> V -> Q conversion.
2043     if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2044       Intrinsic::ID ID0 = Op0->getIntrinsicID();
2045       if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
2046           ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
2047         break;
2048       Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
2049       uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
2050       uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
2051       // Check if every byte has common bits in Bytes and Mask.
2052       uint64_t C = Bytes1 & Mask1;
2053       if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
2054         return replaceInstUsesWith(*II, Op0->getArgOperand(0));
2055     }
2056     break;
2057   }
2058   case Intrinsic::stackrestore: {
2059     enum class ClassifyResult {
2060       None,
2061       Alloca,
2062       StackRestore,
2063       CallWithSideEffects,
2064     };
2065     auto Classify = [](const Instruction *I) {
2066       if (isa<AllocaInst>(I))
2067         return ClassifyResult::Alloca;
2068 
2069       if (auto *CI = dyn_cast<CallInst>(I)) {
2070         if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
2071           if (II->getIntrinsicID() == Intrinsic::stackrestore)
2072             return ClassifyResult::StackRestore;
2073 
2074           if (II->mayHaveSideEffects())
2075             return ClassifyResult::CallWithSideEffects;
2076         } else {
2077           // Consider all non-intrinsic calls to be side effects
2078           return ClassifyResult::CallWithSideEffects;
2079         }
2080       }
2081 
2082       return ClassifyResult::None;
2083     };
2084 
2085     // If the stacksave and the stackrestore are in the same BB, and there is
2086     // no intervening call, alloca, or stackrestore of a different stacksave,
2087     // remove the restore. This can happen when variable allocas are DCE'd.
2088     if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2089       if (SS->getIntrinsicID() == Intrinsic::stacksave &&
2090           SS->getParent() == II->getParent()) {
2091         BasicBlock::iterator BI(SS);
2092         bool CannotRemove = false;
2093         for (++BI; &*BI != II; ++BI) {
2094           switch (Classify(&*BI)) {
2095           case ClassifyResult::None:
2096             // So far so good, look at next instructions.
2097             break;
2098 
2099           case ClassifyResult::StackRestore:
2100             // If we found an intervening stackrestore for a different
2101             // stacksave, we can't remove the stackrestore. Otherwise, continue.
2102             if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
2103               CannotRemove = true;
2104             break;
2105 
2106           case ClassifyResult::Alloca:
2107           case ClassifyResult::CallWithSideEffects:
2108             // If we found an alloca, a non-intrinsic call, or an intrinsic
2109             // call with side effects, we can't remove the stackrestore.
2110             CannotRemove = true;
2111             break;
2112           }
2113           if (CannotRemove)
2114             break;
2115         }
2116 
2117         if (!CannotRemove)
2118           return eraseInstFromFunction(CI);
2119       }
2120     }
2121 
2122     // Scan down this block to see if there is another stack restore in the
2123     // same block without an intervening call/alloca.
2124     BasicBlock::iterator BI(II);
2125     Instruction *TI = II->getParent()->getTerminator();
2126     bool CannotRemove = false;
2127     for (++BI; &*BI != TI; ++BI) {
2128       switch (Classify(&*BI)) {
2129       case ClassifyResult::None:
2130         // So far so good, look at next instructions.
2131         break;
2132 
2133       case ClassifyResult::StackRestore:
2134         // If there is a stackrestore below this one, remove this one.
2135         return eraseInstFromFunction(CI);
2136 
2137       case ClassifyResult::Alloca:
2138       case ClassifyResult::CallWithSideEffects:
2139         // If we found an alloca, a non-intrinsic call, or an intrinsic call
2140         // with side effects (such as llvm.stacksave and llvm.read_register),
2141         // we can't remove the stack restore.
2142         CannotRemove = true;
2143         break;
2144       }
2145       if (CannotRemove)
2146         break;
2147     }
2148 
2149     // If the stack restore is in a return, resume, or unwind block and if there
2150     // are no allocas or calls between the restore and the return, nuke the
2151     // restore.
2152     if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
2153       return eraseInstFromFunction(CI);
2154     break;
2155   }
2156   case Intrinsic::lifetime_end:
2157     // Asan needs to poison memory to detect invalid access which is possible
2158     // even for empty lifetime range.
2159     if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
2160         II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
2161         II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
2162       break;
2163 
2164     if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
2165           return I.getIntrinsicID() == Intrinsic::lifetime_start;
2166         }))
2167       return nullptr;
2168     break;
2169   case Intrinsic::assume: {
2170     Value *IIOperand = II->getArgOperand(0);
2171     SmallVector<OperandBundleDef, 4> OpBundles;
2172     II->getOperandBundlesAsDefs(OpBundles);
2173 
2174     /// This will remove the boolean Condition from the assume given as
2175     /// argument and remove the assume if it becomes useless.
2176     /// always returns nullptr for use as a return values.
2177     auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
2178       assert(isa<AssumeInst>(Assume));
2179       if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II)))
2180         return eraseInstFromFunction(CI);
2181       replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
2182       return nullptr;
2183     };
2184     // Remove an assume if it is followed by an identical assume.
2185     // TODO: Do we need this? Unless there are conflicting assumptions, the
2186     // computeKnownBits(IIOperand) below here eliminates redundant assumes.
2187     Instruction *Next = II->getNextNonDebugInstruction();
2188     if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
2189       return RemoveConditionFromAssume(Next);
2190 
2191     // Canonicalize assume(a && b) -> assume(a); assume(b);
2192     // Note: New assumption intrinsics created here are registered by
2193     // the InstCombineIRInserter object.
2194     FunctionType *AssumeIntrinsicTy = II->getFunctionType();
2195     Value *AssumeIntrinsic = II->getCalledOperand();
2196     Value *A, *B;
2197     if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
2198       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
2199                          II->getName());
2200       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
2201       return eraseInstFromFunction(*II);
2202     }
2203     // assume(!(a || b)) -> assume(!a); assume(!b);
2204     if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
2205       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
2206                          Builder.CreateNot(A), OpBundles, II->getName());
2207       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
2208                          Builder.CreateNot(B), II->getName());
2209       return eraseInstFromFunction(*II);
2210     }
2211 
2212     // assume( (load addr) != null ) -> add 'nonnull' metadata to load
2213     // (if assume is valid at the load)
2214     CmpInst::Predicate Pred;
2215     Instruction *LHS;
2216     if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
2217         Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
2218         LHS->getType()->isPointerTy() &&
2219         isValidAssumeForContext(II, LHS, &DT)) {
2220       MDNode *MD = MDNode::get(II->getContext(), None);
2221       LHS->setMetadata(LLVMContext::MD_nonnull, MD);
2222       return RemoveConditionFromAssume(II);
2223 
2224       // TODO: apply nonnull return attributes to calls and invokes
2225       // TODO: apply range metadata for range check patterns?
2226     }
2227 
2228     // Convert nonnull assume like:
2229     // %A = icmp ne i32* %PTR, null
2230     // call void @llvm.assume(i1 %A)
2231     // into
2232     // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
2233     if (EnableKnowledgeRetention &&
2234         match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) &&
2235         Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) {
2236       if (auto *Replacement = buildAssumeFromKnowledge(
2237               {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {
2238 
2239         Replacement->insertBefore(Next);
2240         AC.registerAssumption(Replacement);
2241         return RemoveConditionFromAssume(II);
2242       }
2243     }
2244 
2245     // Convert alignment assume like:
2246     // %B = ptrtoint i32* %A to i64
2247     // %C = and i64 %B, Constant
2248     // %D = icmp eq i64 %C, 0
2249     // call void @llvm.assume(i1 %D)
2250     // into
2251     // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64  Constant + 1)]
2252     uint64_t AlignMask;
2253     if (EnableKnowledgeRetention &&
2254         match(IIOperand,
2255               m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)),
2256                     m_Zero())) &&
2257         Pred == CmpInst::ICMP_EQ) {
2258       if (isPowerOf2_64(AlignMask + 1)) {
2259         uint64_t Offset = 0;
2260         match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
2261         if (match(A, m_PtrToInt(m_Value(A)))) {
2262           /// Note: this doesn't preserve the offset information but merges
2263           /// offset and alignment.
2264           /// TODO: we can generate a GEP instead of merging the alignment with
2265           /// the offset.
2266           RetainedKnowledge RK{Attribute::Alignment,
2267                                (unsigned)MinAlign(Offset, AlignMask + 1), A};
2268           if (auto *Replacement =
2269                   buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {
2270 
2271             Replacement->insertAfter(II);
2272             AC.registerAssumption(Replacement);
2273           }
2274           return RemoveConditionFromAssume(II);
2275         }
2276       }
2277     }
2278 
2279     /// Canonicalize Knowledge in operand bundles.
2280     if (EnableKnowledgeRetention && II->hasOperandBundles()) {
2281       for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
2282         auto &BOI = II->bundle_op_info_begin()[Idx];
2283         RetainedKnowledge RK =
2284           llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
2285         if (BOI.End - BOI.Begin > 2)
2286           continue; // Prevent reducing knowledge in an align with offset since
2287                     // extracting a RetainedKnowledge form them looses offset
2288                     // information
2289         RetainedKnowledge CanonRK =
2290           llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
2291                                           &getAssumptionCache(),
2292                                           &getDominatorTree());
2293         if (CanonRK == RK)
2294           continue;
2295         if (!CanonRK) {
2296           if (BOI.End - BOI.Begin > 0) {
2297             Worklist.pushValue(II->op_begin()[BOI.Begin]);
2298             Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
2299           }
2300           continue;
2301         }
2302         assert(RK.AttrKind == CanonRK.AttrKind);
2303         if (BOI.End - BOI.Begin > 0)
2304           II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
2305         if (BOI.End - BOI.Begin > 1)
2306           II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
2307               Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
2308         if (RK.WasOn)
2309           Worklist.pushValue(RK.WasOn);
2310         return II;
2311       }
2312     }
2313 
2314     // If there is a dominating assume with the same condition as this one,
2315     // then this one is redundant, and should be removed.
2316     KnownBits Known(1);
2317     computeKnownBits(IIOperand, Known, 0, II);
2318     if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
2319       return eraseInstFromFunction(*II);
2320 
2321     // Update the cache of affected values for this assumption (we might be
2322     // here because we just simplified the condition).
2323     AC.updateAffectedValues(cast<AssumeInst>(II));
2324     break;
2325   }
2326   case Intrinsic::experimental_guard: {
2327     // Is this guard followed by another guard?  We scan forward over a small
2328     // fixed window of instructions to handle common cases with conditions
2329     // computed between guards.
2330     Instruction *NextInst = II->getNextNonDebugInstruction();
2331     for (unsigned i = 0; i < GuardWideningWindow; i++) {
2332       // Note: Using context-free form to avoid compile time blow up
2333       if (!isSafeToSpeculativelyExecute(NextInst))
2334         break;
2335       NextInst = NextInst->getNextNonDebugInstruction();
2336     }
2337     Value *NextCond = nullptr;
2338     if (match(NextInst,
2339               m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
2340       Value *CurrCond = II->getArgOperand(0);
2341 
2342       // Remove a guard that it is immediately preceded by an identical guard.
2343       // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
2344       if (CurrCond != NextCond) {
2345         Instruction *MoveI = II->getNextNonDebugInstruction();
2346         while (MoveI != NextInst) {
2347           auto *Temp = MoveI;
2348           MoveI = MoveI->getNextNonDebugInstruction();
2349           Temp->moveBefore(II);
2350         }
2351         replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
2352       }
2353       eraseInstFromFunction(*NextInst);
2354       return II;
2355     }
2356     break;
2357   }
2358   case Intrinsic::experimental_vector_insert: {
2359     Value *Vec = II->getArgOperand(0);
2360     Value *SubVec = II->getArgOperand(1);
2361     Value *Idx = II->getArgOperand(2);
2362     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
2363     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2364     auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
2365 
2366     // Only canonicalize if the destination vector, Vec, and SubVec are all
2367     // fixed vectors.
2368     if (DstTy && VecTy && SubVecTy) {
2369       unsigned DstNumElts = DstTy->getNumElements();
2370       unsigned VecNumElts = VecTy->getNumElements();
2371       unsigned SubVecNumElts = SubVecTy->getNumElements();
2372       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2373 
2374       // An insert that entirely overwrites Vec with SubVec is a nop.
2375       if (VecNumElts == SubVecNumElts)
2376         return replaceInstUsesWith(CI, SubVec);
2377 
2378       // Widen SubVec into a vector of the same width as Vec, since
2379       // shufflevector requires the two input vectors to be the same width.
2380       // Elements beyond the bounds of SubVec within the widened vector are
2381       // undefined.
2382       SmallVector<int, 8> WidenMask;
2383       unsigned i;
2384       for (i = 0; i != SubVecNumElts; ++i)
2385         WidenMask.push_back(i);
2386       for (; i != VecNumElts; ++i)
2387         WidenMask.push_back(UndefMaskElem);
2388 
2389       Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
2390 
2391       SmallVector<int, 8> Mask;
2392       for (unsigned i = 0; i != IdxN; ++i)
2393         Mask.push_back(i);
2394       for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
2395         Mask.push_back(i);
2396       for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
2397         Mask.push_back(i);
2398 
2399       Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
2400       return replaceInstUsesWith(CI, Shuffle);
2401     }
2402     break;
2403   }
2404   case Intrinsic::experimental_vector_extract: {
2405     Value *Vec = II->getArgOperand(0);
2406     Value *Idx = II->getArgOperand(1);
2407 
2408     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
2409     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2410 
2411     // Only canonicalize if the the destination vector and Vec are fixed
2412     // vectors.
2413     if (DstTy && VecTy) {
2414       unsigned DstNumElts = DstTy->getNumElements();
2415       unsigned VecNumElts = VecTy->getNumElements();
2416       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2417 
2418       // Extracting the entirety of Vec is a nop.
2419       if (VecNumElts == DstNumElts) {
2420         replaceInstUsesWith(CI, Vec);
2421         return eraseInstFromFunction(CI);
2422       }
2423 
2424       SmallVector<int, 8> Mask;
2425       for (unsigned i = 0; i != DstNumElts; ++i)
2426         Mask.push_back(IdxN + i);
2427 
2428       Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
2429       return replaceInstUsesWith(CI, Shuffle);
2430     }
2431     break;
2432   }
2433   case Intrinsic::experimental_vector_reverse: {
2434     Value *BO0, *BO1, *X, *Y;
2435     Value *Vec = II->getArgOperand(0);
2436     if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) {
2437       auto *OldBinOp = cast<BinaryOperator>(Vec);
2438       if (match(BO0, m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2439                          m_Value(X)))) {
2440         // rev(binop rev(X), rev(Y)) --> binop X, Y
2441         if (match(BO1, m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2442                            m_Value(Y))))
2443           return replaceInstUsesWith(CI,
2444                                      BinaryOperator::CreateWithCopiedFlags(
2445                                          OldBinOp->getOpcode(), X, Y, OldBinOp,
2446                                          OldBinOp->getName(), II));
2447         // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat
2448         if (isSplatValue(BO1))
2449           return replaceInstUsesWith(CI,
2450                                      BinaryOperator::CreateWithCopiedFlags(
2451                                          OldBinOp->getOpcode(), X, BO1,
2452                                          OldBinOp, OldBinOp->getName(), II));
2453       }
2454       // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y
2455       if (match(BO1, m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2456                          m_Value(Y))) &&
2457           isSplatValue(BO0))
2458         return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
2459                                            OldBinOp->getOpcode(), BO0, Y,
2460                                            OldBinOp, OldBinOp->getName(), II));
2461     }
2462     // rev(unop rev(X)) --> unop X
2463     if (match(Vec, m_OneUse(m_UnOp(
2464                        m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2465                            m_Value(X)))))) {
2466       auto *OldUnOp = cast<UnaryOperator>(Vec);
2467       auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
2468           OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
2469       return replaceInstUsesWith(CI, NewUnOp);
2470     }
2471     break;
2472   }
2473   case Intrinsic::vector_reduce_or:
2474   case Intrinsic::vector_reduce_and: {
2475     // Canonicalize logical or/and reductions:
2476     // Or reduction for i1 is represented as:
2477     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2478     // %res = cmp ne iReduxWidth %val, 0
2479     // And reduction for i1 is represented as:
2480     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2481     // %res = cmp eq iReduxWidth %val, 11111
2482     Value *Arg = II->getArgOperand(0);
2483     Value *Vect;
2484     if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2485       if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2486         if (FTy->getElementType() == Builder.getInt1Ty()) {
2487           Value *Res = Builder.CreateBitCast(
2488               Vect, Builder.getIntNTy(FTy->getNumElements()));
2489           if (IID == Intrinsic::vector_reduce_and) {
2490             Res = Builder.CreateICmpEQ(
2491                 Res, ConstantInt::getAllOnesValue(Res->getType()));
2492           } else {
2493             assert(IID == Intrinsic::vector_reduce_or &&
2494                    "Expected or reduction.");
2495             Res = Builder.CreateIsNotNull(Res);
2496           }
2497           if (Arg != Vect)
2498             Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
2499                                      II->getType());
2500           return replaceInstUsesWith(CI, Res);
2501         }
2502     }
2503     LLVM_FALLTHROUGH;
2504   }
2505   case Intrinsic::vector_reduce_add: {
2506     if (IID == Intrinsic::vector_reduce_add) {
2507       // Convert vector_reduce_add(ZExt(<n x i1>)) to
2508       // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
2509       // Convert vector_reduce_add(SExt(<n x i1>)) to
2510       // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
2511       // Convert vector_reduce_add(<n x i1>) to
2512       // Trunc(ctpop(bitcast <n x i1> to in)).
2513       Value *Arg = II->getArgOperand(0);
2514       Value *Vect;
2515       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2516         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2517           if (FTy->getElementType() == Builder.getInt1Ty()) {
2518             Value *V = Builder.CreateBitCast(
2519                 Vect, Builder.getIntNTy(FTy->getNumElements()));
2520             Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
2521             if (Res->getType() != II->getType())
2522               Res = Builder.CreateZExtOrTrunc(Res, II->getType());
2523             if (Arg != Vect &&
2524                 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
2525               Res = Builder.CreateNeg(Res);
2526             return replaceInstUsesWith(CI, Res);
2527           }
2528       }
2529     }
2530     LLVM_FALLTHROUGH;
2531   }
2532   case Intrinsic::vector_reduce_xor: {
2533     if (IID == Intrinsic::vector_reduce_xor) {
2534       // Exclusive disjunction reduction over the vector with
2535       // (potentially-extended) i1 element type is actually a
2536       // (potentially-extended) arithmetic `add` reduction over the original
2537       // non-extended value:
2538       //   vector_reduce_xor(?ext(<n x i1>))
2539       //     -->
2540       //   ?ext(vector_reduce_add(<n x i1>))
2541       Value *Arg = II->getArgOperand(0);
2542       Value *Vect;
2543       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2544         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2545           if (FTy->getElementType() == Builder.getInt1Ty()) {
2546             Value *Res = Builder.CreateAddReduce(Vect);
2547             if (Arg != Vect)
2548               Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
2549                                        II->getType());
2550             return replaceInstUsesWith(CI, Res);
2551           }
2552       }
2553     }
2554     LLVM_FALLTHROUGH;
2555   }
2556   case Intrinsic::vector_reduce_mul: {
2557     if (IID == Intrinsic::vector_reduce_mul) {
2558       // Multiplicative reduction over the vector with (potentially-extended)
2559       // i1 element type is actually a (potentially zero-extended)
2560       // logical `and` reduction over the original non-extended value:
2561       //   vector_reduce_mul(?ext(<n x i1>))
2562       //     -->
2563       //   zext(vector_reduce_and(<n x i1>))
2564       Value *Arg = II->getArgOperand(0);
2565       Value *Vect;
2566       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2567         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2568           if (FTy->getElementType() == Builder.getInt1Ty()) {
2569             Value *Res = Builder.CreateAndReduce(Vect);
2570             if (Res->getType() != II->getType())
2571               Res = Builder.CreateZExt(Res, II->getType());
2572             return replaceInstUsesWith(CI, Res);
2573           }
2574       }
2575     }
2576     LLVM_FALLTHROUGH;
2577   }
2578   case Intrinsic::vector_reduce_umin:
2579   case Intrinsic::vector_reduce_umax: {
2580     if (IID == Intrinsic::vector_reduce_umin ||
2581         IID == Intrinsic::vector_reduce_umax) {
2582       // UMin/UMax reduction over the vector with (potentially-extended)
2583       // i1 element type is actually a (potentially-extended)
2584       // logical `and`/`or` reduction over the original non-extended value:
2585       //   vector_reduce_u{min,max}(?ext(<n x i1>))
2586       //     -->
2587       //   ?ext(vector_reduce_{and,or}(<n x i1>))
2588       Value *Arg = II->getArgOperand(0);
2589       Value *Vect;
2590       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2591         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2592           if (FTy->getElementType() == Builder.getInt1Ty()) {
2593             Value *Res = IID == Intrinsic::vector_reduce_umin
2594                              ? Builder.CreateAndReduce(Vect)
2595                              : Builder.CreateOrReduce(Vect);
2596             if (Arg != Vect)
2597               Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
2598                                        II->getType());
2599             return replaceInstUsesWith(CI, Res);
2600           }
2601       }
2602     }
2603     LLVM_FALLTHROUGH;
2604   }
2605   case Intrinsic::vector_reduce_smin:
2606   case Intrinsic::vector_reduce_smax: {
2607     if (IID == Intrinsic::vector_reduce_smin ||
2608         IID == Intrinsic::vector_reduce_smax) {
2609       // SMin/SMax reduction over the vector with (potentially-extended)
2610       // i1 element type is actually a (potentially-extended)
2611       // logical `and`/`or` reduction over the original non-extended value:
2612       //   vector_reduce_s{min,max}(<n x i1>)
2613       //     -->
2614       //   vector_reduce_{or,and}(<n x i1>)
2615       // and
2616       //   vector_reduce_s{min,max}(sext(<n x i1>))
2617       //     -->
2618       //   sext(vector_reduce_{or,and}(<n x i1>))
2619       // and
2620       //   vector_reduce_s{min,max}(zext(<n x i1>))
2621       //     -->
2622       //   zext(vector_reduce_{and,or}(<n x i1>))
2623       Value *Arg = II->getArgOperand(0);
2624       Value *Vect;
2625       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2626         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2627           if (FTy->getElementType() == Builder.getInt1Ty()) {
2628             Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
2629             if (Arg != Vect)
2630               ExtOpc = cast<CastInst>(Arg)->getOpcode();
2631             Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
2632                           (ExtOpc == Instruction::CastOps::ZExt))
2633                              ? Builder.CreateAndReduce(Vect)
2634                              : Builder.CreateOrReduce(Vect);
2635             if (Arg != Vect)
2636               Res = Builder.CreateCast(ExtOpc, Res, II->getType());
2637             return replaceInstUsesWith(CI, Res);
2638           }
2639       }
2640     }
2641     LLVM_FALLTHROUGH;
2642   }
2643   case Intrinsic::vector_reduce_fmax:
2644   case Intrinsic::vector_reduce_fmin:
2645   case Intrinsic::vector_reduce_fadd:
2646   case Intrinsic::vector_reduce_fmul: {
2647     bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd &&
2648                               IID != Intrinsic::vector_reduce_fmul) ||
2649                              II->hasAllowReassoc();
2650     const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
2651                              IID == Intrinsic::vector_reduce_fmul)
2652                                 ? 1
2653                                 : 0;
2654     Value *Arg = II->getArgOperand(ArgIdx);
2655     Value *V;
2656     ArrayRef<int> Mask;
2657     if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated ||
2658         !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) ||
2659         !cast<ShuffleVectorInst>(Arg)->isSingleSource())
2660       break;
2661     int Sz = Mask.size();
2662     SmallBitVector UsedIndices(Sz);
2663     for (int Idx : Mask) {
2664       if (Idx == UndefMaskElem || UsedIndices.test(Idx))
2665         break;
2666       UsedIndices.set(Idx);
2667     }
2668     // Can remove shuffle iff just shuffled elements, no repeats, undefs, or
2669     // other changes.
2670     if (UsedIndices.all()) {
2671       replaceUse(II->getOperandUse(ArgIdx), V);
2672       return nullptr;
2673     }
2674     break;
2675   }
2676   default: {
2677     // Handle target specific intrinsics
2678     Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
2679     if (V.hasValue())
2680       return V.getValue();
2681     break;
2682   }
2683   }
2684 
2685   if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
2686     return Shuf;
2687 
2688   // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
2689   // context, so it is handled in visitCallBase and we should trigger it.
2690   return visitCallBase(*II);
2691 }
2692 
2693 // Fence instruction simplification
2694 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
2695   auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
2696   // This check is solely here to handle arbitrary target-dependent syncscopes.
2697   // TODO: Can remove if does not matter in practice.
2698   if (NFI && FI.isIdenticalTo(NFI))
2699     return eraseInstFromFunction(FI);
2700 
2701   // Returns true if FI1 is identical or stronger fence than FI2.
2702   auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
2703     auto FI1SyncScope = FI1->getSyncScopeID();
2704     // Consider same scope, where scope is global or single-thread.
2705     if (FI1SyncScope != FI2->getSyncScopeID() ||
2706         (FI1SyncScope != SyncScope::System &&
2707          FI1SyncScope != SyncScope::SingleThread))
2708       return false;
2709 
2710     return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
2711   };
2712   if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
2713     return eraseInstFromFunction(FI);
2714 
2715   if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
2716     if (isIdenticalOrStrongerFence(PFI, &FI))
2717       return eraseInstFromFunction(FI);
2718   return nullptr;
2719 }
2720 
2721 // InvokeInst simplification
2722 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
2723   return visitCallBase(II);
2724 }
2725 
2726 // CallBrInst simplification
2727 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
2728   return visitCallBase(CBI);
2729 }
2730 
2731 /// If this cast does not affect the value passed through the varargs area, we
2732 /// can eliminate the use of the cast.
2733 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
2734                                          const DataLayout &DL,
2735                                          const CastInst *const CI,
2736                                          const int ix) {
2737   if (!CI->isLosslessCast())
2738     return false;
2739 
2740   // If this is a GC intrinsic, avoid munging types.  We need types for
2741   // statepoint reconstruction in SelectionDAG.
2742   // TODO: This is probably something which should be expanded to all
2743   // intrinsics since the entire point of intrinsics is that
2744   // they are understandable by the optimizer.
2745   if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
2746       isa<GCResultInst>(Call))
2747     return false;
2748 
2749   // Opaque pointers are compatible with any byval types.
2750   PointerType *SrcTy = cast<PointerType>(CI->getOperand(0)->getType());
2751   if (SrcTy->isOpaque())
2752     return true;
2753 
2754   // The size of ByVal or InAlloca arguments is derived from the type, so we
2755   // can't change to a type with a different size.  If the size were
2756   // passed explicitly we could avoid this check.
2757   if (!Call.isPassPointeeByValueArgument(ix))
2758     return true;
2759 
2760   // The transform currently only handles type replacement for byval, not other
2761   // type-carrying attributes.
2762   if (!Call.isByValArgument(ix))
2763     return false;
2764 
2765   Type *SrcElemTy = SrcTy->getNonOpaquePointerElementType();
2766   Type *DstElemTy = Call.getParamByValType(ix);
2767   if (!SrcElemTy->isSized() || !DstElemTy->isSized())
2768     return false;
2769   if (DL.getTypeAllocSize(SrcElemTy) != DL.getTypeAllocSize(DstElemTy))
2770     return false;
2771   return true;
2772 }
2773 
2774 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
2775   if (!CI->getCalledFunction()) return nullptr;
2776 
2777   // Skip optimizing notail and musttail calls so
2778   // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
2779   // LibCallSimplifier::optimizeCall should try to preseve tail calls though.
2780   if (CI->isMustTailCall() || CI->isNoTailCall())
2781     return nullptr;
2782 
2783   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
2784     replaceInstUsesWith(*From, With);
2785   };
2786   auto InstCombineErase = [this](Instruction *I) {
2787     eraseInstFromFunction(*I);
2788   };
2789   LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
2790                                InstCombineErase);
2791   if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
2792     ++NumSimplified;
2793     return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
2794   }
2795 
2796   return nullptr;
2797 }
2798 
2799 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
2800   // Strip off at most one level of pointer casts, looking for an alloca.  This
2801   // is good enough in practice and simpler than handling any number of casts.
2802   Value *Underlying = TrampMem->stripPointerCasts();
2803   if (Underlying != TrampMem &&
2804       (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
2805     return nullptr;
2806   if (!isa<AllocaInst>(Underlying))
2807     return nullptr;
2808 
2809   IntrinsicInst *InitTrampoline = nullptr;
2810   for (User *U : TrampMem->users()) {
2811     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2812     if (!II)
2813       return nullptr;
2814     if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
2815       if (InitTrampoline)
2816         // More than one init_trampoline writes to this value.  Give up.
2817         return nullptr;
2818       InitTrampoline = II;
2819       continue;
2820     }
2821     if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
2822       // Allow any number of calls to adjust.trampoline.
2823       continue;
2824     return nullptr;
2825   }
2826 
2827   // No call to init.trampoline found.
2828   if (!InitTrampoline)
2829     return nullptr;
2830 
2831   // Check that the alloca is being used in the expected way.
2832   if (InitTrampoline->getOperand(0) != TrampMem)
2833     return nullptr;
2834 
2835   return InitTrampoline;
2836 }
2837 
2838 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
2839                                                Value *TrampMem) {
2840   // Visit all the previous instructions in the basic block, and try to find a
2841   // init.trampoline which has a direct path to the adjust.trampoline.
2842   for (BasicBlock::iterator I = AdjustTramp->getIterator(),
2843                             E = AdjustTramp->getParent()->begin();
2844        I != E;) {
2845     Instruction *Inst = &*--I;
2846     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2847       if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
2848           II->getOperand(0) == TrampMem)
2849         return II;
2850     if (Inst->mayWriteToMemory())
2851       return nullptr;
2852   }
2853   return nullptr;
2854 }
2855 
2856 // Given a call to llvm.adjust.trampoline, find and return the corresponding
2857 // call to llvm.init.trampoline if the call to the trampoline can be optimized
2858 // to a direct call to a function.  Otherwise return NULL.
2859 static IntrinsicInst *findInitTrampoline(Value *Callee) {
2860   Callee = Callee->stripPointerCasts();
2861   IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
2862   if (!AdjustTramp ||
2863       AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
2864     return nullptr;
2865 
2866   Value *TrampMem = AdjustTramp->getOperand(0);
2867 
2868   if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
2869     return IT;
2870   if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
2871     return IT;
2872   return nullptr;
2873 }
2874 
2875 bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
2876                                             const TargetLibraryInfo *TLI) {
2877   // Note: We only handle cases which can't be driven from generic attributes
2878   // here.  So, for example, nonnull and noalias (which are common properties
2879   // of some allocation functions) are expected to be handled via annotation
2880   // of the respective allocator declaration with generic attributes.
2881   bool Changed = false;
2882 
2883   if (isAllocationFn(&Call, TLI)) {
2884     uint64_t Size;
2885     ObjectSizeOpts Opts;
2886     if (getObjectSize(&Call, Size, DL, TLI, Opts) && Size > 0) {
2887       // TODO: We really should just emit deref_or_null here and then
2888       // let the generic inference code combine that with nonnull.
2889       if (Call.hasRetAttr(Attribute::NonNull)) {
2890         Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
2891         Call.addRetAttr(
2892             Attribute::getWithDereferenceableBytes(Call.getContext(), Size));
2893       } else {
2894         Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
2895         Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
2896             Call.getContext(), Size));
2897       }
2898     }
2899   }
2900 
2901   // Add alignment attribute if alignment is a power of two constant.
2902   Value *Alignment = getAllocAlignment(&Call, TLI);
2903   if (!Alignment)
2904     return Changed;
2905 
2906   ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
2907   if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
2908     uint64_t AlignmentVal = AlignOpC->getZExtValue();
2909     if (llvm::isPowerOf2_64(AlignmentVal)) {
2910       Align ExistingAlign = Call.getRetAlign().valueOrOne();
2911       Align NewAlign = Align(AlignmentVal);
2912       if (NewAlign > ExistingAlign) {
2913         Call.addRetAttr(
2914             Attribute::getWithAlignment(Call.getContext(), NewAlign));
2915         Changed = true;
2916       }
2917     }
2918   }
2919   return Changed;
2920 }
2921 
2922 /// Improvements for call, callbr and invoke instructions.
2923 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
2924   bool Changed = annotateAnyAllocSite(Call, &TLI);
2925 
2926   // Mark any parameters that are known to be non-null with the nonnull
2927   // attribute.  This is helpful for inlining calls to functions with null
2928   // checks on their arguments.
2929   SmallVector<unsigned, 4> ArgNos;
2930   unsigned ArgNo = 0;
2931 
2932   for (Value *V : Call.args()) {
2933     if (V->getType()->isPointerTy() &&
2934         !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
2935         isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
2936       ArgNos.push_back(ArgNo);
2937     ArgNo++;
2938   }
2939 
2940   assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");
2941 
2942   if (!ArgNos.empty()) {
2943     AttributeList AS = Call.getAttributes();
2944     LLVMContext &Ctx = Call.getContext();
2945     AS = AS.addParamAttribute(Ctx, ArgNos,
2946                               Attribute::get(Ctx, Attribute::NonNull));
2947     Call.setAttributes(AS);
2948     Changed = true;
2949   }
2950 
2951   // If the callee is a pointer to a function, attempt to move any casts to the
2952   // arguments of the call/callbr/invoke.
2953   Value *Callee = Call.getCalledOperand();
2954   Function *CalleeF = dyn_cast<Function>(Callee);
2955   if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
2956       transformConstExprCastCall(Call))
2957     return nullptr;
2958 
2959   if (CalleeF) {
2960     // Remove the convergent attr on calls when the callee is not convergent.
2961     if (Call.isConvergent() && !CalleeF->isConvergent() &&
2962         !CalleeF->isIntrinsic()) {
2963       LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
2964                         << "\n");
2965       Call.setNotConvergent();
2966       return &Call;
2967     }
2968 
2969     // If the call and callee calling conventions don't match, and neither one
2970     // of the calling conventions is compatible with C calling convention
2971     // this call must be unreachable, as the call is undefined.
2972     if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
2973          !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
2974            TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
2975          !(Call.getCallingConv() == llvm::CallingConv::C &&
2976            TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
2977         // Only do this for calls to a function with a body.  A prototype may
2978         // not actually end up matching the implementation's calling conv for a
2979         // variety of reasons (e.g. it may be written in assembly).
2980         !CalleeF->isDeclaration()) {
2981       Instruction *OldCall = &Call;
2982       CreateNonTerminatorUnreachable(OldCall);
2983       // If OldCall does not return void then replaceInstUsesWith poison.
2984       // This allows ValueHandlers and custom metadata to adjust itself.
2985       if (!OldCall->getType()->isVoidTy())
2986         replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
2987       if (isa<CallInst>(OldCall))
2988         return eraseInstFromFunction(*OldCall);
2989 
2990       // We cannot remove an invoke or a callbr, because it would change thexi
2991       // CFG, just change the callee to a null pointer.
2992       cast<CallBase>(OldCall)->setCalledFunction(
2993           CalleeF->getFunctionType(),
2994           Constant::getNullValue(CalleeF->getType()));
2995       return nullptr;
2996     }
2997   }
2998 
2999   // Calling a null function pointer is undefined if a null address isn't
3000   // dereferenceable.
3001   if ((isa<ConstantPointerNull>(Callee) &&
3002        !NullPointerIsDefined(Call.getFunction())) ||
3003       isa<UndefValue>(Callee)) {
3004     // If Call does not return void then replaceInstUsesWith poison.
3005     // This allows ValueHandlers and custom metadata to adjust itself.
3006     if (!Call.getType()->isVoidTy())
3007       replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
3008 
3009     if (Call.isTerminator()) {
3010       // Can't remove an invoke or callbr because we cannot change the CFG.
3011       return nullptr;
3012     }
3013 
3014     // This instruction is not reachable, just remove it.
3015     CreateNonTerminatorUnreachable(&Call);
3016     return eraseInstFromFunction(Call);
3017   }
3018 
3019   if (IntrinsicInst *II = findInitTrampoline(Callee))
3020     return transformCallThroughTrampoline(Call, *II);
3021 
3022   // TODO: Drop this transform once opaque pointer transition is done.
3023   FunctionType *FTy = Call.getFunctionType();
3024   if (FTy->isVarArg()) {
3025     int ix = FTy->getNumParams();
3026     // See if we can optimize any arguments passed through the varargs area of
3027     // the call.
3028     for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
3029          I != E; ++I, ++ix) {
3030       CastInst *CI = dyn_cast<CastInst>(*I);
3031       if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
3032         replaceUse(*I, CI->getOperand(0));
3033 
3034         // Update the byval type to match the pointer type.
3035         // Not necessary for opaque pointers.
3036         PointerType *NewTy = cast<PointerType>(CI->getOperand(0)->getType());
3037         if (!NewTy->isOpaque() && Call.isByValArgument(ix)) {
3038           Call.removeParamAttr(ix, Attribute::ByVal);
3039           Call.addParamAttr(ix, Attribute::getWithByValType(
3040                                     Call.getContext(),
3041                                     NewTy->getNonOpaquePointerElementType()));
3042         }
3043         Changed = true;
3044       }
3045     }
3046   }
3047 
3048   if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
3049     InlineAsm *IA = cast<InlineAsm>(Callee);
3050     if (!IA->canThrow()) {
3051       // Normal inline asm calls cannot throw - mark them
3052       // 'nounwind'.
3053       Call.setDoesNotThrow();
3054       Changed = true;
3055     }
3056   }
3057 
3058   // Try to optimize the call if possible, we require DataLayout for most of
3059   // this.  None of these calls are seen as possibly dead so go ahead and
3060   // delete the instruction now.
3061   if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
3062     Instruction *I = tryOptimizeCall(CI);
3063     // If we changed something return the result, etc. Otherwise let
3064     // the fallthrough check.
3065     if (I) return eraseInstFromFunction(*I);
3066   }
3067 
3068   if (!Call.use_empty() && !Call.isMustTailCall())
3069     if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
3070       Type *CallTy = Call.getType();
3071       Type *RetArgTy = ReturnedArg->getType();
3072       if (RetArgTy->canLosslesslyBitCastTo(CallTy))
3073         return replaceInstUsesWith(
3074             Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
3075     }
3076 
3077   if (isAllocationFn(&Call, &TLI) &&
3078       isAllocRemovable(&cast<CallBase>(Call), &TLI))
3079     return visitAllocSite(Call);
3080 
3081   // Handle intrinsics which can be used in both call and invoke context.
3082   switch (Call.getIntrinsicID()) {
3083   case Intrinsic::experimental_gc_statepoint: {
3084     GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
3085     SmallPtrSet<Value *, 32> LiveGcValues;
3086     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
3087       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
3088 
3089       // Remove the relocation if unused.
3090       if (GCR.use_empty()) {
3091         eraseInstFromFunction(GCR);
3092         continue;
3093       }
3094 
3095       Value *DerivedPtr = GCR.getDerivedPtr();
3096       Value *BasePtr = GCR.getBasePtr();
3097 
3098       // Undef is undef, even after relocation.
3099       if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
3100         replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
3101         eraseInstFromFunction(GCR);
3102         continue;
3103       }
3104 
3105       if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
3106         // The relocation of null will be null for most any collector.
3107         // TODO: provide a hook for this in GCStrategy.  There might be some
3108         // weird collector this property does not hold for.
3109         if (isa<ConstantPointerNull>(DerivedPtr)) {
3110           // Use null-pointer of gc_relocate's type to replace it.
3111           replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
3112           eraseInstFromFunction(GCR);
3113           continue;
3114         }
3115 
3116         // isKnownNonNull -> nonnull attribute
3117         if (!GCR.hasRetAttr(Attribute::NonNull) &&
3118             isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
3119           GCR.addRetAttr(Attribute::NonNull);
3120           // We discovered new fact, re-check users.
3121           Worklist.pushUsersToWorkList(GCR);
3122         }
3123       }
3124 
3125       // If we have two copies of the same pointer in the statepoint argument
3126       // list, canonicalize to one.  This may let us common gc.relocates.
3127       if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
3128           GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
3129         auto *OpIntTy = GCR.getOperand(2)->getType();
3130         GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
3131       }
3132 
3133       // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3134       // Canonicalize on the type from the uses to the defs
3135 
3136       // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
3137       LiveGcValues.insert(BasePtr);
3138       LiveGcValues.insert(DerivedPtr);
3139     }
3140     Optional<OperandBundleUse> Bundle =
3141         GCSP.getOperandBundle(LLVMContext::OB_gc_live);
3142     unsigned NumOfGCLives = LiveGcValues.size();
3143     if (!Bundle.hasValue() || NumOfGCLives == Bundle->Inputs.size())
3144       break;
3145     // We can reduce the size of gc live bundle.
3146     DenseMap<Value *, unsigned> Val2Idx;
3147     std::vector<Value *> NewLiveGc;
3148     for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) {
3149       Value *V = Bundle->Inputs[I];
3150       if (Val2Idx.count(V))
3151         continue;
3152       if (LiveGcValues.count(V)) {
3153         Val2Idx[V] = NewLiveGc.size();
3154         NewLiveGc.push_back(V);
3155       } else
3156         Val2Idx[V] = NumOfGCLives;
3157     }
3158     // Update all gc.relocates
3159     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
3160       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
3161       Value *BasePtr = GCR.getBasePtr();
3162       assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
3163              "Missed live gc for base pointer");
3164       auto *OpIntTy1 = GCR.getOperand(1)->getType();
3165       GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
3166       Value *DerivedPtr = GCR.getDerivedPtr();
3167       assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
3168              "Missed live gc for derived pointer");
3169       auto *OpIntTy2 = GCR.getOperand(2)->getType();
3170       GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
3171     }
3172     // Create new statepoint instruction.
3173     OperandBundleDef NewBundle("gc-live", NewLiveGc);
3174     return CallBase::Create(&Call, NewBundle);
3175   }
3176   default: { break; }
3177   }
3178 
3179   return Changed ? &Call : nullptr;
3180 }
3181 
3182 /// If the callee is a constexpr cast of a function, attempt to move the cast to
3183 /// the arguments of the call/callbr/invoke.
3184 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
3185   auto *Callee =
3186       dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3187   if (!Callee)
3188     return false;
3189 
3190   // If this is a call to a thunk function, don't remove the cast. Thunks are
3191   // used to transparently forward all incoming parameters and outgoing return
3192   // values, so it's important to leave the cast in place.
3193   if (Callee->hasFnAttribute("thunk"))
3194     return false;
3195 
3196   // If this is a musttail call, the callee's prototype must match the caller's
3197   // prototype with the exception of pointee types. The code below doesn't
3198   // implement that, so we can't do this transform.
3199   // TODO: Do the transform if it only requires adding pointer casts.
3200   if (Call.isMustTailCall())
3201     return false;
3202 
3203   Instruction *Caller = &Call;
3204   const AttributeList &CallerPAL = Call.getAttributes();
3205 
3206   // Okay, this is a cast from a function to a different type.  Unless doing so
3207   // would cause a type conversion of one of our arguments, change this call to
3208   // be a direct call with arguments casted to the appropriate types.
3209   FunctionType *FT = Callee->getFunctionType();
3210   Type *OldRetTy = Caller->getType();
3211   Type *NewRetTy = FT->getReturnType();
3212 
3213   // Check to see if we are changing the return type...
3214   if (OldRetTy != NewRetTy) {
3215 
3216     if (NewRetTy->isStructTy())
3217       return false; // TODO: Handle multiple return values.
3218 
3219     if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
3220       if (Callee->isDeclaration())
3221         return false;   // Cannot transform this return value.
3222 
3223       if (!Caller->use_empty() &&
3224           // void -> non-void is handled specially
3225           !NewRetTy->isVoidTy())
3226         return false;   // Cannot transform this return value.
3227     }
3228 
3229     if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
3230       AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
3231       if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
3232         return false;   // Attribute not compatible with transformed value.
3233     }
3234 
3235     // If the callbase is an invoke/callbr instruction, and the return value is
3236     // used by a PHI node in a successor, we cannot change the return type of
3237     // the call because there is no place to put the cast instruction (without
3238     // breaking the critical edge).  Bail out in this case.
3239     if (!Caller->use_empty()) {
3240       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
3241         for (User *U : II->users())
3242           if (PHINode *PN = dyn_cast<PHINode>(U))
3243             if (PN->getParent() == II->getNormalDest() ||
3244                 PN->getParent() == II->getUnwindDest())
3245               return false;
3246       // FIXME: Be conservative for callbr to avoid a quadratic search.
3247       if (isa<CallBrInst>(Caller))
3248         return false;
3249     }
3250   }
3251 
3252   unsigned NumActualArgs = Call.arg_size();
3253   unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
3254 
3255   // Prevent us turning:
3256   // declare void @takes_i32_inalloca(i32* inalloca)
3257   //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
3258   //
3259   // into:
3260   //  call void @takes_i32_inalloca(i32* null)
3261   //
3262   //  Similarly, avoid folding away bitcasts of byval calls.
3263   if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
3264       Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
3265     return false;
3266 
3267   auto AI = Call.arg_begin();
3268   for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
3269     Type *ParamTy = FT->getParamType(i);
3270     Type *ActTy = (*AI)->getType();
3271 
3272     if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
3273       return false;   // Cannot transform this parameter value.
3274 
3275     // Check if there are any incompatible attributes we cannot drop safely.
3276     if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
3277             .overlaps(AttributeFuncs::typeIncompatible(
3278                 ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP)))
3279       return false;   // Attribute not compatible with transformed value.
3280 
3281     if (Call.isInAllocaArgument(i) ||
3282         CallerPAL.hasParamAttr(i, Attribute::Preallocated))
3283       return false; // Cannot transform to and from inalloca/preallocated.
3284 
3285     if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
3286       return false;
3287 
3288     // If the parameter is passed as a byval argument, then we have to have a
3289     // sized type and the sized type has to have the same size as the old type.
3290     if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
3291       PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
3292       if (!ParamPTy)
3293         return false;
3294 
3295       if (!ParamPTy->isOpaque()) {
3296         Type *ParamElTy = ParamPTy->getNonOpaquePointerElementType();
3297         if (!ParamElTy->isSized())
3298           return false;
3299 
3300         Type *CurElTy = Call.getParamByValType(i);
3301         if (DL.getTypeAllocSize(CurElTy) != DL.getTypeAllocSize(ParamElTy))
3302           return false;
3303       }
3304     }
3305   }
3306 
3307   if (Callee->isDeclaration()) {
3308     // Do not delete arguments unless we have a function body.
3309     if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
3310       return false;
3311 
3312     // If the callee is just a declaration, don't change the varargsness of the
3313     // call.  We don't want to introduce a varargs call where one doesn't
3314     // already exist.
3315     if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
3316       return false;
3317 
3318     // If both the callee and the cast type are varargs, we still have to make
3319     // sure the number of fixed parameters are the same or we have the same
3320     // ABI issues as if we introduce a varargs call.
3321     if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
3322         FT->getNumParams() != Call.getFunctionType()->getNumParams())
3323       return false;
3324   }
3325 
3326   if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
3327       !CallerPAL.isEmpty()) {
3328     // In this case we have more arguments than the new function type, but we
3329     // won't be dropping them.  Check that these extra arguments have attributes
3330     // that are compatible with being a vararg call argument.
3331     unsigned SRetIdx;
3332     if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
3333         SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
3334       return false;
3335   }
3336 
3337   // Okay, we decided that this is a safe thing to do: go ahead and start
3338   // inserting cast instructions as necessary.
3339   SmallVector<Value *, 8> Args;
3340   SmallVector<AttributeSet, 8> ArgAttrs;
3341   Args.reserve(NumActualArgs);
3342   ArgAttrs.reserve(NumActualArgs);
3343 
3344   // Get any return attributes.
3345   AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
3346 
3347   // If the return value is not being used, the type may not be compatible
3348   // with the existing attributes.  Wipe out any problematic attributes.
3349   RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
3350 
3351   LLVMContext &Ctx = Call.getContext();
3352   AI = Call.arg_begin();
3353   for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
3354     Type *ParamTy = FT->getParamType(i);
3355 
3356     Value *NewArg = *AI;
3357     if ((*AI)->getType() != ParamTy)
3358       NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
3359     Args.push_back(NewArg);
3360 
3361     // Add any parameter attributes except the ones incompatible with the new
3362     // type. Note that we made sure all incompatible ones are safe to drop.
3363     AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
3364         ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP);
3365     if (CallerPAL.hasParamAttr(i, Attribute::ByVal) &&
3366         !ParamTy->isOpaquePointerTy()) {
3367       AttrBuilder AB(Ctx, CallerPAL.getParamAttrs(i).removeAttributes(
3368                               Ctx, IncompatibleAttrs));
3369       AB.addByValAttr(ParamTy->getNonOpaquePointerElementType());
3370       ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
3371     } else {
3372       ArgAttrs.push_back(
3373           CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
3374     }
3375   }
3376 
3377   // If the function takes more arguments than the call was taking, add them
3378   // now.
3379   for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
3380     Args.push_back(Constant::getNullValue(FT->getParamType(i)));
3381     ArgAttrs.push_back(AttributeSet());
3382   }
3383 
3384   // If we are removing arguments to the function, emit an obnoxious warning.
3385   if (FT->getNumParams() < NumActualArgs) {
3386     // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
3387     if (FT->isVarArg()) {
3388       // Add all of the arguments in their promoted form to the arg list.
3389       for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
3390         Type *PTy = getPromotedType((*AI)->getType());
3391         Value *NewArg = *AI;
3392         if (PTy != (*AI)->getType()) {
3393           // Must promote to pass through va_arg area!
3394           Instruction::CastOps opcode =
3395             CastInst::getCastOpcode(*AI, false, PTy, false);
3396           NewArg = Builder.CreateCast(opcode, *AI, PTy);
3397         }
3398         Args.push_back(NewArg);
3399 
3400         // Add any parameter attributes.
3401         ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
3402       }
3403     }
3404   }
3405 
3406   AttributeSet FnAttrs = CallerPAL.getFnAttrs();
3407 
3408   if (NewRetTy->isVoidTy())
3409     Caller->setName("");   // Void type should not have a name.
3410 
3411   assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
3412          "missing argument attributes");
3413   AttributeList NewCallerPAL = AttributeList::get(
3414       Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
3415 
3416   SmallVector<OperandBundleDef, 1> OpBundles;
3417   Call.getOperandBundlesAsDefs(OpBundles);
3418 
3419   CallBase *NewCall;
3420   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3421     NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
3422                                    II->getUnwindDest(), Args, OpBundles);
3423   } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
3424     NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
3425                                    CBI->getIndirectDests(), Args, OpBundles);
3426   } else {
3427     NewCall = Builder.CreateCall(Callee, Args, OpBundles);
3428     cast<CallInst>(NewCall)->setTailCallKind(
3429         cast<CallInst>(Caller)->getTailCallKind());
3430   }
3431   NewCall->takeName(Caller);
3432   NewCall->setCallingConv(Call.getCallingConv());
3433   NewCall->setAttributes(NewCallerPAL);
3434 
3435   // Preserve prof metadata if any.
3436   NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
3437 
3438   // Insert a cast of the return type as necessary.
3439   Instruction *NC = NewCall;
3440   Value *NV = NC;
3441   if (OldRetTy != NV->getType() && !Caller->use_empty()) {
3442     if (!NV->getType()->isVoidTy()) {
3443       NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
3444       NC->setDebugLoc(Caller->getDebugLoc());
3445 
3446       // If this is an invoke/callbr instruction, we should insert it after the
3447       // first non-phi instruction in the normal successor block.
3448       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3449         BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
3450         InsertNewInstBefore(NC, *I);
3451       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
3452         BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
3453         InsertNewInstBefore(NC, *I);
3454       } else {
3455         // Otherwise, it's a call, just insert cast right after the call.
3456         InsertNewInstBefore(NC, *Caller);
3457       }
3458       Worklist.pushUsersToWorkList(*Caller);
3459     } else {
3460       NV = UndefValue::get(Caller->getType());
3461     }
3462   }
3463 
3464   if (!Caller->use_empty())
3465     replaceInstUsesWith(*Caller, NV);
3466   else if (Caller->hasValueHandle()) {
3467     if (OldRetTy == NV->getType())
3468       ValueHandleBase::ValueIsRAUWd(Caller, NV);
3469     else
3470       // We cannot call ValueIsRAUWd with a different type, and the
3471       // actual tracked value will disappear.
3472       ValueHandleBase::ValueIsDeleted(Caller);
3473   }
3474 
3475   eraseInstFromFunction(*Caller);
3476   return true;
3477 }
3478 
3479 /// Turn a call to a function created by init_trampoline / adjust_trampoline
3480 /// intrinsic pair into a direct call to the underlying function.
3481 Instruction *
3482 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
3483                                                  IntrinsicInst &Tramp) {
3484   Value *Callee = Call.getCalledOperand();
3485   Type *CalleeTy = Callee->getType();
3486   FunctionType *FTy = Call.getFunctionType();
3487   AttributeList Attrs = Call.getAttributes();
3488 
3489   // If the call already has the 'nest' attribute somewhere then give up -
3490   // otherwise 'nest' would occur twice after splicing in the chain.
3491   if (Attrs.hasAttrSomewhere(Attribute::Nest))
3492     return nullptr;
3493 
3494   Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
3495   FunctionType *NestFTy = NestF->getFunctionType();
3496 
3497   AttributeList NestAttrs = NestF->getAttributes();
3498   if (!NestAttrs.isEmpty()) {
3499     unsigned NestArgNo = 0;
3500     Type *NestTy = nullptr;
3501     AttributeSet NestAttr;
3502 
3503     // Look for a parameter marked with the 'nest' attribute.
3504     for (FunctionType::param_iterator I = NestFTy->param_begin(),
3505                                       E = NestFTy->param_end();
3506          I != E; ++NestArgNo, ++I) {
3507       AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
3508       if (AS.hasAttribute(Attribute::Nest)) {
3509         // Record the parameter type and any other attributes.
3510         NestTy = *I;
3511         NestAttr = AS;
3512         break;
3513       }
3514     }
3515 
3516     if (NestTy) {
3517       std::vector<Value*> NewArgs;
3518       std::vector<AttributeSet> NewArgAttrs;
3519       NewArgs.reserve(Call.arg_size() + 1);
3520       NewArgAttrs.reserve(Call.arg_size());
3521 
3522       // Insert the nest argument into the call argument list, which may
3523       // mean appending it.  Likewise for attributes.
3524 
3525       {
3526         unsigned ArgNo = 0;
3527         auto I = Call.arg_begin(), E = Call.arg_end();
3528         do {
3529           if (ArgNo == NestArgNo) {
3530             // Add the chain argument and attributes.
3531             Value *NestVal = Tramp.getArgOperand(2);
3532             if (NestVal->getType() != NestTy)
3533               NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
3534             NewArgs.push_back(NestVal);
3535             NewArgAttrs.push_back(NestAttr);
3536           }
3537 
3538           if (I == E)
3539             break;
3540 
3541           // Add the original argument and attributes.
3542           NewArgs.push_back(*I);
3543           NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
3544 
3545           ++ArgNo;
3546           ++I;
3547         } while (true);
3548       }
3549 
3550       // The trampoline may have been bitcast to a bogus type (FTy).
3551       // Handle this by synthesizing a new function type, equal to FTy
3552       // with the chain parameter inserted.
3553 
3554       std::vector<Type*> NewTypes;
3555       NewTypes.reserve(FTy->getNumParams()+1);
3556 
3557       // Insert the chain's type into the list of parameter types, which may
3558       // mean appending it.
3559       {
3560         unsigned ArgNo = 0;
3561         FunctionType::param_iterator I = FTy->param_begin(),
3562           E = FTy->param_end();
3563 
3564         do {
3565           if (ArgNo == NestArgNo)
3566             // Add the chain's type.
3567             NewTypes.push_back(NestTy);
3568 
3569           if (I == E)
3570             break;
3571 
3572           // Add the original type.
3573           NewTypes.push_back(*I);
3574 
3575           ++ArgNo;
3576           ++I;
3577         } while (true);
3578       }
3579 
3580       // Replace the trampoline call with a direct call.  Let the generic
3581       // code sort out any function type mismatches.
3582       FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
3583                                                 FTy->isVarArg());
3584       Constant *NewCallee =
3585         NestF->getType() == PointerType::getUnqual(NewFTy) ?
3586         NestF : ConstantExpr::getBitCast(NestF,
3587                                          PointerType::getUnqual(NewFTy));
3588       AttributeList NewPAL =
3589           AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
3590                              Attrs.getRetAttrs(), NewArgAttrs);
3591 
3592       SmallVector<OperandBundleDef, 1> OpBundles;
3593       Call.getOperandBundlesAsDefs(OpBundles);
3594 
3595       Instruction *NewCaller;
3596       if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
3597         NewCaller = InvokeInst::Create(NewFTy, NewCallee,
3598                                        II->getNormalDest(), II->getUnwindDest(),
3599                                        NewArgs, OpBundles);
3600         cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
3601         cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
3602       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
3603         NewCaller =
3604             CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
3605                                CBI->getIndirectDests(), NewArgs, OpBundles);
3606         cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
3607         cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
3608       } else {
3609         NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
3610         cast<CallInst>(NewCaller)->setTailCallKind(
3611             cast<CallInst>(Call).getTailCallKind());
3612         cast<CallInst>(NewCaller)->setCallingConv(
3613             cast<CallInst>(Call).getCallingConv());
3614         cast<CallInst>(NewCaller)->setAttributes(NewPAL);
3615       }
3616       NewCaller->setDebugLoc(Call.getDebugLoc());
3617 
3618       return NewCaller;
3619     }
3620   }
3621 
3622   // Replace the trampoline call with a direct call.  Since there is no 'nest'
3623   // parameter, there is no need to adjust the argument list.  Let the generic
3624   // code sort out any function type mismatches.
3625   Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
3626   Call.setCalledFunction(FTy, NewCallee);
3627   return &Call;
3628 }
3629