1 //===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines routines for folding instructions into constants.
10 //
11 // Also, to supplement the basic IR ConstantExpr simplifications,
12 // this file defines some additional folding routines that can make use of
13 // DataLayout information. These functions cannot go in IR due to library
14 // dependency issues.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/Analysis/ConstantFolding.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/APSInt.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/TargetFolder.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Analysis/VectorUtils.h"
31 #include "llvm/Config/config.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalValue.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/InstrTypes.h"
40 #include "llvm/IR/Instruction.h"
41 #include "llvm/IR/Instructions.h"
42 #include "llvm/IR/IntrinsicInst.h"
43 #include "llvm/IR/Intrinsics.h"
44 #include "llvm/IR/IntrinsicsAArch64.h"
45 #include "llvm/IR/IntrinsicsAMDGPU.h"
46 #include "llvm/IR/IntrinsicsARM.h"
47 #include "llvm/IR/IntrinsicsWebAssembly.h"
48 #include "llvm/IR/IntrinsicsX86.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/Support/Casting.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/KnownBits.h"
55 #include "llvm/Support/MathExtras.h"
56 #include <cassert>
57 #include <cerrno>
58 #include <cfenv>
59 #include <cmath>
60 #include <cstdint>
61 
62 using namespace llvm;
63 
64 namespace {
65 
66 //===----------------------------------------------------------------------===//
67 // Constant Folding internal helper functions
68 //===----------------------------------------------------------------------===//
69 
70 static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
71                                         Constant *C, Type *SrcEltTy,
72                                         unsigned NumSrcElts,
73                                         const DataLayout &DL) {
74   // Now that we know that the input value is a vector of integers, just shift
75   // and insert them into our result.
76   unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
77   for (unsigned i = 0; i != NumSrcElts; ++i) {
78     Constant *Element;
79     if (DL.isLittleEndian())
80       Element = C->getAggregateElement(NumSrcElts - i - 1);
81     else
82       Element = C->getAggregateElement(i);
83 
84     if (Element && isa<UndefValue>(Element)) {
85       Result <<= BitShift;
86       continue;
87     }
88 
89     auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
90     if (!ElementCI)
91       return ConstantExpr::getBitCast(C, DestTy);
92 
93     Result <<= BitShift;
94     Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
95   }
96 
97   return nullptr;
98 }
99 
100 /// Constant fold bitcast, symbolically evaluating it with DataLayout.
101 /// This always returns a non-null constant, but it may be a
102 /// ConstantExpr if unfoldable.
103 Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
104   assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
105          "Invalid constantexpr bitcast!");
106 
107   // Catch the obvious splat cases.
108   if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
109     return Res;
110 
111   if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
112     // Handle a vector->scalar integer/fp cast.
113     if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
114       unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
115       Type *SrcEltTy = VTy->getElementType();
116 
117       // If the vector is a vector of floating point, convert it to vector of int
118       // to simplify things.
119       if (SrcEltTy->isFloatingPointTy()) {
120         unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
121         auto *SrcIVTy = FixedVectorType::get(
122             IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
123         // Ask IR to do the conversion now that #elts line up.
124         C = ConstantExpr::getBitCast(C, SrcIVTy);
125       }
126 
127       APInt Result(DL.getTypeSizeInBits(DestTy), 0);
128       if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
129                                                 SrcEltTy, NumSrcElts, DL))
130         return CE;
131 
132       if (isa<IntegerType>(DestTy))
133         return ConstantInt::get(DestTy, Result);
134 
135       APFloat FP(DestTy->getFltSemantics(), Result);
136       return ConstantFP::get(DestTy->getContext(), FP);
137     }
138   }
139 
140   // The code below only handles casts to vectors currently.
141   auto *DestVTy = dyn_cast<VectorType>(DestTy);
142   if (!DestVTy)
143     return ConstantExpr::getBitCast(C, DestTy);
144 
145   // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
146   // vector so the code below can handle it uniformly.
147   if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
148     Constant *Ops = C; // don't take the address of C!
149     return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
150   }
151 
152   // If this is a bitcast from constant vector -> vector, fold it.
153   if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
154     return ConstantExpr::getBitCast(C, DestTy);
155 
156   // If the element types match, IR can fold it.
157   unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
158   unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
159   if (NumDstElt == NumSrcElt)
160     return ConstantExpr::getBitCast(C, DestTy);
161 
162   Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
163   Type *DstEltTy = DestVTy->getElementType();
164 
165   // Otherwise, we're changing the number of elements in a vector, which
166   // requires endianness information to do the right thing.  For example,
167   //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
168   // folds to (little endian):
169   //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
170   // and to (big endian):
171   //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
172 
173   // First thing is first.  We only want to think about integer here, so if
174   // we have something in FP form, recast it as integer.
175   if (DstEltTy->isFloatingPointTy()) {
176     // Fold to an vector of integers with same size as our FP type.
177     unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
178     auto *DestIVTy = FixedVectorType::get(
179         IntegerType::get(C->getContext(), FPWidth), NumDstElt);
180     // Recursively handle this integer conversion, if possible.
181     C = FoldBitCast(C, DestIVTy, DL);
182 
183     // Finally, IR can handle this now that #elts line up.
184     return ConstantExpr::getBitCast(C, DestTy);
185   }
186 
187   // Okay, we know the destination is integer, if the input is FP, convert
188   // it to integer first.
189   if (SrcEltTy->isFloatingPointTy()) {
190     unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
191     auto *SrcIVTy = FixedVectorType::get(
192         IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
193     // Ask IR to do the conversion now that #elts line up.
194     C = ConstantExpr::getBitCast(C, SrcIVTy);
195     // If IR wasn't able to fold it, bail out.
196     if (!isa<ConstantVector>(C) &&  // FIXME: Remove ConstantVector.
197         !isa<ConstantDataVector>(C))
198       return C;
199   }
200 
201   // Now we know that the input and output vectors are both integer vectors
202   // of the same size, and that their #elements is not the same.  Do the
203   // conversion here, which depends on whether the input or output has
204   // more elements.
205   bool isLittleEndian = DL.isLittleEndian();
206 
207   SmallVector<Constant*, 32> Result;
208   if (NumDstElt < NumSrcElt) {
209     // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
210     Constant *Zero = Constant::getNullValue(DstEltTy);
211     unsigned Ratio = NumSrcElt/NumDstElt;
212     unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
213     unsigned SrcElt = 0;
214     for (unsigned i = 0; i != NumDstElt; ++i) {
215       // Build each element of the result.
216       Constant *Elt = Zero;
217       unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
218       for (unsigned j = 0; j != Ratio; ++j) {
219         Constant *Src = C->getAggregateElement(SrcElt++);
220         if (Src && isa<UndefValue>(Src))
221           Src = Constant::getNullValue(
222               cast<VectorType>(C->getType())->getElementType());
223         else
224           Src = dyn_cast_or_null<ConstantInt>(Src);
225         if (!Src)  // Reject constantexpr elements.
226           return ConstantExpr::getBitCast(C, DestTy);
227 
228         // Zero extend the element to the right size.
229         Src = ConstantExpr::getZExt(Src, Elt->getType());
230 
231         // Shift it to the right place, depending on endianness.
232         Src = ConstantExpr::getShl(Src,
233                                    ConstantInt::get(Src->getType(), ShiftAmt));
234         ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
235 
236         // Mix it in.
237         Elt = ConstantExpr::getOr(Elt, Src);
238       }
239       Result.push_back(Elt);
240     }
241     return ConstantVector::get(Result);
242   }
243 
244   // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
245   unsigned Ratio = NumDstElt/NumSrcElt;
246   unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
247 
248   // Loop over each source value, expanding into multiple results.
249   for (unsigned i = 0; i != NumSrcElt; ++i) {
250     auto *Element = C->getAggregateElement(i);
251 
252     if (!Element) // Reject constantexpr elements.
253       return ConstantExpr::getBitCast(C, DestTy);
254 
255     if (isa<UndefValue>(Element)) {
256       // Correctly Propagate undef values.
257       Result.append(Ratio, UndefValue::get(DstEltTy));
258       continue;
259     }
260 
261     auto *Src = dyn_cast<ConstantInt>(Element);
262     if (!Src)
263       return ConstantExpr::getBitCast(C, DestTy);
264 
265     unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
266     for (unsigned j = 0; j != Ratio; ++j) {
267       // Shift the piece of the value into the right place, depending on
268       // endianness.
269       Constant *Elt = ConstantExpr::getLShr(Src,
270                                   ConstantInt::get(Src->getType(), ShiftAmt));
271       ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
272 
273       // Truncate the element to an integer with the same pointer size and
274       // convert the element back to a pointer using a inttoptr.
275       if (DstEltTy->isPointerTy()) {
276         IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
277         Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
278         Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
279         continue;
280       }
281 
282       // Truncate and remember this piece.
283       Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
284     }
285   }
286 
287   return ConstantVector::get(Result);
288 }
289 
290 } // end anonymous namespace
291 
292 /// If this constant is a constant offset from a global, return the global and
293 /// the constant. Because of constantexprs, this function is recursive.
294 bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
295                                       APInt &Offset, const DataLayout &DL,
296                                       DSOLocalEquivalent **DSOEquiv) {
297   if (DSOEquiv)
298     *DSOEquiv = nullptr;
299 
300   // Trivial case, constant is the global.
301   if ((GV = dyn_cast<GlobalValue>(C))) {
302     unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
303     Offset = APInt(BitWidth, 0);
304     return true;
305   }
306 
307   if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
308     if (DSOEquiv)
309       *DSOEquiv = FoundDSOEquiv;
310     GV = FoundDSOEquiv->getGlobalValue();
311     unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
312     Offset = APInt(BitWidth, 0);
313     return true;
314   }
315 
316   // Otherwise, if this isn't a constant expr, bail out.
317   auto *CE = dyn_cast<ConstantExpr>(C);
318   if (!CE) return false;
319 
320   // Look through ptr->int and ptr->ptr casts.
321   if (CE->getOpcode() == Instruction::PtrToInt ||
322       CE->getOpcode() == Instruction::BitCast)
323     return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
324                                       DSOEquiv);
325 
326   // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
327   auto *GEP = dyn_cast<GEPOperator>(CE);
328   if (!GEP)
329     return false;
330 
331   unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
332   APInt TmpOffset(BitWidth, 0);
333 
334   // If the base isn't a global+constant, we aren't either.
335   if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
336                                   DSOEquiv))
337     return false;
338 
339   // Otherwise, add any offset that our operands provide.
340   if (!GEP->accumulateConstantOffset(DL, TmpOffset))
341     return false;
342 
343   Offset = TmpOffset;
344   return true;
345 }
346 
347 Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
348                                          const DataLayout &DL) {
349   do {
350     Type *SrcTy = C->getType();
351     if (SrcTy == DestTy)
352       return C;
353 
354     TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
355     TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
356     if (!TypeSize::isKnownGE(SrcSize, DestSize))
357       return nullptr;
358 
359     // Catch the obvious splat cases (since all-zeros can coerce non-integral
360     // pointers legally).
361     if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
362       return Res;
363 
364     // If the type sizes are the same and a cast is legal, just directly
365     // cast the constant.
366     // But be careful not to coerce non-integral pointers illegally.
367     if (SrcSize == DestSize &&
368         DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
369             DL.isNonIntegralPointerType(DestTy->getScalarType())) {
370       Instruction::CastOps Cast = Instruction::BitCast;
371       // If we are going from a pointer to int or vice versa, we spell the cast
372       // differently.
373       if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
374         Cast = Instruction::IntToPtr;
375       else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
376         Cast = Instruction::PtrToInt;
377 
378       if (CastInst::castIsValid(Cast, C, DestTy))
379         return ConstantExpr::getCast(Cast, C, DestTy);
380     }
381 
382     // If this isn't an aggregate type, there is nothing we can do to drill down
383     // and find a bitcastable constant.
384     if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
385       return nullptr;
386 
387     // We're simulating a load through a pointer that was bitcast to point to
388     // a different type, so we can try to walk down through the initial
389     // elements of an aggregate to see if some part of the aggregate is
390     // castable to implement the "load" semantic model.
391     if (SrcTy->isStructTy()) {
392       // Struct types might have leading zero-length elements like [0 x i32],
393       // which are certainly not what we are looking for, so skip them.
394       unsigned Elem = 0;
395       Constant *ElemC;
396       do {
397         ElemC = C->getAggregateElement(Elem++);
398       } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
399       C = ElemC;
400     } else {
401       // For non-byte-sized vector elements, the first element is not
402       // necessarily located at the vector base address.
403       if (auto *VT = dyn_cast<VectorType>(SrcTy))
404         if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
405           return nullptr;
406 
407       C = C->getAggregateElement(0u);
408     }
409   } while (C);
410 
411   return nullptr;
412 }
413 
414 namespace {
415 
416 /// Recursive helper to read bits out of global. C is the constant being copied
417 /// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
418 /// results into and BytesLeft is the number of bytes left in
419 /// the CurPtr buffer. DL is the DataLayout.
420 bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
421                         unsigned BytesLeft, const DataLayout &DL) {
422   assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
423          "Out of range access");
424 
425   // If this element is zero or undefined, we can just return since *CurPtr is
426   // zero initialized.
427   if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
428     return true;
429 
430   if (auto *CI = dyn_cast<ConstantInt>(C)) {
431     if (CI->getBitWidth() > 64 ||
432         (CI->getBitWidth() & 7) != 0)
433       return false;
434 
435     uint64_t Val = CI->getZExtValue();
436     unsigned IntBytes = unsigned(CI->getBitWidth()/8);
437 
438     for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
439       int n = ByteOffset;
440       if (!DL.isLittleEndian())
441         n = IntBytes - n - 1;
442       CurPtr[i] = (unsigned char)(Val >> (n * 8));
443       ++ByteOffset;
444     }
445     return true;
446   }
447 
448   if (auto *CFP = dyn_cast<ConstantFP>(C)) {
449     if (CFP->getType()->isDoubleTy()) {
450       C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
451       return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
452     }
453     if (CFP->getType()->isFloatTy()){
454       C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
455       return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
456     }
457     if (CFP->getType()->isHalfTy()){
458       C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
459       return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
460     }
461     return false;
462   }
463 
464   if (auto *CS = dyn_cast<ConstantStruct>(C)) {
465     const StructLayout *SL = DL.getStructLayout(CS->getType());
466     unsigned Index = SL->getElementContainingOffset(ByteOffset);
467     uint64_t CurEltOffset = SL->getElementOffset(Index);
468     ByteOffset -= CurEltOffset;
469 
470     while (true) {
471       // If the element access is to the element itself and not to tail padding,
472       // read the bytes from the element.
473       uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
474 
475       if (ByteOffset < EltSize &&
476           !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
477                               BytesLeft, DL))
478         return false;
479 
480       ++Index;
481 
482       // Check to see if we read from the last struct element, if so we're done.
483       if (Index == CS->getType()->getNumElements())
484         return true;
485 
486       // If we read all of the bytes we needed from this element we're done.
487       uint64_t NextEltOffset = SL->getElementOffset(Index);
488 
489       if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
490         return true;
491 
492       // Move to the next element of the struct.
493       CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
494       BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
495       ByteOffset = 0;
496       CurEltOffset = NextEltOffset;
497     }
498     // not reached.
499   }
500 
501   if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
502       isa<ConstantDataSequential>(C)) {
503     uint64_t NumElts;
504     Type *EltTy;
505     if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
506       NumElts = AT->getNumElements();
507       EltTy = AT->getElementType();
508     } else {
509       NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
510       EltTy = cast<FixedVectorType>(C->getType())->getElementType();
511     }
512     uint64_t EltSize = DL.getTypeAllocSize(EltTy);
513     uint64_t Index = ByteOffset / EltSize;
514     uint64_t Offset = ByteOffset - Index * EltSize;
515 
516     for (; Index != NumElts; ++Index) {
517       if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
518                               BytesLeft, DL))
519         return false;
520 
521       uint64_t BytesWritten = EltSize - Offset;
522       assert(BytesWritten <= EltSize && "Not indexing into this element?");
523       if (BytesWritten >= BytesLeft)
524         return true;
525 
526       Offset = 0;
527       BytesLeft -= BytesWritten;
528       CurPtr += BytesWritten;
529     }
530     return true;
531   }
532 
533   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
534     if (CE->getOpcode() == Instruction::IntToPtr &&
535         CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
536       return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
537                                 BytesLeft, DL);
538     }
539   }
540 
541   // Otherwise, unknown initializer type.
542   return false;
543 }
544 
545 Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
546                                        int64_t Offset, const DataLayout &DL) {
547   // Bail out early. Not expect to load from scalable global variable.
548   if (isa<ScalableVectorType>(LoadTy))
549     return nullptr;
550 
551   auto *IntType = dyn_cast<IntegerType>(LoadTy);
552 
553   // If this isn't an integer load we can't fold it directly.
554   if (!IntType) {
555     // If this is a non-integer load, we can try folding it as an int load and
556     // then bitcast the result.  This can be useful for union cases.  Note
557     // that address spaces don't matter here since we're not going to result in
558     // an actual new load.
559     if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
560         !LoadTy->isVectorTy())
561       return nullptr;
562 
563     Type *MapTy = Type::getIntNTy(
564           C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
565     if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
566       if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
567           !LoadTy->isX86_AMXTy())
568         // Materializing a zero can be done trivially without a bitcast
569         return Constant::getNullValue(LoadTy);
570       Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
571       Res = FoldBitCast(Res, CastTy, DL);
572       if (LoadTy->isPtrOrPtrVectorTy()) {
573         // For vector of pointer, we needed to first convert to a vector of integer, then do vector inttoptr
574         if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
575             !LoadTy->isX86_AMXTy())
576           return Constant::getNullValue(LoadTy);
577         if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
578           // Be careful not to replace a load of an addrspace value with an inttoptr here
579           return nullptr;
580         Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
581       }
582       return Res;
583     }
584     return nullptr;
585   }
586 
587   unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
588   if (BytesLoaded > 32 || BytesLoaded == 0)
589     return nullptr;
590 
591   // If we're not accessing anything in this constant, the result is undefined.
592   if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
593     return UndefValue::get(IntType);
594 
595   // TODO: We should be able to support scalable types.
596   TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
597   if (InitializerSize.isScalable())
598     return nullptr;
599 
600   // If we're not accessing anything in this constant, the result is undefined.
601   if (Offset >= (int64_t)InitializerSize.getFixedValue())
602     return UndefValue::get(IntType);
603 
604   unsigned char RawBytes[32] = {0};
605   unsigned char *CurPtr = RawBytes;
606   unsigned BytesLeft = BytesLoaded;
607 
608   // If we're loading off the beginning of the global, some bytes may be valid.
609   if (Offset < 0) {
610     CurPtr += -Offset;
611     BytesLeft += Offset;
612     Offset = 0;
613   }
614 
615   if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
616     return nullptr;
617 
618   APInt ResultVal = APInt(IntType->getBitWidth(), 0);
619   if (DL.isLittleEndian()) {
620     ResultVal = RawBytes[BytesLoaded - 1];
621     for (unsigned i = 1; i != BytesLoaded; ++i) {
622       ResultVal <<= 8;
623       ResultVal |= RawBytes[BytesLoaded - 1 - i];
624     }
625   } else {
626     ResultVal = RawBytes[0];
627     for (unsigned i = 1; i != BytesLoaded; ++i) {
628       ResultVal <<= 8;
629       ResultVal |= RawBytes[i];
630     }
631   }
632 
633   return ConstantInt::get(IntType->getContext(), ResultVal);
634 }
635 
636 /// If this Offset points exactly to the start of an aggregate element, return
637 /// that element, otherwise return nullptr.
638 Constant *getConstantAtOffset(Constant *Base, APInt Offset,
639                               const DataLayout &DL) {
640   if (Offset.isZero())
641     return Base;
642 
643   if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
644     return nullptr;
645 
646   Type *ElemTy = Base->getType();
647   SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
648   if (!Offset.isZero() || !Indices[0].isZero())
649     return nullptr;
650 
651   Constant *C = Base;
652   for (const APInt &Index : drop_begin(Indices)) {
653     if (Index.isNegative() || Index.getActiveBits() >= 32)
654       return nullptr;
655 
656     C = C->getAggregateElement(Index.getZExtValue());
657     if (!C)
658       return nullptr;
659   }
660 
661   return C;
662 }
663 
664 } // end anonymous namespace
665 
666 Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
667                                           const APInt &Offset,
668                                           const DataLayout &DL) {
669   if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
670     if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
671       return Result;
672 
673   // Explicitly check for out-of-bounds access, so we return undef even if the
674   // constant is a uniform value.
675   TypeSize Size = DL.getTypeAllocSize(C->getType());
676   if (!Size.isScalable() && Offset.sge(Size.getFixedSize()))
677     return UndefValue::get(Ty);
678 
679   // Try an offset-independent fold of a uniform value.
680   if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
681     return Result;
682 
683   // Try hard to fold loads from bitcasted strange and non-type-safe things.
684   if (Offset.getMinSignedBits() <= 64)
685     if (Constant *Result =
686             FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
687       return Result;
688 
689   return nullptr;
690 }
691 
692 Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
693                                           const DataLayout &DL) {
694   return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
695 }
696 
697 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
698                                              APInt Offset,
699                                              const DataLayout &DL) {
700   C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
701           DL, Offset, /* AllowNonInbounds */ true));
702 
703   if (auto *GV = dyn_cast<GlobalVariable>(C))
704     if (GV->isConstant() && GV->hasDefinitiveInitializer())
705       if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
706                                                        Offset, DL))
707         return Result;
708 
709   // If this load comes from anywhere in a uniform constant global, the value
710   // is always the same, regardless of the loaded offset.
711   if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C))) {
712     if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
713       if (Constant *Res =
714               ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty))
715         return Res;
716     }
717   }
718 
719   return nullptr;
720 }
721 
722 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
723                                              const DataLayout &DL) {
724   APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
725   return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
726 }
727 
728 Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
729   if (isa<PoisonValue>(C))
730     return PoisonValue::get(Ty);
731   if (isa<UndefValue>(C))
732     return UndefValue::get(Ty);
733   if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
734     return Constant::getNullValue(Ty);
735   if (C->isAllOnesValue() &&
736       (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
737     return Constant::getAllOnesValue(Ty);
738   return nullptr;
739 }
740 
741 namespace {
742 
743 /// One of Op0/Op1 is a constant expression.
744 /// Attempt to symbolically evaluate the result of a binary operator merging
745 /// these together.  If target data info is available, it is provided as DL,
746 /// otherwise DL is null.
747 Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
748                                     const DataLayout &DL) {
749   // SROA
750 
751   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
752   // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
753   // bits.
754 
755   if (Opc == Instruction::And) {
756     KnownBits Known0 = computeKnownBits(Op0, DL);
757     KnownBits Known1 = computeKnownBits(Op1, DL);
758     if ((Known1.One | Known0.Zero).isAllOnes()) {
759       // All the bits of Op0 that the 'and' could be masking are already zero.
760       return Op0;
761     }
762     if ((Known0.One | Known1.Zero).isAllOnes()) {
763       // All the bits of Op1 that the 'and' could be masking are already zero.
764       return Op1;
765     }
766 
767     Known0 &= Known1;
768     if (Known0.isConstant())
769       return ConstantInt::get(Op0->getType(), Known0.getConstant());
770   }
771 
772   // If the constant expr is something like &A[123] - &A[4].f, fold this into a
773   // constant.  This happens frequently when iterating over a global array.
774   if (Opc == Instruction::Sub) {
775     GlobalValue *GV1, *GV2;
776     APInt Offs1, Offs2;
777 
778     if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
779       if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
780         unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
781 
782         // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
783         // PtrToInt may change the bitwidth so we have convert to the right size
784         // first.
785         return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
786                                                 Offs2.zextOrTrunc(OpSize));
787       }
788   }
789 
790   return nullptr;
791 }
792 
793 /// If array indices are not pointer-sized integers, explicitly cast them so
794 /// that they aren't implicitly casted by the getelementptr.
795 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
796                          Type *ResultTy, Optional<unsigned> InRangeIndex,
797                          const DataLayout &DL, const TargetLibraryInfo *TLI) {
798   Type *IntIdxTy = DL.getIndexType(ResultTy);
799   Type *IntIdxScalarTy = IntIdxTy->getScalarType();
800 
801   bool Any = false;
802   SmallVector<Constant*, 32> NewIdxs;
803   for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
804     if ((i == 1 ||
805          !isa<StructType>(GetElementPtrInst::getIndexedType(
806              SrcElemTy, Ops.slice(1, i - 1)))) &&
807         Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
808       Any = true;
809       Type *NewType = Ops[i]->getType()->isVectorTy()
810                           ? IntIdxTy
811                           : IntIdxScalarTy;
812       NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
813                                                                       true,
814                                                                       NewType,
815                                                                       true),
816                                               Ops[i], NewType));
817     } else
818       NewIdxs.push_back(Ops[i]);
819   }
820 
821   if (!Any)
822     return nullptr;
823 
824   Constant *C = ConstantExpr::getGetElementPtr(
825       SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
826   return ConstantFoldConstant(C, DL, TLI);
827 }
828 
829 /// Strip the pointer casts, but preserve the address space information.
830 Constant *StripPtrCastKeepAS(Constant *Ptr) {
831   assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
832   auto *OldPtrTy = cast<PointerType>(Ptr->getType());
833   Ptr = cast<Constant>(Ptr->stripPointerCasts());
834   auto *NewPtrTy = cast<PointerType>(Ptr->getType());
835 
836   // Preserve the address space number of the pointer.
837   if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
838     Ptr = ConstantExpr::getPointerCast(
839         Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
840                                                  OldPtrTy->getAddressSpace()));
841   }
842   return Ptr;
843 }
844 
845 /// If we can symbolically evaluate the GEP constant expression, do so.
846 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
847                                   ArrayRef<Constant *> Ops,
848                                   const DataLayout &DL,
849                                   const TargetLibraryInfo *TLI) {
850   const GEPOperator *InnermostGEP = GEP;
851   bool InBounds = GEP->isInBounds();
852 
853   Type *SrcElemTy = GEP->getSourceElementType();
854   Type *ResElemTy = GEP->getResultElementType();
855   Type *ResTy = GEP->getType();
856   if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
857     return nullptr;
858 
859   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
860                                    GEP->getInRangeIndex(), DL, TLI))
861     return C;
862 
863   Constant *Ptr = Ops[0];
864   if (!Ptr->getType()->isPointerTy())
865     return nullptr;
866 
867   Type *IntIdxTy = DL.getIndexType(Ptr->getType());
868 
869   // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
870   // "inttoptr (sub (ptrtoint Ptr), V)"
871   if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
872     auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
873     assert((!CE || CE->getType() == IntIdxTy) &&
874            "CastGEPIndices didn't canonicalize index types!");
875     if (CE && CE->getOpcode() == Instruction::Sub &&
876         CE->getOperand(0)->isNullValue()) {
877       Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
878       Res = ConstantExpr::getSub(Res, CE->getOperand(1));
879       Res = ConstantExpr::getIntToPtr(Res, ResTy);
880       return ConstantFoldConstant(Res, DL, TLI);
881     }
882   }
883 
884   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
885     if (!isa<ConstantInt>(Ops[i]))
886       return nullptr;
887 
888   unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
889   APInt Offset =
890       APInt(BitWidth,
891             DL.getIndexedOffsetInType(
892                 SrcElemTy,
893                 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
894   Ptr = StripPtrCastKeepAS(Ptr);
895 
896   // If this is a GEP of a GEP, fold it all into a single GEP.
897   while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
898     InnermostGEP = GEP;
899     InBounds &= GEP->isInBounds();
900 
901     SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));
902 
903     // Do not try the incorporate the sub-GEP if some index is not a number.
904     bool AllConstantInt = true;
905     for (Value *NestedOp : NestedOps)
906       if (!isa<ConstantInt>(NestedOp)) {
907         AllConstantInt = false;
908         break;
909       }
910     if (!AllConstantInt)
911       break;
912 
913     Ptr = cast<Constant>(GEP->getOperand(0));
914     SrcElemTy = GEP->getSourceElementType();
915     Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
916     Ptr = StripPtrCastKeepAS(Ptr);
917   }
918 
919   // If the base value for this address is a literal integer value, fold the
920   // getelementptr to the resulting integer value casted to the pointer type.
921   APInt BasePtr(BitWidth, 0);
922   if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
923     if (CE->getOpcode() == Instruction::IntToPtr) {
924       if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
925         BasePtr = Base->getValue().zextOrTrunc(BitWidth);
926     }
927   }
928 
929   auto *PTy = cast<PointerType>(Ptr->getType());
930   if ((Ptr->isNullValue() || BasePtr != 0) &&
931       !DL.isNonIntegralPointerType(PTy)) {
932     Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
933     return ConstantExpr::getIntToPtr(C, ResTy);
934   }
935 
936   // Otherwise form a regular getelementptr. Recompute the indices so that
937   // we eliminate over-indexing of the notional static type array bounds.
938   // This makes it easy to determine if the getelementptr is "inbounds".
939   // Also, this helps GlobalOpt do SROA on GlobalVariables.
940 
941   // For GEPs of GlobalValues, use the value type even for opaque pointers.
942   // Otherwise use an i8 GEP.
943   if (auto *GV = dyn_cast<GlobalValue>(Ptr))
944     SrcElemTy = GV->getValueType();
945   else if (!PTy->isOpaque())
946     SrcElemTy = PTy->getNonOpaquePointerElementType();
947   else
948     SrcElemTy = Type::getInt8Ty(Ptr->getContext());
949 
950   if (!SrcElemTy->isSized())
951     return nullptr;
952 
953   Type *ElemTy = SrcElemTy;
954   SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
955   if (Offset != 0)
956     return nullptr;
957 
958   // Try to add additional zero indices to reach the desired result element
959   // type.
960   // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
961   // we'll have to insert a bitcast anyway?
962   while (ElemTy != ResElemTy) {
963     Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
964     if (!NextTy)
965       break;
966 
967     Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
968     ElemTy = NextTy;
969   }
970 
971   SmallVector<Constant *, 32> NewIdxs;
972   for (const APInt &Index : Indices)
973     NewIdxs.push_back(ConstantInt::get(
974         Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));
975 
976   // Preserve the inrange index from the innermost GEP if possible. We must
977   // have calculated the same indices up to and including the inrange index.
978   Optional<unsigned> InRangeIndex;
979   if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
980     if (SrcElemTy == InnermostGEP->getSourceElementType() &&
981         NewIdxs.size() > *LastIRIndex) {
982       InRangeIndex = LastIRIndex;
983       for (unsigned I = 0; I <= *LastIRIndex; ++I)
984         if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
985           return nullptr;
986     }
987 
988   // Create a GEP.
989   Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
990                                                InBounds, InRangeIndex);
991   assert(
992       cast<PointerType>(C->getType())->isOpaqueOrPointeeTypeMatches(ElemTy) &&
993       "Computed GetElementPtr has unexpected type!");
994 
995   // If we ended up indexing a member with a type that doesn't match
996   // the type of what the original indices indexed, add a cast.
997   if (C->getType() != ResTy)
998     C = FoldBitCast(C, ResTy, DL);
999 
1000   return C;
1001 }
1002 
1003 /// Attempt to constant fold an instruction with the
1004 /// specified opcode and operands.  If successful, the constant result is
1005 /// returned, if not, null is returned.  Note that this function can fail when
1006 /// attempting to fold instructions like loads and stores, which have no
1007 /// constant expression form.
1008 Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
1009                                        ArrayRef<Constant *> Ops,
1010                                        const DataLayout &DL,
1011                                        const TargetLibraryInfo *TLI) {
1012   Type *DestTy = InstOrCE->getType();
1013 
1014   if (Instruction::isUnaryOp(Opcode))
1015     return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);
1016 
1017   if (Instruction::isBinaryOp(Opcode))
1018     return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
1019 
1020   if (Instruction::isCast(Opcode))
1021     return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
1022 
1023   if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
1024     if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
1025       return C;
1026 
1027     return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
1028                                           Ops.slice(1), GEP->isInBounds(),
1029                                           GEP->getInRangeIndex());
1030   }
1031 
1032   if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
1033     return CE->getWithOperands(Ops);
1034 
1035   switch (Opcode) {
1036   default: return nullptr;
1037   case Instruction::ICmp:
1038   case Instruction::FCmp: llvm_unreachable("Invalid for compares");
1039   case Instruction::Freeze:
1040     return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
1041   case Instruction::Call:
1042     if (auto *F = dyn_cast<Function>(Ops.back())) {
1043       const auto *Call = cast<CallBase>(InstOrCE);
1044       if (canConstantFoldCallTo(Call, F))
1045         return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
1046     }
1047     return nullptr;
1048   case Instruction::Select:
1049     return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
1050   case Instruction::ExtractElement:
1051     return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
1052   case Instruction::ExtractValue:
1053     return ConstantExpr::getExtractValue(
1054         Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
1055   case Instruction::InsertElement:
1056     return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
1057   case Instruction::ShuffleVector:
1058     return ConstantExpr::getShuffleVector(
1059         Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
1060   }
1061 }
1062 
1063 } // end anonymous namespace
1064 
1065 //===----------------------------------------------------------------------===//
1066 // Constant Folding public APIs
1067 //===----------------------------------------------------------------------===//
1068 
1069 namespace {
1070 
1071 Constant *
1072 ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1073                          const TargetLibraryInfo *TLI,
1074                          SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1075   if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
1076     return const_cast<Constant *>(C);
1077 
1078   SmallVector<Constant *, 8> Ops;
1079   for (const Use &OldU : C->operands()) {
1080     Constant *OldC = cast<Constant>(&OldU);
1081     Constant *NewC = OldC;
1082     // Recursively fold the ConstantExpr's operands. If we have already folded
1083     // a ConstantExpr, we don't have to process it again.
1084     if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
1085       auto It = FoldedOps.find(OldC);
1086       if (It == FoldedOps.end()) {
1087         NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
1088         FoldedOps.insert({OldC, NewC});
1089       } else {
1090         NewC = It->second;
1091       }
1092     }
1093     Ops.push_back(NewC);
1094   }
1095 
1096   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1097     if (CE->isCompare())
1098       return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1099                                              DL, TLI);
1100 
1101     return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
1102   }
1103 
1104   assert(isa<ConstantVector>(C));
1105   return ConstantVector::get(Ops);
1106 }
1107 
1108 } // end anonymous namespace
1109 
1110 Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
1111                                         const TargetLibraryInfo *TLI) {
1112   // Handle PHI nodes quickly here...
1113   if (auto *PN = dyn_cast<PHINode>(I)) {
1114     Constant *CommonValue = nullptr;
1115 
1116     SmallDenseMap<Constant *, Constant *> FoldedOps;
1117     for (Value *Incoming : PN->incoming_values()) {
1118       // If the incoming value is undef then skip it.  Note that while we could
1119       // skip the value if it is equal to the phi node itself we choose not to
1120       // because that would break the rule that constant folding only applies if
1121       // all operands are constants.
1122       if (isa<UndefValue>(Incoming))
1123         continue;
1124       // If the incoming value is not a constant, then give up.
1125       auto *C = dyn_cast<Constant>(Incoming);
1126       if (!C)
1127         return nullptr;
1128       // Fold the PHI's operands.
1129       C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1130       // If the incoming value is a different constant to
1131       // the one we saw previously, then give up.
1132       if (CommonValue && C != CommonValue)
1133         return nullptr;
1134       CommonValue = C;
1135     }
1136 
1137     // If we reach here, all incoming values are the same constant or undef.
1138     return CommonValue ? CommonValue : UndefValue::get(PN->getType());
1139   }
1140 
1141   // Scan the operand list, checking to see if they are all constants, if so,
1142   // hand off to ConstantFoldInstOperandsImpl.
1143   if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1144     return nullptr;
1145 
1146   SmallDenseMap<Constant *, Constant *> FoldedOps;
1147   SmallVector<Constant *, 8> Ops;
1148   for (const Use &OpU : I->operands()) {
1149     auto *Op = cast<Constant>(&OpU);
1150     // Fold the Instruction's operands.
1151     Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
1152     Ops.push_back(Op);
1153   }
1154 
1155   if (const auto *CI = dyn_cast<CmpInst>(I))
1156     return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
1157                                            DL, TLI);
1158 
1159   if (const auto *LI = dyn_cast<LoadInst>(I)) {
1160     if (LI->isVolatile())
1161       return nullptr;
1162     return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
1163   }
1164 
1165   if (auto *IVI = dyn_cast<InsertValueInst>(I))
1166     return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices());
1167 
1168   if (auto *EVI = dyn_cast<ExtractValueInst>(I))
1169     return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices());
1170 
1171   return ConstantFoldInstOperands(I, Ops, DL, TLI);
1172 }
1173 
1174 Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1175                                      const TargetLibraryInfo *TLI) {
1176   SmallDenseMap<Constant *, Constant *> FoldedOps;
1177   return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1178 }
1179 
1180 Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1181                                          ArrayRef<Constant *> Ops,
1182                                          const DataLayout &DL,
1183                                          const TargetLibraryInfo *TLI) {
1184   return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
1185 }
1186 
1187 Constant *llvm::ConstantFoldCompareInstOperands(unsigned IntPredicate,
1188                                                 Constant *Ops0, Constant *Ops1,
1189                                                 const DataLayout &DL,
1190                                                 const TargetLibraryInfo *TLI) {
1191   CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
1192   // fold: icmp (inttoptr x), null         -> icmp x, 0
1193   // fold: icmp null, (inttoptr x)         -> icmp 0, x
1194   // fold: icmp (ptrtoint x), 0            -> icmp x, null
1195   // fold: icmp 0, (ptrtoint x)            -> icmp null, x
1196   // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1197   // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1198   //
1199   // FIXME: The following comment is out of data and the DataLayout is here now.
1200   // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1201   // around to know if bit truncation is happening.
1202   if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1203     if (Ops1->isNullValue()) {
1204       if (CE0->getOpcode() == Instruction::IntToPtr) {
1205         Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1206         // Convert the integer value to the right size to ensure we get the
1207         // proper extension or truncation.
1208         Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1209                                                    IntPtrTy, false);
1210         Constant *Null = Constant::getNullValue(C->getType());
1211         return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1212       }
1213 
1214       // Only do this transformation if the int is intptrty in size, otherwise
1215       // there is a truncation or extension that we aren't modeling.
1216       if (CE0->getOpcode() == Instruction::PtrToInt) {
1217         Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1218         if (CE0->getType() == IntPtrTy) {
1219           Constant *C = CE0->getOperand(0);
1220           Constant *Null = Constant::getNullValue(C->getType());
1221           return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1222         }
1223       }
1224     }
1225 
1226     if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1227       if (CE0->getOpcode() == CE1->getOpcode()) {
1228         if (CE0->getOpcode() == Instruction::IntToPtr) {
1229           Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1230 
1231           // Convert the integer value to the right size to ensure we get the
1232           // proper extension or truncation.
1233           Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1234                                                       IntPtrTy, false);
1235           Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
1236                                                       IntPtrTy, false);
1237           return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1238         }
1239 
1240         // Only do this transformation if the int is intptrty in size, otherwise
1241         // there is a truncation or extension that we aren't modeling.
1242         if (CE0->getOpcode() == Instruction::PtrToInt) {
1243           Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1244           if (CE0->getType() == IntPtrTy &&
1245               CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1246             return ConstantFoldCompareInstOperands(
1247                 Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1248           }
1249         }
1250       }
1251     }
1252 
1253     // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
1254     // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
1255     if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
1256         CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1257       Constant *LHS = ConstantFoldCompareInstOperands(
1258           Predicate, CE0->getOperand(0), Ops1, DL, TLI);
1259       Constant *RHS = ConstantFoldCompareInstOperands(
1260           Predicate, CE0->getOperand(1), Ops1, DL, TLI);
1261       unsigned OpC =
1262         Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1263       return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
1264     }
1265 
1266     // Convert pointer comparison (base+offset1) pred (base+offset2) into
1267     // offset1 pred offset2, for the case where the offset is inbounds. This
1268     // only works for equality and unsigned comparison, as inbounds permits
1269     // crossing the sign boundary. However, the offset comparison itself is
1270     // signed.
1271     if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
1272       unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
1273       APInt Offset0(IndexWidth, 0);
1274       Value *Stripped0 =
1275           Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
1276       APInt Offset1(IndexWidth, 0);
1277       Value *Stripped1 =
1278           Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
1279       if (Stripped0 == Stripped1)
1280         return ConstantExpr::getCompare(
1281             ICmpInst::getSignedPredicate(Predicate),
1282             ConstantInt::get(CE0->getContext(), Offset0),
1283             ConstantInt::get(CE0->getContext(), Offset1));
1284     }
1285   } else if (isa<ConstantExpr>(Ops1)) {
1286     // If RHS is a constant expression, but the left side isn't, swap the
1287     // operands and try again.
1288     Predicate = ICmpInst::getSwappedPredicate(Predicate);
1289     return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
1290   }
1291 
1292   return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1293 }
1294 
1295 Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
1296                                            const DataLayout &DL) {
1297   assert(Instruction::isUnaryOp(Opcode));
1298 
1299   return ConstantExpr::get(Opcode, Op);
1300 }
1301 
1302 Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1303                                              Constant *RHS,
1304                                              const DataLayout &DL) {
1305   assert(Instruction::isBinaryOp(Opcode));
1306   if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1307     if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1308       return C;
1309 
1310   return ConstantExpr::get(Opcode, LHS, RHS);
1311 }
1312 
1313 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1314                                         Type *DestTy, const DataLayout &DL) {
1315   assert(Instruction::isCast(Opcode));
1316   switch (Opcode) {
1317   default:
1318     llvm_unreachable("Missing case");
1319   case Instruction::PtrToInt:
1320     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1321       Constant *FoldedValue = nullptr;
1322       // If the input is a inttoptr, eliminate the pair.  This requires knowing
1323       // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1324       if (CE->getOpcode() == Instruction::IntToPtr) {
1325         // zext/trunc the inttoptr to pointer size.
1326         FoldedValue = ConstantExpr::getIntegerCast(
1327             CE->getOperand(0), DL.getIntPtrType(CE->getType()),
1328             /*IsSigned=*/false);
1329       } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
1330         // If we have GEP, we can perform the following folds:
1331         // (ptrtoint (gep null, x)) -> x
1332         // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
1333         unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1334         APInt BaseOffset(BitWidth, 0);
1335         auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
1336             DL, BaseOffset, /*AllowNonInbounds=*/true));
1337         if (Base->isNullValue()) {
1338           FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
1339         }
1340       }
1341       if (FoldedValue) {
1342         // Do a zext or trunc to get to the ptrtoint dest size.
1343         return ConstantExpr::getIntegerCast(FoldedValue, DestTy,
1344                                             /*IsSigned=*/false);
1345       }
1346     }
1347     return ConstantExpr::getCast(Opcode, C, DestTy);
1348   case Instruction::IntToPtr:
1349     // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1350     // the int size is >= the ptr size and the address spaces are the same.
1351     // This requires knowing the width of a pointer, so it can't be done in
1352     // ConstantExpr::getCast.
1353     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1354       if (CE->getOpcode() == Instruction::PtrToInt) {
1355         Constant *SrcPtr = CE->getOperand(0);
1356         unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1357         unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1358 
1359         if (MidIntSize >= SrcPtrSize) {
1360           unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1361           if (SrcAS == DestTy->getPointerAddressSpace())
1362             return FoldBitCast(CE->getOperand(0), DestTy, DL);
1363         }
1364       }
1365     }
1366 
1367     return ConstantExpr::getCast(Opcode, C, DestTy);
1368   case Instruction::Trunc:
1369   case Instruction::ZExt:
1370   case Instruction::SExt:
1371   case Instruction::FPTrunc:
1372   case Instruction::FPExt:
1373   case Instruction::UIToFP:
1374   case Instruction::SIToFP:
1375   case Instruction::FPToUI:
1376   case Instruction::FPToSI:
1377   case Instruction::AddrSpaceCast:
1378       return ConstantExpr::getCast(Opcode, C, DestTy);
1379   case Instruction::BitCast:
1380     return FoldBitCast(C, DestTy, DL);
1381   }
1382 }
1383 
1384 //===----------------------------------------------------------------------===//
1385 //  Constant Folding for Calls
1386 //
1387 
1388 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1389   if (Call->isNoBuiltin())
1390     return false;
1391   if (Call->getFunctionType() != F->getFunctionType())
1392     return false;
1393   switch (F->getIntrinsicID()) {
1394   // Operations that do not operate floating-point numbers and do not depend on
1395   // FP environment can be folded even in strictfp functions.
1396   case Intrinsic::bswap:
1397   case Intrinsic::ctpop:
1398   case Intrinsic::ctlz:
1399   case Intrinsic::cttz:
1400   case Intrinsic::fshl:
1401   case Intrinsic::fshr:
1402   case Intrinsic::launder_invariant_group:
1403   case Intrinsic::strip_invariant_group:
1404   case Intrinsic::masked_load:
1405   case Intrinsic::get_active_lane_mask:
1406   case Intrinsic::abs:
1407   case Intrinsic::smax:
1408   case Intrinsic::smin:
1409   case Intrinsic::umax:
1410   case Intrinsic::umin:
1411   case Intrinsic::sadd_with_overflow:
1412   case Intrinsic::uadd_with_overflow:
1413   case Intrinsic::ssub_with_overflow:
1414   case Intrinsic::usub_with_overflow:
1415   case Intrinsic::smul_with_overflow:
1416   case Intrinsic::umul_with_overflow:
1417   case Intrinsic::sadd_sat:
1418   case Intrinsic::uadd_sat:
1419   case Intrinsic::ssub_sat:
1420   case Intrinsic::usub_sat:
1421   case Intrinsic::smul_fix:
1422   case Intrinsic::smul_fix_sat:
1423   case Intrinsic::bitreverse:
1424   case Intrinsic::is_constant:
1425   case Intrinsic::vector_reduce_add:
1426   case Intrinsic::vector_reduce_mul:
1427   case Intrinsic::vector_reduce_and:
1428   case Intrinsic::vector_reduce_or:
1429   case Intrinsic::vector_reduce_xor:
1430   case Intrinsic::vector_reduce_smin:
1431   case Intrinsic::vector_reduce_smax:
1432   case Intrinsic::vector_reduce_umin:
1433   case Intrinsic::vector_reduce_umax:
1434   // Target intrinsics
1435   case Intrinsic::amdgcn_perm:
1436   case Intrinsic::arm_mve_vctp8:
1437   case Intrinsic::arm_mve_vctp16:
1438   case Intrinsic::arm_mve_vctp32:
1439   case Intrinsic::arm_mve_vctp64:
1440   case Intrinsic::aarch64_sve_convert_from_svbool:
1441   // WebAssembly float semantics are always known
1442   case Intrinsic::wasm_trunc_signed:
1443   case Intrinsic::wasm_trunc_unsigned:
1444     return true;
1445 
1446   // Floating point operations cannot be folded in strictfp functions in
1447   // general case. They can be folded if FP environment is known to compiler.
1448   case Intrinsic::minnum:
1449   case Intrinsic::maxnum:
1450   case Intrinsic::minimum:
1451   case Intrinsic::maximum:
1452   case Intrinsic::log:
1453   case Intrinsic::log2:
1454   case Intrinsic::log10:
1455   case Intrinsic::exp:
1456   case Intrinsic::exp2:
1457   case Intrinsic::sqrt:
1458   case Intrinsic::sin:
1459   case Intrinsic::cos:
1460   case Intrinsic::pow:
1461   case Intrinsic::powi:
1462   case Intrinsic::fma:
1463   case Intrinsic::fmuladd:
1464   case Intrinsic::fptoui_sat:
1465   case Intrinsic::fptosi_sat:
1466   case Intrinsic::convert_from_fp16:
1467   case Intrinsic::convert_to_fp16:
1468   case Intrinsic::amdgcn_cos:
1469   case Intrinsic::amdgcn_cubeid:
1470   case Intrinsic::amdgcn_cubema:
1471   case Intrinsic::amdgcn_cubesc:
1472   case Intrinsic::amdgcn_cubetc:
1473   case Intrinsic::amdgcn_fmul_legacy:
1474   case Intrinsic::amdgcn_fma_legacy:
1475   case Intrinsic::amdgcn_fract:
1476   case Intrinsic::amdgcn_ldexp:
1477   case Intrinsic::amdgcn_sin:
1478   // The intrinsics below depend on rounding mode in MXCSR.
1479   case Intrinsic::x86_sse_cvtss2si:
1480   case Intrinsic::x86_sse_cvtss2si64:
1481   case Intrinsic::x86_sse_cvttss2si:
1482   case Intrinsic::x86_sse_cvttss2si64:
1483   case Intrinsic::x86_sse2_cvtsd2si:
1484   case Intrinsic::x86_sse2_cvtsd2si64:
1485   case Intrinsic::x86_sse2_cvttsd2si:
1486   case Intrinsic::x86_sse2_cvttsd2si64:
1487   case Intrinsic::x86_avx512_vcvtss2si32:
1488   case Intrinsic::x86_avx512_vcvtss2si64:
1489   case Intrinsic::x86_avx512_cvttss2si:
1490   case Intrinsic::x86_avx512_cvttss2si64:
1491   case Intrinsic::x86_avx512_vcvtsd2si32:
1492   case Intrinsic::x86_avx512_vcvtsd2si64:
1493   case Intrinsic::x86_avx512_cvttsd2si:
1494   case Intrinsic::x86_avx512_cvttsd2si64:
1495   case Intrinsic::x86_avx512_vcvtss2usi32:
1496   case Intrinsic::x86_avx512_vcvtss2usi64:
1497   case Intrinsic::x86_avx512_cvttss2usi:
1498   case Intrinsic::x86_avx512_cvttss2usi64:
1499   case Intrinsic::x86_avx512_vcvtsd2usi32:
1500   case Intrinsic::x86_avx512_vcvtsd2usi64:
1501   case Intrinsic::x86_avx512_cvttsd2usi:
1502   case Intrinsic::x86_avx512_cvttsd2usi64:
1503     return !Call->isStrictFP();
1504 
1505   // Sign operations are actually bitwise operations, they do not raise
1506   // exceptions even for SNANs.
1507   case Intrinsic::fabs:
1508   case Intrinsic::copysign:
1509   // Non-constrained variants of rounding operations means default FP
1510   // environment, they can be folded in any case.
1511   case Intrinsic::ceil:
1512   case Intrinsic::floor:
1513   case Intrinsic::round:
1514   case Intrinsic::roundeven:
1515   case Intrinsic::trunc:
1516   case Intrinsic::nearbyint:
1517   case Intrinsic::rint:
1518   // Constrained intrinsics can be folded if FP environment is known
1519   // to compiler.
1520   case Intrinsic::experimental_constrained_fma:
1521   case Intrinsic::experimental_constrained_fmuladd:
1522   case Intrinsic::experimental_constrained_fadd:
1523   case Intrinsic::experimental_constrained_fsub:
1524   case Intrinsic::experimental_constrained_fmul:
1525   case Intrinsic::experimental_constrained_fdiv:
1526   case Intrinsic::experimental_constrained_frem:
1527   case Intrinsic::experimental_constrained_ceil:
1528   case Intrinsic::experimental_constrained_floor:
1529   case Intrinsic::experimental_constrained_round:
1530   case Intrinsic::experimental_constrained_roundeven:
1531   case Intrinsic::experimental_constrained_trunc:
1532   case Intrinsic::experimental_constrained_nearbyint:
1533   case Intrinsic::experimental_constrained_rint:
1534   case Intrinsic::experimental_constrained_fcmp:
1535   case Intrinsic::experimental_constrained_fcmps:
1536     return true;
1537   default:
1538     return false;
1539   case Intrinsic::not_intrinsic: break;
1540   }
1541 
1542   if (!F->hasName() || Call->isStrictFP())
1543     return false;
1544 
1545   // In these cases, the check of the length is required.  We don't want to
1546   // return true for a name like "cos\0blah" which strcmp would return equal to
1547   // "cos", but has length 8.
1548   StringRef Name = F->getName();
1549   switch (Name[0]) {
1550   default:
1551     return false;
1552   case 'a':
1553     return Name == "acos" || Name == "acosf" ||
1554            Name == "asin" || Name == "asinf" ||
1555            Name == "atan" || Name == "atanf" ||
1556            Name == "atan2" || Name == "atan2f";
1557   case 'c':
1558     return Name == "ceil" || Name == "ceilf" ||
1559            Name == "cos" || Name == "cosf" ||
1560            Name == "cosh" || Name == "coshf";
1561   case 'e':
1562     return Name == "exp" || Name == "expf" ||
1563            Name == "exp2" || Name == "exp2f";
1564   case 'f':
1565     return Name == "fabs" || Name == "fabsf" ||
1566            Name == "floor" || Name == "floorf" ||
1567            Name == "fmod" || Name == "fmodf";
1568   case 'l':
1569     return Name == "log" || Name == "logf" ||
1570            Name == "log2" || Name == "log2f" ||
1571            Name == "log10" || Name == "log10f";
1572   case 'n':
1573     return Name == "nearbyint" || Name == "nearbyintf";
1574   case 'p':
1575     return Name == "pow" || Name == "powf";
1576   case 'r':
1577     return Name == "remainder" || Name == "remainderf" ||
1578            Name == "rint" || Name == "rintf" ||
1579            Name == "round" || Name == "roundf";
1580   case 's':
1581     return Name == "sin" || Name == "sinf" ||
1582            Name == "sinh" || Name == "sinhf" ||
1583            Name == "sqrt" || Name == "sqrtf";
1584   case 't':
1585     return Name == "tan" || Name == "tanf" ||
1586            Name == "tanh" || Name == "tanhf" ||
1587            Name == "trunc" || Name == "truncf";
1588   case '_':
1589     // Check for various function names that get used for the math functions
1590     // when the header files are preprocessed with the macro
1591     // __FINITE_MATH_ONLY__ enabled.
1592     // The '12' here is the length of the shortest name that can match.
1593     // We need to check the size before looking at Name[1] and Name[2]
1594     // so we may as well check a limit that will eliminate mismatches.
1595     if (Name.size() < 12 || Name[1] != '_')
1596       return false;
1597     switch (Name[2]) {
1598     default:
1599       return false;
1600     case 'a':
1601       return Name == "__acos_finite" || Name == "__acosf_finite" ||
1602              Name == "__asin_finite" || Name == "__asinf_finite" ||
1603              Name == "__atan2_finite" || Name == "__atan2f_finite";
1604     case 'c':
1605       return Name == "__cosh_finite" || Name == "__coshf_finite";
1606     case 'e':
1607       return Name == "__exp_finite" || Name == "__expf_finite" ||
1608              Name == "__exp2_finite" || Name == "__exp2f_finite";
1609     case 'l':
1610       return Name == "__log_finite" || Name == "__logf_finite" ||
1611              Name == "__log10_finite" || Name == "__log10f_finite";
1612     case 'p':
1613       return Name == "__pow_finite" || Name == "__powf_finite";
1614     case 's':
1615       return Name == "__sinh_finite" || Name == "__sinhf_finite";
1616     }
1617   }
1618 }
1619 
1620 namespace {
1621 
1622 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1623   if (Ty->isHalfTy() || Ty->isFloatTy()) {
1624     APFloat APF(V);
1625     bool unused;
1626     APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1627     return ConstantFP::get(Ty->getContext(), APF);
1628   }
1629   if (Ty->isDoubleTy())
1630     return ConstantFP::get(Ty->getContext(), APFloat(V));
1631   llvm_unreachable("Can only constant fold half/float/double");
1632 }
1633 
1634 /// Clear the floating-point exception state.
1635 inline void llvm_fenv_clearexcept() {
1636 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1637   feclearexcept(FE_ALL_EXCEPT);
1638 #endif
1639   errno = 0;
1640 }
1641 
1642 /// Test if a floating-point exception was raised.
1643 inline bool llvm_fenv_testexcept() {
1644   int errno_val = errno;
1645   if (errno_val == ERANGE || errno_val == EDOM)
1646     return true;
1647 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1648   if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1649     return true;
1650 #endif
1651   return false;
1652 }
1653 
1654 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1655                          Type *Ty) {
1656   llvm_fenv_clearexcept();
1657   double Result = NativeFP(V.convertToDouble());
1658   if (llvm_fenv_testexcept()) {
1659     llvm_fenv_clearexcept();
1660     return nullptr;
1661   }
1662 
1663   return GetConstantFoldFPValue(Result, Ty);
1664 }
1665 
1666 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1667                                const APFloat &V, const APFloat &W, Type *Ty) {
1668   llvm_fenv_clearexcept();
1669   double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1670   if (llvm_fenv_testexcept()) {
1671     llvm_fenv_clearexcept();
1672     return nullptr;
1673   }
1674 
1675   return GetConstantFoldFPValue(Result, Ty);
1676 }
1677 
1678 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1679   FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1680   if (!VT)
1681     return nullptr;
1682 
1683   // This isn't strictly necessary, but handle the special/common case of zero:
1684   // all integer reductions of a zero input produce zero.
1685   if (isa<ConstantAggregateZero>(Op))
1686     return ConstantInt::get(VT->getElementType(), 0);
1687 
1688   // This is the same as the underlying binops - poison propagates.
1689   if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1690     return PoisonValue::get(VT->getElementType());
1691 
1692   // TODO: Handle undef.
1693   if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1694     return nullptr;
1695 
1696   auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1697   if (!EltC)
1698     return nullptr;
1699 
1700   APInt Acc = EltC->getValue();
1701   for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1702     if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1703       return nullptr;
1704     const APInt &X = EltC->getValue();
1705     switch (IID) {
1706     case Intrinsic::vector_reduce_add:
1707       Acc = Acc + X;
1708       break;
1709     case Intrinsic::vector_reduce_mul:
1710       Acc = Acc * X;
1711       break;
1712     case Intrinsic::vector_reduce_and:
1713       Acc = Acc & X;
1714       break;
1715     case Intrinsic::vector_reduce_or:
1716       Acc = Acc | X;
1717       break;
1718     case Intrinsic::vector_reduce_xor:
1719       Acc = Acc ^ X;
1720       break;
1721     case Intrinsic::vector_reduce_smin:
1722       Acc = APIntOps::smin(Acc, X);
1723       break;
1724     case Intrinsic::vector_reduce_smax:
1725       Acc = APIntOps::smax(Acc, X);
1726       break;
1727     case Intrinsic::vector_reduce_umin:
1728       Acc = APIntOps::umin(Acc, X);
1729       break;
1730     case Intrinsic::vector_reduce_umax:
1731       Acc = APIntOps::umax(Acc, X);
1732       break;
1733     }
1734   }
1735 
1736   return ConstantInt::get(Op->getContext(), Acc);
1737 }
1738 
1739 /// Attempt to fold an SSE floating point to integer conversion of a constant
1740 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1741 /// used (toward nearest, ties to even). This matches the behavior of the
1742 /// non-truncating SSE instructions in the default rounding mode. The desired
1743 /// integer type Ty is used to select how many bits are available for the
1744 /// result. Returns null if the conversion cannot be performed, otherwise
1745 /// returns the Constant value resulting from the conversion.
1746 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1747                                       Type *Ty, bool IsSigned) {
1748   // All of these conversion intrinsics form an integer of at most 64bits.
1749   unsigned ResultWidth = Ty->getIntegerBitWidth();
1750   assert(ResultWidth <= 64 &&
1751          "Can only constant fold conversions to 64 and 32 bit ints");
1752 
1753   uint64_t UIntVal;
1754   bool isExact = false;
1755   APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1756                                               : APFloat::rmNearestTiesToEven;
1757   APFloat::opStatus status =
1758       Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1759                            IsSigned, mode, &isExact);
1760   if (status != APFloat::opOK &&
1761       (!roundTowardZero || status != APFloat::opInexact))
1762     return nullptr;
1763   return ConstantInt::get(Ty, UIntVal, IsSigned);
1764 }
1765 
1766 double getValueAsDouble(ConstantFP *Op) {
1767   Type *Ty = Op->getType();
1768 
1769   if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1770     return Op->getValueAPF().convertToDouble();
1771 
1772   bool unused;
1773   APFloat APF = Op->getValueAPF();
1774   APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1775   return APF.convertToDouble();
1776 }
1777 
1778 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1779   if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1780     C = &CI->getValue();
1781     return true;
1782   }
1783   if (isa<UndefValue>(Op)) {
1784     C = nullptr;
1785     return true;
1786   }
1787   return false;
1788 }
1789 
1790 /// Checks if the given intrinsic call, which evaluates to constant, is allowed
1791 /// to be folded.
1792 ///
1793 /// \param CI Constrained intrinsic call.
1794 /// \param St Exception flags raised during constant evaluation.
1795 static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
1796                                APFloat::opStatus St) {
1797   Optional<RoundingMode> ORM = CI->getRoundingMode();
1798   Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1799 
1800   // If the operation does not change exception status flags, it is safe
1801   // to fold.
1802   if (St == APFloat::opStatus::opOK)
1803     return true;
1804 
1805   // If evaluation raised FP exception, the result can depend on rounding
1806   // mode. If the latter is unknown, folding is not possible.
1807   if (ORM && *ORM == RoundingMode::Dynamic)
1808     return false;
1809 
1810   // If FP exceptions are ignored, fold the call, even if such exception is
1811   // raised.
1812   if (EB && *EB != fp::ExceptionBehavior::ebStrict)
1813     return true;
1814 
1815   // Leave the calculation for runtime so that exception flags be correctly set
1816   // in hardware.
1817   return false;
1818 }
1819 
1820 /// Returns the rounding mode that should be used for constant evaluation.
1821 static RoundingMode
1822 getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
1823   Optional<RoundingMode> ORM = CI->getRoundingMode();
1824   if (!ORM || *ORM == RoundingMode::Dynamic)
1825     // Even if the rounding mode is unknown, try evaluating the operation.
1826     // If it does not raise inexact exception, rounding was not applied,
1827     // so the result is exact and does not depend on rounding mode. Whether
1828     // other FP exceptions are raised, it does not depend on rounding mode.
1829     return RoundingMode::NearestTiesToEven;
1830   return *ORM;
1831 }
1832 
1833 static Constant *ConstantFoldScalarCall1(StringRef Name,
1834                                          Intrinsic::ID IntrinsicID,
1835                                          Type *Ty,
1836                                          ArrayRef<Constant *> Operands,
1837                                          const TargetLibraryInfo *TLI,
1838                                          const CallBase *Call) {
1839   assert(Operands.size() == 1 && "Wrong number of operands.");
1840 
1841   if (IntrinsicID == Intrinsic::is_constant) {
1842     // We know we have a "Constant" argument. But we want to only
1843     // return true for manifest constants, not those that depend on
1844     // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1845     if (Operands[0]->isManifestConstant())
1846       return ConstantInt::getTrue(Ty->getContext());
1847     return nullptr;
1848   }
1849   if (isa<UndefValue>(Operands[0])) {
1850     // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1851     // ctpop() is between 0 and bitwidth, pick 0 for undef.
1852     // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
1853     if (IntrinsicID == Intrinsic::cos ||
1854         IntrinsicID == Intrinsic::ctpop ||
1855         IntrinsicID == Intrinsic::fptoui_sat ||
1856         IntrinsicID == Intrinsic::fptosi_sat)
1857       return Constant::getNullValue(Ty);
1858     if (IntrinsicID == Intrinsic::bswap ||
1859         IntrinsicID == Intrinsic::bitreverse ||
1860         IntrinsicID == Intrinsic::launder_invariant_group ||
1861         IntrinsicID == Intrinsic::strip_invariant_group)
1862       return Operands[0];
1863   }
1864 
1865   if (isa<ConstantPointerNull>(Operands[0])) {
1866     // launder(null) == null == strip(null) iff in addrspace 0
1867     if (IntrinsicID == Intrinsic::launder_invariant_group ||
1868         IntrinsicID == Intrinsic::strip_invariant_group) {
1869       // If instruction is not yet put in a basic block (e.g. when cloning
1870       // a function during inlining), Call's caller may not be available.
1871       // So check Call's BB first before querying Call->getCaller.
1872       const Function *Caller =
1873           Call->getParent() ? Call->getCaller() : nullptr;
1874       if (Caller &&
1875           !NullPointerIsDefined(
1876               Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1877         return Operands[0];
1878       }
1879       return nullptr;
1880     }
1881   }
1882 
1883   if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1884     if (IntrinsicID == Intrinsic::convert_to_fp16) {
1885       APFloat Val(Op->getValueAPF());
1886 
1887       bool lost = false;
1888       Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1889 
1890       return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1891     }
1892 
1893     APFloat U = Op->getValueAPF();
1894 
1895     if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
1896         IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
1897       bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
1898 
1899       if (U.isNaN())
1900         return nullptr;
1901 
1902       unsigned Width = Ty->getIntegerBitWidth();
1903       APSInt Int(Width, !Signed);
1904       bool IsExact = false;
1905       APFloat::opStatus Status =
1906           U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1907 
1908       if (Status == APFloat::opOK || Status == APFloat::opInexact)
1909         return ConstantInt::get(Ty, Int);
1910 
1911       return nullptr;
1912     }
1913 
1914     if (IntrinsicID == Intrinsic::fptoui_sat ||
1915         IntrinsicID == Intrinsic::fptosi_sat) {
1916       // convertToInteger() already has the desired saturation semantics.
1917       APSInt Int(Ty->getIntegerBitWidth(),
1918                  IntrinsicID == Intrinsic::fptoui_sat);
1919       bool IsExact;
1920       U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1921       return ConstantInt::get(Ty, Int);
1922     }
1923 
1924     if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1925       return nullptr;
1926 
1927     // Use internal versions of these intrinsics.
1928 
1929     if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
1930       U.roundToIntegral(APFloat::rmNearestTiesToEven);
1931       return ConstantFP::get(Ty->getContext(), U);
1932     }
1933 
1934     if (IntrinsicID == Intrinsic::round) {
1935       U.roundToIntegral(APFloat::rmNearestTiesToAway);
1936       return ConstantFP::get(Ty->getContext(), U);
1937     }
1938 
1939     if (IntrinsicID == Intrinsic::roundeven) {
1940       U.roundToIntegral(APFloat::rmNearestTiesToEven);
1941       return ConstantFP::get(Ty->getContext(), U);
1942     }
1943 
1944     if (IntrinsicID == Intrinsic::ceil) {
1945       U.roundToIntegral(APFloat::rmTowardPositive);
1946       return ConstantFP::get(Ty->getContext(), U);
1947     }
1948 
1949     if (IntrinsicID == Intrinsic::floor) {
1950       U.roundToIntegral(APFloat::rmTowardNegative);
1951       return ConstantFP::get(Ty->getContext(), U);
1952     }
1953 
1954     if (IntrinsicID == Intrinsic::trunc) {
1955       U.roundToIntegral(APFloat::rmTowardZero);
1956       return ConstantFP::get(Ty->getContext(), U);
1957     }
1958 
1959     if (IntrinsicID == Intrinsic::fabs) {
1960       U.clearSign();
1961       return ConstantFP::get(Ty->getContext(), U);
1962     }
1963 
1964     if (IntrinsicID == Intrinsic::amdgcn_fract) {
1965       // The v_fract instruction behaves like the OpenCL spec, which defines
1966       // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
1967       //   there to prevent fract(-small) from returning 1.0. It returns the
1968       //   largest positive floating-point number less than 1.0."
1969       APFloat FloorU(U);
1970       FloorU.roundToIntegral(APFloat::rmTowardNegative);
1971       APFloat FractU(U - FloorU);
1972       APFloat AlmostOne(U.getSemantics(), 1);
1973       AlmostOne.next(/*nextDown*/ true);
1974       return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
1975     }
1976 
1977     // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
1978     // raise FP exceptions, unless the argument is signaling NaN.
1979 
1980     Optional<APFloat::roundingMode> RM;
1981     switch (IntrinsicID) {
1982     default:
1983       break;
1984     case Intrinsic::experimental_constrained_nearbyint:
1985     case Intrinsic::experimental_constrained_rint: {
1986       auto CI = cast<ConstrainedFPIntrinsic>(Call);
1987       RM = CI->getRoundingMode();
1988       if (!RM || RM.getValue() == RoundingMode::Dynamic)
1989         return nullptr;
1990       break;
1991     }
1992     case Intrinsic::experimental_constrained_round:
1993       RM = APFloat::rmNearestTiesToAway;
1994       break;
1995     case Intrinsic::experimental_constrained_ceil:
1996       RM = APFloat::rmTowardPositive;
1997       break;
1998     case Intrinsic::experimental_constrained_floor:
1999       RM = APFloat::rmTowardNegative;
2000       break;
2001     case Intrinsic::experimental_constrained_trunc:
2002       RM = APFloat::rmTowardZero;
2003       break;
2004     }
2005     if (RM) {
2006       auto CI = cast<ConstrainedFPIntrinsic>(Call);
2007       if (U.isFinite()) {
2008         APFloat::opStatus St = U.roundToIntegral(*RM);
2009         if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2010             St == APFloat::opInexact) {
2011           Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2012           if (EB && *EB == fp::ebStrict)
2013             return nullptr;
2014         }
2015       } else if (U.isSignaling()) {
2016         Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2017         if (EB && *EB != fp::ebIgnore)
2018           return nullptr;
2019         U = APFloat::getQNaN(U.getSemantics());
2020       }
2021       return ConstantFP::get(Ty->getContext(), U);
2022     }
2023 
2024     /// We only fold functions with finite arguments. Folding NaN and inf is
2025     /// likely to be aborted with an exception anyway, and some host libms
2026     /// have known errors raising exceptions.
2027     if (!U.isFinite())
2028       return nullptr;
2029 
2030     /// Currently APFloat versions of these functions do not exist, so we use
2031     /// the host native double versions.  Float versions are not called
2032     /// directly but for all these it is true (float)(f((double)arg)) ==
2033     /// f(arg).  Long double not supported yet.
2034     const APFloat &APF = Op->getValueAPF();
2035 
2036     switch (IntrinsicID) {
2037       default: break;
2038       case Intrinsic::log:
2039         return ConstantFoldFP(log, APF, Ty);
2040       case Intrinsic::log2:
2041         // TODO: What about hosts that lack a C99 library?
2042         return ConstantFoldFP(Log2, APF, Ty);
2043       case Intrinsic::log10:
2044         // TODO: What about hosts that lack a C99 library?
2045         return ConstantFoldFP(log10, APF, Ty);
2046       case Intrinsic::exp:
2047         return ConstantFoldFP(exp, APF, Ty);
2048       case Intrinsic::exp2:
2049         // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2050         return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2051       case Intrinsic::sin:
2052         return ConstantFoldFP(sin, APF, Ty);
2053       case Intrinsic::cos:
2054         return ConstantFoldFP(cos, APF, Ty);
2055       case Intrinsic::sqrt:
2056         return ConstantFoldFP(sqrt, APF, Ty);
2057       case Intrinsic::amdgcn_cos:
2058       case Intrinsic::amdgcn_sin: {
2059         double V = getValueAsDouble(Op);
2060         if (V < -256.0 || V > 256.0)
2061           // The gfx8 and gfx9 architectures handle arguments outside the range
2062           // [-256, 256] differently. This should be a rare case so bail out
2063           // rather than trying to handle the difference.
2064           return nullptr;
2065         bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2066         double V4 = V * 4.0;
2067         if (V4 == floor(V4)) {
2068           // Force exact results for quarter-integer inputs.
2069           const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2070           V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2071         } else {
2072           if (IsCos)
2073             V = cos(V * 2.0 * numbers::pi);
2074           else
2075             V = sin(V * 2.0 * numbers::pi);
2076         }
2077         return GetConstantFoldFPValue(V, Ty);
2078       }
2079     }
2080 
2081     if (!TLI)
2082       return nullptr;
2083 
2084     LibFunc Func = NotLibFunc;
2085     if (!TLI->getLibFunc(Name, Func))
2086       return nullptr;
2087 
2088     switch (Func) {
2089     default:
2090       break;
2091     case LibFunc_acos:
2092     case LibFunc_acosf:
2093     case LibFunc_acos_finite:
2094     case LibFunc_acosf_finite:
2095       if (TLI->has(Func))
2096         return ConstantFoldFP(acos, APF, Ty);
2097       break;
2098     case LibFunc_asin:
2099     case LibFunc_asinf:
2100     case LibFunc_asin_finite:
2101     case LibFunc_asinf_finite:
2102       if (TLI->has(Func))
2103         return ConstantFoldFP(asin, APF, Ty);
2104       break;
2105     case LibFunc_atan:
2106     case LibFunc_atanf:
2107       if (TLI->has(Func))
2108         return ConstantFoldFP(atan, APF, Ty);
2109       break;
2110     case LibFunc_ceil:
2111     case LibFunc_ceilf:
2112       if (TLI->has(Func)) {
2113         U.roundToIntegral(APFloat::rmTowardPositive);
2114         return ConstantFP::get(Ty->getContext(), U);
2115       }
2116       break;
2117     case LibFunc_cos:
2118     case LibFunc_cosf:
2119       if (TLI->has(Func))
2120         return ConstantFoldFP(cos, APF, Ty);
2121       break;
2122     case LibFunc_cosh:
2123     case LibFunc_coshf:
2124     case LibFunc_cosh_finite:
2125     case LibFunc_coshf_finite:
2126       if (TLI->has(Func))
2127         return ConstantFoldFP(cosh, APF, Ty);
2128       break;
2129     case LibFunc_exp:
2130     case LibFunc_expf:
2131     case LibFunc_exp_finite:
2132     case LibFunc_expf_finite:
2133       if (TLI->has(Func))
2134         return ConstantFoldFP(exp, APF, Ty);
2135       break;
2136     case LibFunc_exp2:
2137     case LibFunc_exp2f:
2138     case LibFunc_exp2_finite:
2139     case LibFunc_exp2f_finite:
2140       if (TLI->has(Func))
2141         // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2142         return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2143       break;
2144     case LibFunc_fabs:
2145     case LibFunc_fabsf:
2146       if (TLI->has(Func)) {
2147         U.clearSign();
2148         return ConstantFP::get(Ty->getContext(), U);
2149       }
2150       break;
2151     case LibFunc_floor:
2152     case LibFunc_floorf:
2153       if (TLI->has(Func)) {
2154         U.roundToIntegral(APFloat::rmTowardNegative);
2155         return ConstantFP::get(Ty->getContext(), U);
2156       }
2157       break;
2158     case LibFunc_log:
2159     case LibFunc_logf:
2160     case LibFunc_log_finite:
2161     case LibFunc_logf_finite:
2162       if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2163         return ConstantFoldFP(log, APF, Ty);
2164       break;
2165     case LibFunc_log2:
2166     case LibFunc_log2f:
2167     case LibFunc_log2_finite:
2168     case LibFunc_log2f_finite:
2169       if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2170         // TODO: What about hosts that lack a C99 library?
2171         return ConstantFoldFP(Log2, APF, Ty);
2172       break;
2173     case LibFunc_log10:
2174     case LibFunc_log10f:
2175     case LibFunc_log10_finite:
2176     case LibFunc_log10f_finite:
2177       if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2178         // TODO: What about hosts that lack a C99 library?
2179         return ConstantFoldFP(log10, APF, Ty);
2180       break;
2181     case LibFunc_nearbyint:
2182     case LibFunc_nearbyintf:
2183     case LibFunc_rint:
2184     case LibFunc_rintf:
2185       if (TLI->has(Func)) {
2186         U.roundToIntegral(APFloat::rmNearestTiesToEven);
2187         return ConstantFP::get(Ty->getContext(), U);
2188       }
2189       break;
2190     case LibFunc_round:
2191     case LibFunc_roundf:
2192       if (TLI->has(Func)) {
2193         U.roundToIntegral(APFloat::rmNearestTiesToAway);
2194         return ConstantFP::get(Ty->getContext(), U);
2195       }
2196       break;
2197     case LibFunc_sin:
2198     case LibFunc_sinf:
2199       if (TLI->has(Func))
2200         return ConstantFoldFP(sin, APF, Ty);
2201       break;
2202     case LibFunc_sinh:
2203     case LibFunc_sinhf:
2204     case LibFunc_sinh_finite:
2205     case LibFunc_sinhf_finite:
2206       if (TLI->has(Func))
2207         return ConstantFoldFP(sinh, APF, Ty);
2208       break;
2209     case LibFunc_sqrt:
2210     case LibFunc_sqrtf:
2211       if (!APF.isNegative() && TLI->has(Func))
2212         return ConstantFoldFP(sqrt, APF, Ty);
2213       break;
2214     case LibFunc_tan:
2215     case LibFunc_tanf:
2216       if (TLI->has(Func))
2217         return ConstantFoldFP(tan, APF, Ty);
2218       break;
2219     case LibFunc_tanh:
2220     case LibFunc_tanhf:
2221       if (TLI->has(Func))
2222         return ConstantFoldFP(tanh, APF, Ty);
2223       break;
2224     case LibFunc_trunc:
2225     case LibFunc_truncf:
2226       if (TLI->has(Func)) {
2227         U.roundToIntegral(APFloat::rmTowardZero);
2228         return ConstantFP::get(Ty->getContext(), U);
2229       }
2230       break;
2231     }
2232     return nullptr;
2233   }
2234 
2235   if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2236     switch (IntrinsicID) {
2237     case Intrinsic::bswap:
2238       return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2239     case Intrinsic::ctpop:
2240       return ConstantInt::get(Ty, Op->getValue().countPopulation());
2241     case Intrinsic::bitreverse:
2242       return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2243     case Intrinsic::convert_from_fp16: {
2244       APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2245 
2246       bool lost = false;
2247       APFloat::opStatus status = Val.convert(
2248           Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2249 
2250       // Conversion is always precise.
2251       (void)status;
2252       assert(status == APFloat::opOK && !lost &&
2253              "Precision lost during fp16 constfolding");
2254 
2255       return ConstantFP::get(Ty->getContext(), Val);
2256     }
2257     default:
2258       return nullptr;
2259     }
2260   }
2261 
2262   switch (IntrinsicID) {
2263   default: break;
2264   case Intrinsic::vector_reduce_add:
2265   case Intrinsic::vector_reduce_mul:
2266   case Intrinsic::vector_reduce_and:
2267   case Intrinsic::vector_reduce_or:
2268   case Intrinsic::vector_reduce_xor:
2269   case Intrinsic::vector_reduce_smin:
2270   case Intrinsic::vector_reduce_smax:
2271   case Intrinsic::vector_reduce_umin:
2272   case Intrinsic::vector_reduce_umax:
2273     if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2274       return C;
2275     break;
2276   }
2277 
2278   // Support ConstantVector in case we have an Undef in the top.
2279   if (isa<ConstantVector>(Operands[0]) ||
2280       isa<ConstantDataVector>(Operands[0])) {
2281     auto *Op = cast<Constant>(Operands[0]);
2282     switch (IntrinsicID) {
2283     default: break;
2284     case Intrinsic::x86_sse_cvtss2si:
2285     case Intrinsic::x86_sse_cvtss2si64:
2286     case Intrinsic::x86_sse2_cvtsd2si:
2287     case Intrinsic::x86_sse2_cvtsd2si64:
2288       if (ConstantFP *FPOp =
2289               dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2290         return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2291                                            /*roundTowardZero=*/false, Ty,
2292                                            /*IsSigned*/true);
2293       break;
2294     case Intrinsic::x86_sse_cvttss2si:
2295     case Intrinsic::x86_sse_cvttss2si64:
2296     case Intrinsic::x86_sse2_cvttsd2si:
2297     case Intrinsic::x86_sse2_cvttsd2si64:
2298       if (ConstantFP *FPOp =
2299               dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2300         return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2301                                            /*roundTowardZero=*/true, Ty,
2302                                            /*IsSigned*/true);
2303       break;
2304     }
2305   }
2306 
2307   return nullptr;
2308 }
2309 
2310 static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2311                                  const ConstrainedFPIntrinsic *Call) {
2312   APFloat::opStatus St = APFloat::opOK;
2313   auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
2314   FCmpInst::Predicate Cond = FCmp->getPredicate();
2315   if (FCmp->isSignaling()) {
2316     if (Op1.isNaN() || Op2.isNaN())
2317       St = APFloat::opInvalidOp;
2318   } else {
2319     if (Op1.isSignaling() || Op2.isSignaling())
2320       St = APFloat::opInvalidOp;
2321   }
2322   bool Result = FCmpInst::compare(Op1, Op2, Cond);
2323   if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2324     return ConstantInt::get(Call->getType()->getScalarType(), Result);
2325   return nullptr;
2326 }
2327 
2328 static Constant *ConstantFoldScalarCall2(StringRef Name,
2329                                          Intrinsic::ID IntrinsicID,
2330                                          Type *Ty,
2331                                          ArrayRef<Constant *> Operands,
2332                                          const TargetLibraryInfo *TLI,
2333                                          const CallBase *Call) {
2334   assert(Operands.size() == 2 && "Wrong number of operands.");
2335 
2336   if (Ty->isFloatingPointTy()) {
2337     // TODO: We should have undef handling for all of the FP intrinsics that
2338     //       are attempted to be folded in this function.
2339     bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2340     bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2341     switch (IntrinsicID) {
2342     case Intrinsic::maxnum:
2343     case Intrinsic::minnum:
2344     case Intrinsic::maximum:
2345     case Intrinsic::minimum:
2346       // If one argument is undef, return the other argument.
2347       if (IsOp0Undef)
2348         return Operands[1];
2349       if (IsOp1Undef)
2350         return Operands[0];
2351       break;
2352     }
2353   }
2354 
2355   if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2356     const APFloat &Op1V = Op1->getValueAPF();
2357 
2358     if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2359       if (Op2->getType() != Op1->getType())
2360         return nullptr;
2361       const APFloat &Op2V = Op2->getValueAPF();
2362 
2363       if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2364         RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2365         APFloat Res = Op1V;
2366         APFloat::opStatus St;
2367         switch (IntrinsicID) {
2368         default:
2369           return nullptr;
2370         case Intrinsic::experimental_constrained_fadd:
2371           St = Res.add(Op2V, RM);
2372           break;
2373         case Intrinsic::experimental_constrained_fsub:
2374           St = Res.subtract(Op2V, RM);
2375           break;
2376         case Intrinsic::experimental_constrained_fmul:
2377           St = Res.multiply(Op2V, RM);
2378           break;
2379         case Intrinsic::experimental_constrained_fdiv:
2380           St = Res.divide(Op2V, RM);
2381           break;
2382         case Intrinsic::experimental_constrained_frem:
2383           St = Res.mod(Op2V);
2384           break;
2385         case Intrinsic::experimental_constrained_fcmp:
2386         case Intrinsic::experimental_constrained_fcmps:
2387           return evaluateCompare(Op1V, Op2V, ConstrIntr);
2388         }
2389         if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2390                                St))
2391           return ConstantFP::get(Ty->getContext(), Res);
2392         return nullptr;
2393       }
2394 
2395       switch (IntrinsicID) {
2396       default:
2397         break;
2398       case Intrinsic::copysign:
2399         return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2400       case Intrinsic::minnum:
2401         return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2402       case Intrinsic::maxnum:
2403         return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2404       case Intrinsic::minimum:
2405         return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2406       case Intrinsic::maximum:
2407         return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2408       }
2409 
2410       if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2411         return nullptr;
2412 
2413       switch (IntrinsicID) {
2414       default:
2415         break;
2416       case Intrinsic::pow:
2417         return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2418       case Intrinsic::amdgcn_fmul_legacy:
2419         // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2420         // NaN or infinity, gives +0.0.
2421         if (Op1V.isZero() || Op2V.isZero())
2422           return ConstantFP::getNullValue(Ty);
2423         return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2424       }
2425 
2426       if (!TLI)
2427         return nullptr;
2428 
2429       LibFunc Func = NotLibFunc;
2430       if (!TLI->getLibFunc(Name, Func))
2431         return nullptr;
2432 
2433       switch (Func) {
2434       default:
2435         break;
2436       case LibFunc_pow:
2437       case LibFunc_powf:
2438       case LibFunc_pow_finite:
2439       case LibFunc_powf_finite:
2440         if (TLI->has(Func))
2441           return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2442         break;
2443       case LibFunc_fmod:
2444       case LibFunc_fmodf:
2445         if (TLI->has(Func)) {
2446           APFloat V = Op1->getValueAPF();
2447           if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2448             return ConstantFP::get(Ty->getContext(), V);
2449         }
2450         break;
2451       case LibFunc_remainder:
2452       case LibFunc_remainderf:
2453         if (TLI->has(Func)) {
2454           APFloat V = Op1->getValueAPF();
2455           if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2456             return ConstantFP::get(Ty->getContext(), V);
2457         }
2458         break;
2459       case LibFunc_atan2:
2460       case LibFunc_atan2f:
2461       case LibFunc_atan2_finite:
2462       case LibFunc_atan2f_finite:
2463         if (TLI->has(Func))
2464           return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2465         break;
2466       }
2467     } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2468       if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2469         return nullptr;
2470       if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2471         return ConstantFP::get(
2472             Ty->getContext(),
2473             APFloat((float)std::pow((float)Op1V.convertToDouble(),
2474                                     (int)Op2C->getZExtValue())));
2475       if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2476         return ConstantFP::get(
2477             Ty->getContext(),
2478             APFloat((float)std::pow((float)Op1V.convertToDouble(),
2479                                     (int)Op2C->getZExtValue())));
2480       if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2481         return ConstantFP::get(
2482             Ty->getContext(),
2483             APFloat((double)std::pow(Op1V.convertToDouble(),
2484                                      (int)Op2C->getZExtValue())));
2485 
2486       if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
2487         // FIXME: Should flush denorms depending on FP mode, but that's ignored
2488         // everywhere else.
2489 
2490         // scalbn is equivalent to ldexp with float radix 2
2491         APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
2492                                 APFloat::rmNearestTiesToEven);
2493         return ConstantFP::get(Ty->getContext(), Result);
2494       }
2495     }
2496     return nullptr;
2497   }
2498 
2499   if (Operands[0]->getType()->isIntegerTy() &&
2500       Operands[1]->getType()->isIntegerTy()) {
2501     const APInt *C0, *C1;
2502     if (!getConstIntOrUndef(Operands[0], C0) ||
2503         !getConstIntOrUndef(Operands[1], C1))
2504       return nullptr;
2505 
2506     switch (IntrinsicID) {
2507     default: break;
2508     case Intrinsic::smax:
2509     case Intrinsic::smin:
2510     case Intrinsic::umax:
2511     case Intrinsic::umin:
2512       // This is the same as for binary ops - poison propagates.
2513       // TODO: Poison handling should be consolidated.
2514       if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2515         return PoisonValue::get(Ty);
2516 
2517       if (!C0 && !C1)
2518         return UndefValue::get(Ty);
2519       if (!C0 || !C1)
2520         return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
2521       return ConstantInt::get(
2522           Ty, ICmpInst::compare(*C0, *C1,
2523                                 MinMaxIntrinsic::getPredicate(IntrinsicID))
2524                   ? *C0
2525                   : *C1);
2526 
2527     case Intrinsic::usub_with_overflow:
2528     case Intrinsic::ssub_with_overflow:
2529       // X - undef -> { 0, false }
2530       // undef - X -> { 0, false }
2531       if (!C0 || !C1)
2532         return Constant::getNullValue(Ty);
2533       LLVM_FALLTHROUGH;
2534     case Intrinsic::uadd_with_overflow:
2535     case Intrinsic::sadd_with_overflow:
2536       // X + undef -> { -1, false }
2537       // undef + x -> { -1, false }
2538       if (!C0 || !C1) {
2539         return ConstantStruct::get(
2540             cast<StructType>(Ty),
2541             {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2542              Constant::getNullValue(Ty->getStructElementType(1))});
2543       }
2544       LLVM_FALLTHROUGH;
2545     case Intrinsic::smul_with_overflow:
2546     case Intrinsic::umul_with_overflow: {
2547       // undef * X -> { 0, false }
2548       // X * undef -> { 0, false }
2549       if (!C0 || !C1)
2550         return Constant::getNullValue(Ty);
2551 
2552       APInt Res;
2553       bool Overflow;
2554       switch (IntrinsicID) {
2555       default: llvm_unreachable("Invalid case");
2556       case Intrinsic::sadd_with_overflow:
2557         Res = C0->sadd_ov(*C1, Overflow);
2558         break;
2559       case Intrinsic::uadd_with_overflow:
2560         Res = C0->uadd_ov(*C1, Overflow);
2561         break;
2562       case Intrinsic::ssub_with_overflow:
2563         Res = C0->ssub_ov(*C1, Overflow);
2564         break;
2565       case Intrinsic::usub_with_overflow:
2566         Res = C0->usub_ov(*C1, Overflow);
2567         break;
2568       case Intrinsic::smul_with_overflow:
2569         Res = C0->smul_ov(*C1, Overflow);
2570         break;
2571       case Intrinsic::umul_with_overflow:
2572         Res = C0->umul_ov(*C1, Overflow);
2573         break;
2574       }
2575       Constant *Ops[] = {
2576         ConstantInt::get(Ty->getContext(), Res),
2577         ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2578       };
2579       return ConstantStruct::get(cast<StructType>(Ty), Ops);
2580     }
2581     case Intrinsic::uadd_sat:
2582     case Intrinsic::sadd_sat:
2583       // This is the same as for binary ops - poison propagates.
2584       // TODO: Poison handling should be consolidated.
2585       if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2586         return PoisonValue::get(Ty);
2587 
2588       if (!C0 && !C1)
2589         return UndefValue::get(Ty);
2590       if (!C0 || !C1)
2591         return Constant::getAllOnesValue(Ty);
2592       if (IntrinsicID == Intrinsic::uadd_sat)
2593         return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2594       else
2595         return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2596     case Intrinsic::usub_sat:
2597     case Intrinsic::ssub_sat:
2598       // This is the same as for binary ops - poison propagates.
2599       // TODO: Poison handling should be consolidated.
2600       if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2601         return PoisonValue::get(Ty);
2602 
2603       if (!C0 && !C1)
2604         return UndefValue::get(Ty);
2605       if (!C0 || !C1)
2606         return Constant::getNullValue(Ty);
2607       if (IntrinsicID == Intrinsic::usub_sat)
2608         return ConstantInt::get(Ty, C0->usub_sat(*C1));
2609       else
2610         return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2611     case Intrinsic::cttz:
2612     case Intrinsic::ctlz:
2613       assert(C1 && "Must be constant int");
2614 
2615       // cttz(0, 1) and ctlz(0, 1) are poison.
2616       if (C1->isOne() && (!C0 || C0->isZero()))
2617         return PoisonValue::get(Ty);
2618       if (!C0)
2619         return Constant::getNullValue(Ty);
2620       if (IntrinsicID == Intrinsic::cttz)
2621         return ConstantInt::get(Ty, C0->countTrailingZeros());
2622       else
2623         return ConstantInt::get(Ty, C0->countLeadingZeros());
2624 
2625     case Intrinsic::abs:
2626       assert(C1 && "Must be constant int");
2627       assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
2628 
2629       // Undef or minimum val operand with poison min --> undef
2630       if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
2631         return UndefValue::get(Ty);
2632 
2633       // Undef operand with no poison min --> 0 (sign bit must be clear)
2634       if (!C0)
2635         return Constant::getNullValue(Ty);
2636 
2637       return ConstantInt::get(Ty, C0->abs());
2638     }
2639 
2640     return nullptr;
2641   }
2642 
2643   // Support ConstantVector in case we have an Undef in the top.
2644   if ((isa<ConstantVector>(Operands[0]) ||
2645        isa<ConstantDataVector>(Operands[0])) &&
2646       // Check for default rounding mode.
2647       // FIXME: Support other rounding modes?
2648       isa<ConstantInt>(Operands[1]) &&
2649       cast<ConstantInt>(Operands[1])->getValue() == 4) {
2650     auto *Op = cast<Constant>(Operands[0]);
2651     switch (IntrinsicID) {
2652     default: break;
2653     case Intrinsic::x86_avx512_vcvtss2si32:
2654     case Intrinsic::x86_avx512_vcvtss2si64:
2655     case Intrinsic::x86_avx512_vcvtsd2si32:
2656     case Intrinsic::x86_avx512_vcvtsd2si64:
2657       if (ConstantFP *FPOp =
2658               dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2659         return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2660                                            /*roundTowardZero=*/false, Ty,
2661                                            /*IsSigned*/true);
2662       break;
2663     case Intrinsic::x86_avx512_vcvtss2usi32:
2664     case Intrinsic::x86_avx512_vcvtss2usi64:
2665     case Intrinsic::x86_avx512_vcvtsd2usi32:
2666     case Intrinsic::x86_avx512_vcvtsd2usi64:
2667       if (ConstantFP *FPOp =
2668               dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2669         return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2670                                            /*roundTowardZero=*/false, Ty,
2671                                            /*IsSigned*/false);
2672       break;
2673     case Intrinsic::x86_avx512_cvttss2si:
2674     case Intrinsic::x86_avx512_cvttss2si64:
2675     case Intrinsic::x86_avx512_cvttsd2si:
2676     case Intrinsic::x86_avx512_cvttsd2si64:
2677       if (ConstantFP *FPOp =
2678               dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2679         return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2680                                            /*roundTowardZero=*/true, Ty,
2681                                            /*IsSigned*/true);
2682       break;
2683     case Intrinsic::x86_avx512_cvttss2usi:
2684     case Intrinsic::x86_avx512_cvttss2usi64:
2685     case Intrinsic::x86_avx512_cvttsd2usi:
2686     case Intrinsic::x86_avx512_cvttsd2usi64:
2687       if (ConstantFP *FPOp =
2688               dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2689         return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2690                                            /*roundTowardZero=*/true, Ty,
2691                                            /*IsSigned*/false);
2692       break;
2693     }
2694   }
2695   return nullptr;
2696 }
2697 
2698 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2699                                                const APFloat &S0,
2700                                                const APFloat &S1,
2701                                                const APFloat &S2) {
2702   unsigned ID;
2703   const fltSemantics &Sem = S0.getSemantics();
2704   APFloat MA(Sem), SC(Sem), TC(Sem);
2705   if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2706     if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2707       // S2 < 0
2708       ID = 5;
2709       SC = -S0;
2710     } else {
2711       ID = 4;
2712       SC = S0;
2713     }
2714     MA = S2;
2715     TC = -S1;
2716   } else if (abs(S1) >= abs(S0)) {
2717     if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2718       // S1 < 0
2719       ID = 3;
2720       TC = -S2;
2721     } else {
2722       ID = 2;
2723       TC = S2;
2724     }
2725     MA = S1;
2726     SC = S0;
2727   } else {
2728     if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2729       // S0 < 0
2730       ID = 1;
2731       SC = S2;
2732     } else {
2733       ID = 0;
2734       SC = -S2;
2735     }
2736     MA = S0;
2737     TC = -S1;
2738   }
2739   switch (IntrinsicID) {
2740   default:
2741     llvm_unreachable("unhandled amdgcn cube intrinsic");
2742   case Intrinsic::amdgcn_cubeid:
2743     return APFloat(Sem, ID);
2744   case Intrinsic::amdgcn_cubema:
2745     return MA + MA;
2746   case Intrinsic::amdgcn_cubesc:
2747     return SC;
2748   case Intrinsic::amdgcn_cubetc:
2749     return TC;
2750   }
2751 }
2752 
2753 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
2754                                                  Type *Ty) {
2755   const APInt *C0, *C1, *C2;
2756   if (!getConstIntOrUndef(Operands[0], C0) ||
2757       !getConstIntOrUndef(Operands[1], C1) ||
2758       !getConstIntOrUndef(Operands[2], C2))
2759     return nullptr;
2760 
2761   if (!C2)
2762     return UndefValue::get(Ty);
2763 
2764   APInt Val(32, 0);
2765   unsigned NumUndefBytes = 0;
2766   for (unsigned I = 0; I < 32; I += 8) {
2767     unsigned Sel = C2->extractBitsAsZExtValue(8, I);
2768     unsigned B = 0;
2769 
2770     if (Sel >= 13)
2771       B = 0xff;
2772     else if (Sel == 12)
2773       B = 0x00;
2774     else {
2775       const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
2776       if (!Src)
2777         ++NumUndefBytes;
2778       else if (Sel < 8)
2779         B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
2780       else
2781         B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
2782     }
2783 
2784     Val.insertBits(B, I, 8);
2785   }
2786 
2787   if (NumUndefBytes == 4)
2788     return UndefValue::get(Ty);
2789 
2790   return ConstantInt::get(Ty, Val);
2791 }
2792 
2793 static Constant *ConstantFoldScalarCall3(StringRef Name,
2794                                          Intrinsic::ID IntrinsicID,
2795                                          Type *Ty,
2796                                          ArrayRef<Constant *> Operands,
2797                                          const TargetLibraryInfo *TLI,
2798                                          const CallBase *Call) {
2799   assert(Operands.size() == 3 && "Wrong number of operands.");
2800 
2801   if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2802     if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2803       if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2804         const APFloat &C1 = Op1->getValueAPF();
2805         const APFloat &C2 = Op2->getValueAPF();
2806         const APFloat &C3 = Op3->getValueAPF();
2807 
2808         if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2809           RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2810           APFloat Res = C1;
2811           APFloat::opStatus St;
2812           switch (IntrinsicID) {
2813           default:
2814             return nullptr;
2815           case Intrinsic::experimental_constrained_fma:
2816           case Intrinsic::experimental_constrained_fmuladd:
2817             St = Res.fusedMultiplyAdd(C2, C3, RM);
2818             break;
2819           }
2820           if (mayFoldConstrained(
2821                   const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
2822             return ConstantFP::get(Ty->getContext(), Res);
2823           return nullptr;
2824         }
2825 
2826         switch (IntrinsicID) {
2827         default: break;
2828         case Intrinsic::amdgcn_fma_legacy: {
2829           // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2830           // NaN or infinity, gives +0.0.
2831           if (C1.isZero() || C2.isZero()) {
2832             // It's tempting to just return C3 here, but that would give the
2833             // wrong result if C3 was -0.0.
2834             return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
2835           }
2836           LLVM_FALLTHROUGH;
2837         }
2838         case Intrinsic::fma:
2839         case Intrinsic::fmuladd: {
2840           APFloat V = C1;
2841           V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
2842           return ConstantFP::get(Ty->getContext(), V);
2843         }
2844         case Intrinsic::amdgcn_cubeid:
2845         case Intrinsic::amdgcn_cubema:
2846         case Intrinsic::amdgcn_cubesc:
2847         case Intrinsic::amdgcn_cubetc: {
2848           APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
2849           return ConstantFP::get(Ty->getContext(), V);
2850         }
2851         }
2852       }
2853     }
2854   }
2855 
2856   if (IntrinsicID == Intrinsic::smul_fix ||
2857       IntrinsicID == Intrinsic::smul_fix_sat) {
2858     // poison * C -> poison
2859     // C * poison -> poison
2860     if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2861       return PoisonValue::get(Ty);
2862 
2863     const APInt *C0, *C1;
2864     if (!getConstIntOrUndef(Operands[0], C0) ||
2865         !getConstIntOrUndef(Operands[1], C1))
2866       return nullptr;
2867 
2868     // undef * C -> 0
2869     // C * undef -> 0
2870     if (!C0 || !C1)
2871       return Constant::getNullValue(Ty);
2872 
2873     // This code performs rounding towards negative infinity in case the result
2874     // cannot be represented exactly for the given scale. Targets that do care
2875     // about rounding should use a target hook for specifying how rounding
2876     // should be done, and provide their own folding to be consistent with
2877     // rounding. This is the same approach as used by
2878     // DAGTypeLegalizer::ExpandIntRes_MULFIX.
2879     unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
2880     unsigned Width = C0->getBitWidth();
2881     assert(Scale < Width && "Illegal scale.");
2882     unsigned ExtendedWidth = Width * 2;
2883     APInt Product = (C0->sextOrSelf(ExtendedWidth) *
2884                      C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
2885     if (IntrinsicID == Intrinsic::smul_fix_sat) {
2886       APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
2887       APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
2888       Product = APIntOps::smin(Product, Max);
2889       Product = APIntOps::smax(Product, Min);
2890     }
2891     return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
2892   }
2893 
2894   if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2895     const APInt *C0, *C1, *C2;
2896     if (!getConstIntOrUndef(Operands[0], C0) ||
2897         !getConstIntOrUndef(Operands[1], C1) ||
2898         !getConstIntOrUndef(Operands[2], C2))
2899       return nullptr;
2900 
2901     bool IsRight = IntrinsicID == Intrinsic::fshr;
2902     if (!C2)
2903       return Operands[IsRight ? 1 : 0];
2904     if (!C0 && !C1)
2905       return UndefValue::get(Ty);
2906 
2907     // The shift amount is interpreted as modulo the bitwidth. If the shift
2908     // amount is effectively 0, avoid UB due to oversized inverse shift below.
2909     unsigned BitWidth = C2->getBitWidth();
2910     unsigned ShAmt = C2->urem(BitWidth);
2911     if (!ShAmt)
2912       return Operands[IsRight ? 1 : 0];
2913 
2914     // (C0 << ShlAmt) | (C1 >> LshrAmt)
2915     unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2916     unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2917     if (!C0)
2918       return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2919     if (!C1)
2920       return ConstantInt::get(Ty, C0->shl(ShlAmt));
2921     return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2922   }
2923 
2924   if (IntrinsicID == Intrinsic::amdgcn_perm)
2925     return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
2926 
2927   return nullptr;
2928 }
2929 
2930 static Constant *ConstantFoldScalarCall(StringRef Name,
2931                                         Intrinsic::ID IntrinsicID,
2932                                         Type *Ty,
2933                                         ArrayRef<Constant *> Operands,
2934                                         const TargetLibraryInfo *TLI,
2935                                         const CallBase *Call) {
2936   if (Operands.size() == 1)
2937     return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
2938 
2939   if (Operands.size() == 2)
2940     return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
2941 
2942   if (Operands.size() == 3)
2943     return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
2944 
2945   return nullptr;
2946 }
2947 
2948 static Constant *ConstantFoldFixedVectorCall(
2949     StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
2950     ArrayRef<Constant *> Operands, const DataLayout &DL,
2951     const TargetLibraryInfo *TLI, const CallBase *Call) {
2952   SmallVector<Constant *, 4> Result(FVTy->getNumElements());
2953   SmallVector<Constant *, 4> Lane(Operands.size());
2954   Type *Ty = FVTy->getElementType();
2955 
2956   switch (IntrinsicID) {
2957   case Intrinsic::masked_load: {
2958     auto *SrcPtr = Operands[0];
2959     auto *Mask = Operands[2];
2960     auto *Passthru = Operands[3];
2961 
2962     Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
2963 
2964     SmallVector<Constant *, 32> NewElements;
2965     for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
2966       auto *MaskElt = Mask->getAggregateElement(I);
2967       if (!MaskElt)
2968         break;
2969       auto *PassthruElt = Passthru->getAggregateElement(I);
2970       auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2971       if (isa<UndefValue>(MaskElt)) {
2972         if (PassthruElt)
2973           NewElements.push_back(PassthruElt);
2974         else if (VecElt)
2975           NewElements.push_back(VecElt);
2976         else
2977           return nullptr;
2978       }
2979       if (MaskElt->isNullValue()) {
2980         if (!PassthruElt)
2981           return nullptr;
2982         NewElements.push_back(PassthruElt);
2983       } else if (MaskElt->isOneValue()) {
2984         if (!VecElt)
2985           return nullptr;
2986         NewElements.push_back(VecElt);
2987       } else {
2988         return nullptr;
2989       }
2990     }
2991     if (NewElements.size() != FVTy->getNumElements())
2992       return nullptr;
2993     return ConstantVector::get(NewElements);
2994   }
2995   case Intrinsic::arm_mve_vctp8:
2996   case Intrinsic::arm_mve_vctp16:
2997   case Intrinsic::arm_mve_vctp32:
2998   case Intrinsic::arm_mve_vctp64: {
2999     if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
3000       unsigned Lanes = FVTy->getNumElements();
3001       uint64_t Limit = Op->getZExtValue();
3002 
3003       SmallVector<Constant *, 16> NCs;
3004       for (unsigned i = 0; i < Lanes; i++) {
3005         if (i < Limit)
3006           NCs.push_back(ConstantInt::getTrue(Ty));
3007         else
3008           NCs.push_back(ConstantInt::getFalse(Ty));
3009       }
3010       return ConstantVector::get(NCs);
3011     }
3012     break;
3013   }
3014   case Intrinsic::get_active_lane_mask: {
3015     auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
3016     auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
3017     if (Op0 && Op1) {
3018       unsigned Lanes = FVTy->getNumElements();
3019       uint64_t Base = Op0->getZExtValue();
3020       uint64_t Limit = Op1->getZExtValue();
3021 
3022       SmallVector<Constant *, 16> NCs;
3023       for (unsigned i = 0; i < Lanes; i++) {
3024         if (Base + i < Limit)
3025           NCs.push_back(ConstantInt::getTrue(Ty));
3026         else
3027           NCs.push_back(ConstantInt::getFalse(Ty));
3028       }
3029       return ConstantVector::get(NCs);
3030     }
3031     break;
3032   }
3033   default:
3034     break;
3035   }
3036 
3037   for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3038     // Gather a column of constants.
3039     for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3040       // Some intrinsics use a scalar type for certain arguments.
3041       if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
3042         Lane[J] = Operands[J];
3043         continue;
3044       }
3045 
3046       Constant *Agg = Operands[J]->getAggregateElement(I);
3047       if (!Agg)
3048         return nullptr;
3049 
3050       Lane[J] = Agg;
3051     }
3052 
3053     // Use the regular scalar folding to simplify this column.
3054     Constant *Folded =
3055         ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
3056     if (!Folded)
3057       return nullptr;
3058     Result[I] = Folded;
3059   }
3060 
3061   return ConstantVector::get(Result);
3062 }
3063 
3064 static Constant *ConstantFoldScalableVectorCall(
3065     StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3066     ArrayRef<Constant *> Operands, const DataLayout &DL,
3067     const TargetLibraryInfo *TLI, const CallBase *Call) {
3068   switch (IntrinsicID) {
3069   case Intrinsic::aarch64_sve_convert_from_svbool: {
3070     auto *Src = dyn_cast<Constant>(Operands[0]);
3071     if (!Src || !Src->isNullValue())
3072       break;
3073 
3074     return ConstantInt::getFalse(SVTy);
3075   }
3076   default:
3077     break;
3078   }
3079   return nullptr;
3080 }
3081 
3082 } // end anonymous namespace
3083 
3084 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
3085                                  ArrayRef<Constant *> Operands,
3086                                  const TargetLibraryInfo *TLI) {
3087   if (Call->isNoBuiltin())
3088     return nullptr;
3089   if (!F->hasName())
3090     return nullptr;
3091 
3092   // If this is not an intrinsic and not recognized as a library call, bail out.
3093   if (F->getIntrinsicID() == Intrinsic::not_intrinsic) {
3094     if (!TLI)
3095       return nullptr;
3096     LibFunc LibF;
3097     if (!TLI->getLibFunc(*F, LibF))
3098       return nullptr;
3099   }
3100 
3101   StringRef Name = F->getName();
3102   Type *Ty = F->getReturnType();
3103   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
3104     return ConstantFoldFixedVectorCall(
3105         Name, F->getIntrinsicID(), FVTy, Operands,
3106         F->getParent()->getDataLayout(), TLI, Call);
3107 
3108   if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
3109     return ConstantFoldScalableVectorCall(
3110         Name, F->getIntrinsicID(), SVTy, Operands,
3111         F->getParent()->getDataLayout(), TLI, Call);
3112 
3113   // TODO: If this is a library function, we already discovered that above,
3114   //       so we should pass the LibFunc, not the name (and it might be better
3115   //       still to separate intrinsic handling from libcalls).
3116   return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
3117                                 Call);
3118 }
3119 
3120 bool llvm::isMathLibCallNoop(const CallBase *Call,
3121                              const TargetLibraryInfo *TLI) {
3122   // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3123   // (and to some extent ConstantFoldScalarCall).
3124   if (Call->isNoBuiltin() || Call->isStrictFP())
3125     return false;
3126   Function *F = Call->getCalledFunction();
3127   if (!F)
3128     return false;
3129 
3130   LibFunc Func;
3131   if (!TLI || !TLI->getLibFunc(*F, Func))
3132     return false;
3133 
3134   if (Call->arg_size() == 1) {
3135     if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3136       const APFloat &Op = OpC->getValueAPF();
3137       switch (Func) {
3138       case LibFunc_logl:
3139       case LibFunc_log:
3140       case LibFunc_logf:
3141       case LibFunc_log2l:
3142       case LibFunc_log2:
3143       case LibFunc_log2f:
3144       case LibFunc_log10l:
3145       case LibFunc_log10:
3146       case LibFunc_log10f:
3147         return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
3148 
3149       case LibFunc_expl:
3150       case LibFunc_exp:
3151       case LibFunc_expf:
3152         // FIXME: These boundaries are slightly conservative.
3153         if (OpC->getType()->isDoubleTy())
3154           return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
3155         if (OpC->getType()->isFloatTy())
3156           return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
3157         break;
3158 
3159       case LibFunc_exp2l:
3160       case LibFunc_exp2:
3161       case LibFunc_exp2f:
3162         // FIXME: These boundaries are slightly conservative.
3163         if (OpC->getType()->isDoubleTy())
3164           return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
3165         if (OpC->getType()->isFloatTy())
3166           return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
3167         break;
3168 
3169       case LibFunc_sinl:
3170       case LibFunc_sin:
3171       case LibFunc_sinf:
3172       case LibFunc_cosl:
3173       case LibFunc_cos:
3174       case LibFunc_cosf:
3175         return !Op.isInfinity();
3176 
3177       case LibFunc_tanl:
3178       case LibFunc_tan:
3179       case LibFunc_tanf: {
3180         // FIXME: Stop using the host math library.
3181         // FIXME: The computation isn't done in the right precision.
3182         Type *Ty = OpC->getType();
3183         if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
3184           return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
3185         break;
3186       }
3187 
3188       case LibFunc_asinl:
3189       case LibFunc_asin:
3190       case LibFunc_asinf:
3191       case LibFunc_acosl:
3192       case LibFunc_acos:
3193       case LibFunc_acosf:
3194         return !(Op < APFloat(Op.getSemantics(), "-1") ||
3195                  Op > APFloat(Op.getSemantics(), "1"));
3196 
3197       case LibFunc_sinh:
3198       case LibFunc_cosh:
3199       case LibFunc_sinhf:
3200       case LibFunc_coshf:
3201       case LibFunc_sinhl:
3202       case LibFunc_coshl:
3203         // FIXME: These boundaries are slightly conservative.
3204         if (OpC->getType()->isDoubleTy())
3205           return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
3206         if (OpC->getType()->isFloatTy())
3207           return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
3208         break;
3209 
3210       case LibFunc_sqrtl:
3211       case LibFunc_sqrt:
3212       case LibFunc_sqrtf:
3213         return Op.isNaN() || Op.isZero() || !Op.isNegative();
3214 
3215       // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
3216       // maybe others?
3217       default:
3218         break;
3219       }
3220     }
3221   }
3222 
3223   if (Call->arg_size() == 2) {
3224     ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
3225     ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
3226     if (Op0C && Op1C) {
3227       const APFloat &Op0 = Op0C->getValueAPF();
3228       const APFloat &Op1 = Op1C->getValueAPF();
3229 
3230       switch (Func) {
3231       case LibFunc_powl:
3232       case LibFunc_pow:
3233       case LibFunc_powf: {
3234         // FIXME: Stop using the host math library.
3235         // FIXME: The computation isn't done in the right precision.
3236         Type *Ty = Op0C->getType();
3237         if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
3238           if (Ty == Op1C->getType())
3239             return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
3240         }
3241         break;
3242       }
3243 
3244       case LibFunc_fmodl:
3245       case LibFunc_fmod:
3246       case LibFunc_fmodf:
3247       case LibFunc_remainderl:
3248       case LibFunc_remainder:
3249       case LibFunc_remainderf:
3250         return Op0.isNaN() || Op1.isNaN() ||
3251                (!Op0.isInfinity() && !Op1.isZero());
3252 
3253       default:
3254         break;
3255       }
3256     }
3257   }
3258 
3259   return false;
3260 }
3261 
3262 void TargetFolder::anchor() {}
3263