1 //===------ SimplifyLibCalls.cpp - Library calls simplifier ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the library calls simplifier. It does not implement
10 // any pass, but can be used by other passes to do simplifications.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
15 #include "llvm/ADT/APSInt.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/StringMap.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/Analysis/BlockFrequencyInfo.h"
20 #include "llvm/Analysis/ConstantFolding.h"
21 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
22 #include "llvm/Analysis/ProfileSummaryInfo.h"
23 #include "llvm/Transforms/Utils/Local.h"
24 #include "llvm/Analysis/ValueTracking.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/IRBuilder.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/LLVMContext.h"
33 #include "llvm/IR/Module.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/KnownBits.h"
37 #include "llvm/Support/MathExtras.h"
38 #include "llvm/Transforms/Utils/BuildLibCalls.h"
39 #include "llvm/Transforms/Utils/SizeOpts.h"
40 
41 using namespace llvm;
42 using namespace PatternMatch;
43 
44 static cl::opt<bool>
45     EnableUnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
46                          cl::init(false),
47                          cl::desc("Enable unsafe double to float "
48                                   "shrinking for math lib calls"));
49 
50 //===----------------------------------------------------------------------===//
51 // Helper Functions
52 //===----------------------------------------------------------------------===//
53 
54 static bool ignoreCallingConv(LibFunc Func) {
55   return Func == LibFunc_abs || Func == LibFunc_labs ||
56          Func == LibFunc_llabs || Func == LibFunc_strlen;
57 }
58 
59 /// Return true if V is only used in equality comparisons with With.
60 static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
61   for (User *U : V->users()) {
62     if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
63       if (IC->isEquality() && IC->getOperand(1) == With)
64         continue;
65     // Unknown instruction.
66     return false;
67   }
68   return true;
69 }
70 
71 static bool callHasFloatingPointArgument(const CallInst *CI) {
72   return any_of(CI->operands(), [](const Use &OI) {
73     return OI->getType()->isFloatingPointTy();
74   });
75 }
76 
77 static bool callHasFP128Argument(const CallInst *CI) {
78   return any_of(CI->operands(), [](const Use &OI) {
79     return OI->getType()->isFP128Ty();
80   });
81 }
82 
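// Convert the constant string Str to an integer of CI's result type, following
// strtol-family semantics: only bases 2-36 (or the special base 0) are
// accepted, the parse is done on the host with strtoll, and we bail out if the
// value does not fit the result type.  For example (illustrative), folding a
// call such as atoi("42") produces the i32 constant 42.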
83 static Value *convertStrToNumber(CallInst *CI, StringRef &Str, int64_t Base) {
84   if (Base < 2 || Base > 36)
85     // handle special zero base
86     if (Base != 0)
87       return nullptr;
88 
89   char *End;
90   std::string nptr = Str.str();
91   errno = 0;
92   long long int Result = strtoll(nptr.c_str(), &End, Base);
93   if (errno)
94     return nullptr;
95 
96   // If we assume all possible target locales are ASCII supersets,
97   // then if strtoll successfully parses a number on the host,
98   // it will also successfully parse the same way on the target.
99   if (*End != '\0')
100     return nullptr;
101 
102   if (!isIntN(CI->getType()->getPrimitiveSizeInBits(), Result))
103     return nullptr;
104 
105   return ConstantInt::get(CI->getType(), Result);
106 }
107 
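// Return true if every user of V is an icmp whose right-hand operand is a
// constant zero (null) value, i.e. the value is only ever tested against zero.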
108 static bool isOnlyUsedInComparisonWithZero(Value *V) {
109   for (User *U : V->users()) {
110     if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
111       if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
112         if (C->isNullValue())
113           continue;
114     // Unknown instruction.
115     return false;
116   }
117   return true;
118 }
119 
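// Return true if it is safe to turn a string comparison call CI, whose
// non-constant operand is Str with Len bytes to be compared, into memcmp:
// the result must only be compared against zero, Str must be known
// dereferenceable for Len bytes, and MemorySanitizer must be disabled (the
// memcmp may read bytes past Str's terminating nul).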
120 static bool canTransformToMemCmp(CallInst *CI, Value *Str, uint64_t Len,
121                                  const DataLayout &DL) {
122   if (!isOnlyUsedInComparisonWithZero(CI))
123     return false;
124 
125   if (!isDereferenceableAndAlignedPointer(Str, Align(1), APInt(64, Len), DL))
126     return false;
127 
128   if (CI->getFunction()->hasFnAttribute(Attribute::SanitizeMemory))
129     return false;
130 
131   return true;
132 }
133 
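// Raise the dereferenceable byte count on each argument in ArgNos to at least
// DereferenceableBytes.  When the argument is known nonnull (or null is not a
// defined address in its address space), any dereferenceable_or_null
// information is folded in as well.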
134 static void annotateDereferenceableBytes(CallInst *CI,
135                                          ArrayRef<unsigned> ArgNos,
136                                          uint64_t DereferenceableBytes) {
137   const Function *F = CI->getCaller();
138   if (!F)
139     return;
140   for (unsigned ArgNo : ArgNos) {
141     uint64_t DerefBytes = DereferenceableBytes;
142     unsigned AS = CI->getArgOperand(ArgNo)->getType()->getPointerAddressSpace();
143     if (!llvm::NullPointerIsDefined(F, AS) ||
144         CI->paramHasAttr(ArgNo, Attribute::NonNull))
145       DerefBytes = std::max(CI->getDereferenceableOrNullBytes(
146                                 ArgNo + AttributeList::FirstArgIndex),
147                             DereferenceableBytes);
148 
149     if (CI->getDereferenceableBytes(ArgNo + AttributeList::FirstArgIndex) <
150         DerefBytes) {
151       CI->removeParamAttr(ArgNo, Attribute::Dereferenceable);
152       if (!llvm::NullPointerIsDefined(F, AS) ||
153           CI->paramHasAttr(ArgNo, Attribute::NonNull))
154         CI->removeParamAttr(ArgNo, Attribute::DereferenceableOrNull);
155       CI->addParamAttr(ArgNo, Attribute::getWithDereferenceableBytes(
156                                   CI->getContext(), DerefBytes));
157     }
158   }
159 }
160 
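// Mark each accessed pointer argument in ArgNos as noundef, and additionally
// as nonnull and dereferenceable(1) when null is not a defined address in the
// argument's address space.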
161 static void annotateNonNullNoUndefBasedOnAccess(CallInst *CI,
162                                          ArrayRef<unsigned> ArgNos) {
163   Function *F = CI->getCaller();
164   if (!F)
165     return;
166 
167   for (unsigned ArgNo : ArgNos) {
168     if (!CI->paramHasAttr(ArgNo, Attribute::NoUndef))
169       CI->addParamAttr(ArgNo, Attribute::NoUndef);
170 
171     if (CI->paramHasAttr(ArgNo, Attribute::NonNull))
172       continue;
173     unsigned AS = CI->getArgOperand(ArgNo)->getType()->getPointerAddressSpace();
174     if (llvm::NullPointerIsDefined(F, AS))
175       continue;
176 
177     CI->addParamAttr(ArgNo, Attribute::NonNull);
178     annotateDereferenceableBytes(CI, ArgNo, 1);
179   }
180 }
181 
182 static void annotateNonNullAndDereferenceable(CallInst *CI, ArrayRef<unsigned> ArgNos,
183                                Value *Size, const DataLayout &DL) {
184   if (ConstantInt *LenC = dyn_cast<ConstantInt>(Size)) {
185     annotateNonNullNoUndefBasedOnAccess(CI, ArgNos);
186     annotateDereferenceableBytes(CI, ArgNos, LenC->getZExtValue());
187   } else if (isKnownNonZero(Size, DL)) {
188     annotateNonNullNoUndefBasedOnAccess(CI, ArgNos);
189     const APInt *X, *Y;
190     uint64_t DerefMin = 1;
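    // If the size is a select between two constants, e.g. (illustrative)
    // "select i1 %cond, i64 4, i64 8", then at least the smaller of the two
    // values is dereferenced whichever arm is taken.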
191     if (match(Size, m_Select(m_Value(), m_APInt(X), m_APInt(Y)))) {
192       DerefMin = std::min(X->getZExtValue(), Y->getZExtValue());
193       annotateDereferenceableBytes(CI, ArgNos, DerefMin);
194     }
195   }
196 }
197 
198 //===----------------------------------------------------------------------===//
199 // String and Memory Library Call Optimizations
200 //===----------------------------------------------------------------------===//
201 
202 Value *LibCallSimplifier::optimizeStrCat(CallInst *CI, IRBuilderBase &B) {
203   // Extract some information from the instruction
204   Value *Dst = CI->getArgOperand(0);
205   Value *Src = CI->getArgOperand(1);
206   annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
207 
208   // See if we can get the length of the input string.
209   uint64_t Len = GetStringLength(Src);
210   if (Len)
211     annotateDereferenceableBytes(CI, 1, Len);
212   else
213     return nullptr;
214   --Len; // Unbias length.
215 
216   // Handle the simple, do-nothing case: strcat(x, "") -> x
217   if (Len == 0)
218     return Dst;
219 
220   return emitStrLenMemCpy(Src, Dst, Len, B);
221 }
222 
223 Value *LibCallSimplifier::emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
224                                            IRBuilderBase &B) {
225   // We need to find the end of the destination string.  That's where the
226   // memory is to be moved to. We just generate a call to strlen.
227   Value *DstLen = emitStrLen(Dst, B, DL, TLI);
228   if (!DstLen)
229     return nullptr;
230 
231   // Now that we have the destination's length, we must index into the
232   // destination's pointer to get the actual memcpy destination (end of
233   // the string .. we're concatenating).
234   Value *CpyDst = B.CreateGEP(B.getInt8Ty(), Dst, DstLen, "endptr");
235 
236   // We have enough information to now generate the memcpy call to do the
237   // concatenation for us.  Make a memcpy to copy the nul byte with align = 1.
238   B.CreateMemCpy(
239       CpyDst, Align(1), Src, Align(1),
240       ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
241   return Dst;
242 }
243 
244 Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilderBase &B) {
245   // Extract some information from the instruction.
246   Value *Dst = CI->getArgOperand(0);
247   Value *Src = CI->getArgOperand(1);
248   Value *Size = CI->getArgOperand(2);
249   uint64_t Len;
250   annotateNonNullNoUndefBasedOnAccess(CI, 0);
251   if (isKnownNonZero(Size, DL))
252     annotateNonNullNoUndefBasedOnAccess(CI, 1);
253 
254   // We don't do anything if length is not constant.
255   ConstantInt *LengthArg = dyn_cast<ConstantInt>(Size);
256   if (LengthArg) {
257     Len = LengthArg->getZExtValue();
258     // strncat(x, c, 0) -> x
259     if (!Len)
260       return Dst;
261   } else {
262     return nullptr;
263   }
264 
265   // See if we can get the length of the input string.
266   uint64_t SrcLen = GetStringLength(Src);
267   if (SrcLen) {
268     annotateDereferenceableBytes(CI, 1, SrcLen);
269     --SrcLen; // Unbias length.
270   } else {
271     return nullptr;
272   }
273 
274   // strncat(x, "", c) -> x
275   if (SrcLen == 0)
276     return Dst;
277 
278   // We don't optimize this case.
279   if (Len < SrcLen)
280     return nullptr;
281 
282   // strncat(x, s, c) -> strcat(x, s)
283   // s is constant so the strcat can be optimized further.
284   return emitStrLenMemCpy(Src, Dst, SrcLen, B);
285 }
286 
287 Value *LibCallSimplifier::optimizeStrChr(CallInst *CI, IRBuilderBase &B) {
288   Function *Callee = CI->getCalledFunction();
289   FunctionType *FT = Callee->getFunctionType();
290   Value *SrcStr = CI->getArgOperand(0);
291   annotateNonNullNoUndefBasedOnAccess(CI, 0);
292 
293   // If the second operand is non-constant, see if we can compute the length
294   // of the input string and turn this into memchr.
295   ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
296   if (!CharC) {
297     uint64_t Len = GetStringLength(SrcStr);
298     if (Len)
299       annotateDereferenceableBytes(CI, 0, Len);
300     else
301       return nullptr;
302     if (!FT->getParamType(1)->isIntegerTy(32)) // memchr needs i32.
303       return nullptr;
304 
305     return emitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
306                       ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len),
307                       B, DL, TLI);
308   }
309 
310   // Otherwise, the character is a constant, see if the first argument is
311   // a string literal.  If so, we can constant fold.
312   StringRef Str;
313   if (!getConstantStringInfo(SrcStr, Str)) {
314     if (CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
315       if (Value *StrLen = emitStrLen(SrcStr, B, DL, TLI))
316         return B.CreateGEP(B.getInt8Ty(), SrcStr, StrLen, "strchr");
317     return nullptr;
318   }
319 
320   // Compute the offset, make sure to handle the case when we're searching for
321   // zero (a weird way to spell strlen).
322   size_t I = (0xFF & CharC->getSExtValue()) == 0
323                  ? Str.size()
324                  : Str.find(CharC->getSExtValue());
325   if (I == StringRef::npos) // Didn't find the char.  strchr returns null.
326     return Constant::getNullValue(CI->getType());
327 
328   // strchr(s+n,c)  -> gep(s+n+i,c)
329   return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strchr");
330 }
331 
332 Value *LibCallSimplifier::optimizeStrRChr(CallInst *CI, IRBuilderBase &B) {
333   Value *SrcStr = CI->getArgOperand(0);
334   ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
335   annotateNonNullNoUndefBasedOnAccess(CI, 0);
336 
337   // Cannot fold anything if we're not looking for a constant.
338   if (!CharC)
339     return nullptr;
340 
341   StringRef Str;
342   if (!getConstantStringInfo(SrcStr, Str)) {
343     // strrchr(s, 0) -> strchr(s, 0)
344     if (CharC->isZero())
345       return emitStrChr(SrcStr, '\0', B, TLI);
346     return nullptr;
347   }
348 
349   // Compute the offset.
350   size_t I = (0xFF & CharC->getSExtValue()) == 0
351                  ? Str.size()
352                  : Str.rfind(CharC->getSExtValue());
353   if (I == StringRef::npos) // Didn't find the char. Return null.
354     return Constant::getNullValue(CI->getType());
355 
356   // strrchr(s+n,c) -> gep(s+n+i,c)
357   return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strrchr");
358 }
359 
360 Value *LibCallSimplifier::optimizeStrCmp(CallInst *CI, IRBuilderBase &B) {
361   Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
362   if (Str1P == Str2P) // strcmp(x,x)  -> 0
363     return ConstantInt::get(CI->getType(), 0);
364 
365   StringRef Str1, Str2;
366   bool HasStr1 = getConstantStringInfo(Str1P, Str1);
367   bool HasStr2 = getConstantStringInfo(Str2P, Str2);
368 
369   // strcmp(x, y)  -> cnst  (if both x and y are constant strings)
370   if (HasStr1 && HasStr2)
371     return ConstantInt::get(CI->getType(), Str1.compare(Str2));
372 
373   if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
374     return B.CreateNeg(B.CreateZExt(
375         B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
376 
377   if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
378     return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
379                         CI->getType());
380 
381   // strcmp(P, "x") -> memcmp(P, "x", 2)
382   uint64_t Len1 = GetStringLength(Str1P);
383   if (Len1)
384     annotateDereferenceableBytes(CI, 0, Len1);
385   uint64_t Len2 = GetStringLength(Str2P);
386   if (Len2)
387     annotateDereferenceableBytes(CI, 1, Len2);
388 
389   if (Len1 && Len2) {
390     return emitMemCmp(Str1P, Str2P,
391                       ConstantInt::get(DL.getIntPtrType(CI->getContext()),
392                                        std::min(Len1, Len2)),
393                       B, DL, TLI);
394   }
395 
396   // strcmp to memcmp
397   if (!HasStr1 && HasStr2) {
398     if (canTransformToMemCmp(CI, Str1P, Len2, DL))
399       return emitMemCmp(
400           Str1P, Str2P,
401           ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len2), B, DL,
402           TLI);
403   } else if (HasStr1 && !HasStr2) {
404     if (canTransformToMemCmp(CI, Str2P, Len1, DL))
405       return emitMemCmp(
406           Str1P, Str2P,
407           ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len1), B, DL,
408           TLI);
409   }
410 
411   annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
412   return nullptr;
413 }
414 
415 Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilderBase &B) {
416   Value *Str1P = CI->getArgOperand(0);
417   Value *Str2P = CI->getArgOperand(1);
418   Value *Size = CI->getArgOperand(2);
419   if (Str1P == Str2P) // strncmp(x,x,n)  -> 0
420     return ConstantInt::get(CI->getType(), 0);
421 
422   if (isKnownNonZero(Size, DL))
423     annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
424   // Get the length argument if it is constant.
425   uint64_t Length;
426   if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(Size))
427     Length = LengthArg->getZExtValue();
428   else
429     return nullptr;
430 
431   if (Length == 0) // strncmp(x,y,0)   -> 0
432     return ConstantInt::get(CI->getType(), 0);
433 
434   if (Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
435     return emitMemCmp(Str1P, Str2P, Size, B, DL, TLI);
436 
437   StringRef Str1, Str2;
438   bool HasStr1 = getConstantStringInfo(Str1P, Str1);
439   bool HasStr2 = getConstantStringInfo(Str2P, Str2);
440 
441   // strncmp(x, y, n) -> cnst  (if both x and y are constant strings)
442   if (HasStr1 && HasStr2) {
443     StringRef SubStr1 = Str1.substr(0, Length);
444     StringRef SubStr2 = Str2.substr(0, Length);
445     return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
446   }
447 
448   if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
449     return B.CreateNeg(B.CreateZExt(
450         B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
451 
452   if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
453     return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
454                         CI->getType());
455 
456   uint64_t Len1 = GetStringLength(Str1P);
457   if (Len1)
458     annotateDereferenceableBytes(CI, 0, Len1);
459   uint64_t Len2 = GetStringLength(Str2P);
460   if (Len2)
461     annotateDereferenceableBytes(CI, 1, Len2);
462 
463   // strncmp to memcmp
464   if (!HasStr1 && HasStr2) {
465     Len2 = std::min(Len2, Length);
466     if (canTransformToMemCmp(CI, Str1P, Len2, DL))
467       return emitMemCmp(
468           Str1P, Str2P,
469           ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len2), B, DL,
470           TLI);
471   } else if (HasStr1 && !HasStr2) {
472     Len1 = std::min(Len1, Length);
473     if (canTransformToMemCmp(CI, Str2P, Len1, DL))
474       return emitMemCmp(
475           Str1P, Str2P,
476           ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len1), B, DL,
477           TLI);
478   }
479 
480   return nullptr;
481 }
482 
483 Value *LibCallSimplifier::optimizeStrNDup(CallInst *CI, IRBuilderBase &B) {
484   Value *Src = CI->getArgOperand(0);
485   ConstantInt *Size = dyn_cast<ConstantInt>(CI->getArgOperand(1));
486   uint64_t SrcLen = GetStringLength(Src);
487   if (SrcLen && Size) {
488     annotateDereferenceableBytes(CI, 0, SrcLen);
489     if (SrcLen <= Size->getZExtValue() + 1)
490       return emitStrDup(Src, B, TLI);
491   }
492 
493   return nullptr;
494 }
495 
496 Value *LibCallSimplifier::optimizeStrCpy(CallInst *CI, IRBuilderBase &B) {
497   Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
498   if (Dst == Src) // strcpy(x,x)  -> x
499     return Src;
500 
501   annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
502   // See if we can get the length of the input string.
503   uint64_t Len = GetStringLength(Src);
504   if (Len)
505     annotateDereferenceableBytes(CI, 1, Len);
506   else
507     return nullptr;
508 
509   // We have enough information to now generate the memcpy call to do the
510   // copy for us.  Make a memcpy to copy the nul byte with align = 1.
511   CallInst *NewCI =
512       B.CreateMemCpy(Dst, Align(1), Src, Align(1),
513                      ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len));
514   NewCI->setAttributes(CI->getAttributes());
515   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
516   return Dst;
517 }
518 
519 Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilderBase &B) {
520   Function *Callee = CI->getCalledFunction();
521   Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
522   if (Dst == Src) { // stpcpy(x,x)  -> x+strlen(x)
523     Value *StrLen = emitStrLen(Src, B, DL, TLI);
524     return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
525   }
526 
527   // See if we can get the length of the input string.
528   uint64_t Len = GetStringLength(Src);
529   if (Len)
530     annotateDereferenceableBytes(CI, 1, Len);
531   else
532     return nullptr;
533 
534   Type *PT = Callee->getFunctionType()->getParamType(0);
535   Value *LenV = ConstantInt::get(DL.getIntPtrType(PT), Len);
536   Value *DstEnd = B.CreateGEP(B.getInt8Ty(), Dst,
537                               ConstantInt::get(DL.getIntPtrType(PT), Len - 1));
538 
539   // We have enough information to now generate the memcpy call to do the
540   // copy for us.  Make a memcpy to copy the nul byte with align = 1.
541   CallInst *NewCI = B.CreateMemCpy(Dst, Align(1), Src, Align(1), LenV);
542   NewCI->setAttributes(CI->getAttributes());
543   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
544   return DstEnd;
545 }
546 
547 Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilderBase &B) {
548   Function *Callee = CI->getCalledFunction();
549   Value *Dst = CI->getArgOperand(0);
550   Value *Src = CI->getArgOperand(1);
551   Value *Size = CI->getArgOperand(2);
552   annotateNonNullNoUndefBasedOnAccess(CI, 0);
553   if (isKnownNonZero(Size, DL))
554     annotateNonNullNoUndefBasedOnAccess(CI, 1);
555 
556   uint64_t Len;
557   if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(Size))
558     Len = LengthArg->getZExtValue();
559   else
560     return nullptr;
561 
562   // strncpy(x, y, 0) -> x
563   if (Len == 0)
564     return Dst;
565 
566   // See if we can get the length of the input string.
567   uint64_t SrcLen = GetStringLength(Src);
568   if (SrcLen) {
569     annotateDereferenceableBytes(CI, 1, SrcLen);
570     --SrcLen; // Unbias length.
571   } else {
572     return nullptr;
573   }
574 
575   if (SrcLen == 0) {
576     // strncpy(x, "", y) -> memset(x, '\0', y)
577     Align MemSetAlign =
578         CI->getAttributes().getParamAttrs(0).getAlignment().valueOrOne();
579     CallInst *NewCI = B.CreateMemSet(Dst, B.getInt8('\0'), Size, MemSetAlign);
580     AttrBuilder ArgAttrs(CI->getAttributes().getParamAttrs(0));
581     NewCI->setAttributes(NewCI->getAttributes().addParamAttributes(
582         CI->getContext(), 0, ArgAttrs));
583     return Dst;
584   }
585 
586   // strncpy(a, "a", 4) -> memcpy(a, "a\0\0\0", 4)
587   if (Len > SrcLen + 1) {
588     if (Len <= 128) {
589       StringRef Str;
590       if (!getConstantStringInfo(Src, Str))
591         return nullptr;
592       std::string SrcStr = Str.str();
593       SrcStr.resize(Len, '\0');
594       Src = B.CreateGlobalString(SrcStr, "str");
595     } else {
596       return nullptr;
597     }
598   }
599 
600   Type *PT = Callee->getFunctionType()->getParamType(0);
601   // strncpy(x, s, c) -> memcpy(align 1 x, align 1 s, c) [s and c are constant]
602   CallInst *NewCI = B.CreateMemCpy(Dst, Align(1), Src, Align(1),
603                                    ConstantInt::get(DL.getIntPtrType(PT), Len));
604   NewCI->setAttributes(CI->getAttributes());
605   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
606   return Dst;
607 }
608 
609 Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilderBase &B,
610                                                unsigned CharSize) {
611   Value *Src = CI->getArgOperand(0);
612 
613   // Constant folding: strlen("xyz") -> 3
614   if (uint64_t Len = GetStringLength(Src, CharSize))
615     return ConstantInt::get(CI->getType(), Len - 1);
616 
617   // If s is a constant pointer pointing to a string literal, we can fold
618   // strlen(s + x) to strlen(s) - x, when x is known to be in the range
619   // [0, strlen(s)] or the string has a single null terminator '\0' at the end.
620   // We only try to simplify strlen when the pointer s points to an array
621   // of i8. Otherwise, we would need to scale the offset x before doing the
622   // subtraction. This will make the optimization more complex, and it's not
623   // very useful because calling strlen for a pointer of other types is
624   // very uncommon.
625   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) {
626     if (!isGEPBasedOnPointerToString(GEP, CharSize))
627       return nullptr;
628 
629     ConstantDataArraySlice Slice;
630     if (getConstantDataArrayInfo(GEP->getOperand(0), Slice, CharSize)) {
631       uint64_t NullTermIdx;
632       if (Slice.Array == nullptr) {
633         NullTermIdx = 0;
634       } else {
635         NullTermIdx = ~((uint64_t)0);
636         for (uint64_t I = 0, E = Slice.Length; I < E; ++I) {
637           if (Slice.Array->getElementAsInteger(I + Slice.Offset) == 0) {
638             NullTermIdx = I;
639             break;
640           }
641         }
642         // If the string does not have '\0', leave it to strlen to compute
643         // its length.
644         if (NullTermIdx == ~((uint64_t)0))
645           return nullptr;
646       }
647 
648       Value *Offset = GEP->getOperand(2);
649       KnownBits Known = computeKnownBits(Offset, DL, 0, nullptr, CI, nullptr);
650       Known.Zero.flipAllBits();
651       uint64_t ArrSize =
652              cast<ArrayType>(GEP->getSourceElementType())->getNumElements();
653 
654       // KnownZero's bits are flipped, so zeros in KnownZero now represent
655       // bits known to be zeros in Offset, and ones in KnownZero represent
656       // bits unknown in Offset. Therefore, Offset is known to be in range
657       // [0, NullTermIdx] when the flipped KnownZero is non-negative and
658       // unsigned-less-than NullTermIdx.
659       //
660       // If Offset is not provably in the range [0, NullTermIdx], we can still
661       // optimize if we can prove that the program has undefined behavior when
662       // Offset is outside that range. That is the case when GEP->getOperand(0)
663       // is a pointer to an object whose memory extent is NullTermIdx+1.
664       if ((Known.Zero.isNonNegative() && Known.Zero.ule(NullTermIdx)) ||
665           (GEP->isInBounds() && isa<GlobalVariable>(GEP->getOperand(0)) &&
666            NullTermIdx == ArrSize - 1)) {
667         Offset = B.CreateSExtOrTrunc(Offset, CI->getType());
668         return B.CreateSub(ConstantInt::get(CI->getType(), NullTermIdx),
669                            Offset);
670       }
671     }
672   }
673 
674   // strlen(x?"foo":"bars") --> x ? 3 : 4
675   if (SelectInst *SI = dyn_cast<SelectInst>(Src)) {
676     uint64_t LenTrue = GetStringLength(SI->getTrueValue(), CharSize);
677     uint64_t LenFalse = GetStringLength(SI->getFalseValue(), CharSize);
678     if (LenTrue && LenFalse) {
679       ORE.emit([&]() {
680         return OptimizationRemark("instcombine", "simplify-libcalls", CI)
681                << "folded strlen(select) to select of constants";
682       });
683       return B.CreateSelect(SI->getCondition(),
684                             ConstantInt::get(CI->getType(), LenTrue - 1),
685                             ConstantInt::get(CI->getType(), LenFalse - 1));
686     }
687   }
688 
689   // strlen(x) != 0 --> *x != 0
690   // strlen(x) == 0 --> *x == 0
691   if (isOnlyUsedInZeroEqualityComparison(CI))
692     return B.CreateZExt(B.CreateLoad(B.getIntNTy(CharSize), Src, "strlenfirst"),
693                         CI->getType());
694 
695   return nullptr;
696 }
697 
698 Value *LibCallSimplifier::optimizeStrLen(CallInst *CI, IRBuilderBase &B) {
699   if (Value *V = optimizeStringLength(CI, B, 8))
700     return V;
701   annotateNonNullNoUndefBasedOnAccess(CI, 0);
702   return nullptr;
703 }
704 
705 Value *LibCallSimplifier::optimizeWcslen(CallInst *CI, IRBuilderBase &B) {
706   Module &M = *CI->getModule();
707   unsigned WCharSize = TLI->getWCharSize(M) * 8;
708   // We cannot perform this optimization without wchar_size metadata.
709   if (WCharSize == 0)
710     return nullptr;
711 
712   return optimizeStringLength(CI, B, WCharSize);
713 }
714 
715 Value *LibCallSimplifier::optimizeStrPBrk(CallInst *CI, IRBuilderBase &B) {
716   StringRef S1, S2;
717   bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
718   bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
719 
720   // strpbrk(s, "") -> nullptr
721   // strpbrk("", s) -> nullptr
722   if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
723     return Constant::getNullValue(CI->getType());
724 
725   // Constant folding.
726   if (HasS1 && HasS2) {
727     size_t I = S1.find_first_of(S2);
728     if (I == StringRef::npos) // No match.
729       return Constant::getNullValue(CI->getType());
730 
731     return B.CreateGEP(B.getInt8Ty(), CI->getArgOperand(0), B.getInt64(I),
732                        "strpbrk");
733   }
734 
735   // strpbrk(s, "a") -> strchr(s, 'a')
736   if (HasS2 && S2.size() == 1)
737     return emitStrChr(CI->getArgOperand(0), S2[0], B, TLI);
738 
739   return nullptr;
740 }
741 
742 Value *LibCallSimplifier::optimizeStrTo(CallInst *CI, IRBuilderBase &B) {
743   Value *EndPtr = CI->getArgOperand(1);
744   if (isa<ConstantPointerNull>(EndPtr)) {
745     // With a null EndPtr, this function won't capture the main argument.
746     // It would be readonly too, except that it still may write to errno.
747     CI->addParamAttr(0, Attribute::NoCapture);
748   }
749 
750   return nullptr;
751 }
752 
753 Value *LibCallSimplifier::optimizeStrSpn(CallInst *CI, IRBuilderBase &B) {
754   StringRef S1, S2;
755   bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
756   bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
757 
758   // strspn(s, "") -> 0
759   // strspn("", s) -> 0
760   if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
761     return Constant::getNullValue(CI->getType());
762 
763   // Constant folding.
764   if (HasS1 && HasS2) {
765     size_t Pos = S1.find_first_not_of(S2);
766     if (Pos == StringRef::npos)
767       Pos = S1.size();
768     return ConstantInt::get(CI->getType(), Pos);
769   }
770 
771   return nullptr;
772 }
773 
774 Value *LibCallSimplifier::optimizeStrCSpn(CallInst *CI, IRBuilderBase &B) {
775   StringRef S1, S2;
776   bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
777   bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
778 
779   // strcspn("", s) -> 0
780   if (HasS1 && S1.empty())
781     return Constant::getNullValue(CI->getType());
782 
783   // Constant folding.
784   if (HasS1 && HasS2) {
785     size_t Pos = S1.find_first_of(S2);
786     if (Pos == StringRef::npos)
787       Pos = S1.size();
788     return ConstantInt::get(CI->getType(), Pos);
789   }
790 
791   // strcspn(s, "") -> strlen(s)
792   if (HasS2 && S2.empty())
793     return emitStrLen(CI->getArgOperand(0), B, DL, TLI);
794 
795   return nullptr;
796 }
797 
798 Value *LibCallSimplifier::optimizeStrStr(CallInst *CI, IRBuilderBase &B) {
799   // fold strstr(x, x) -> x.
800   if (CI->getArgOperand(0) == CI->getArgOperand(1))
801     return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
802 
803   // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
804   if (isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
805     Value *StrLen = emitStrLen(CI->getArgOperand(1), B, DL, TLI);
806     if (!StrLen)
807       return nullptr;
808     Value *StrNCmp = emitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
809                                  StrLen, B, DL, TLI);
810     if (!StrNCmp)
811       return nullptr;
812     for (User *U : llvm::make_early_inc_range(CI->users())) {
813       ICmpInst *Old = cast<ICmpInst>(U);
814       Value *Cmp =
815           B.CreateICmp(Old->getPredicate(), StrNCmp,
816                        ConstantInt::getNullValue(StrNCmp->getType()), "cmp");
817       replaceAllUsesWith(Old, Cmp);
818     }
819     return CI;
820   }
821 
822   // See if either input string is a constant string.
823   StringRef SearchStr, ToFindStr;
824   bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
825   bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);
826 
827   // fold strstr(x, "") -> x.
828   if (HasStr2 && ToFindStr.empty())
829     return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
830 
831   // If both strings are known, constant fold it.
832   if (HasStr1 && HasStr2) {
833     size_t Offset = SearchStr.find(ToFindStr);
834 
835     if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
836       return Constant::getNullValue(CI->getType());
837 
838     // strstr("abcd", "bc") -> gep((char*)"abcd", 1)
839     Value *Result = castToCStr(CI->getArgOperand(0), B);
840     Result =
841         B.CreateConstInBoundsGEP1_64(B.getInt8Ty(), Result, Offset, "strstr");
842     return B.CreateBitCast(Result, CI->getType());
843   }
844 
845   // fold strstr(x, "y") -> strchr(x, 'y').
846   if (HasStr2 && ToFindStr.size() == 1) {
847     Value *StrChr = emitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TLI);
848     return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : nullptr;
849   }
850 
851   annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
852   return nullptr;
853 }
854 
855 Value *LibCallSimplifier::optimizeMemRChr(CallInst *CI, IRBuilderBase &B) {
856   if (isKnownNonZero(CI->getOperand(2), DL))
857     annotateNonNullNoUndefBasedOnAccess(CI, 0);
858   return nullptr;
859 }
860 
861 Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilderBase &B) {
862   Value *SrcStr = CI->getArgOperand(0);
863   Value *Size = CI->getArgOperand(2);
864   annotateNonNullAndDereferenceable(CI, 0, Size, DL);
865   ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
866   ConstantInt *LenC = dyn_cast<ConstantInt>(Size);
867 
868   // memchr(x, y, 0) -> null
869   if (LenC) {
870     if (LenC->isZero())
871       return Constant::getNullValue(CI->getType());
872   } else {
873     // From now on we need at least constant length and string.
874     return nullptr;
875   }
876 
877   StringRef Str;
878   if (!getConstantStringInfo(SrcStr, Str, 0, /*TrimAtNul=*/false))
879     return nullptr;
880 
881   // Truncate the string to LenC. If Str is smaller than LenC we will still only
882   // scan the string, as reading past the end of it is undefined and we can just
883   // return null if we don't find the char.
884   Str = Str.substr(0, LenC->getZExtValue());
885 
886   // If the char is variable but the input str and length are not we can turn
887   // this memchr call into a simple bit field test. Of course this only works
888   // when the return value is only checked against null.
889   //
890   // It would be really nice to reuse switch lowering here but we can't change
891   // the CFG at this point.
892   //
893   // memchr("\r\n", C, 2) != nullptr -> (1 << C & ((1 << '\r') | (1 << '\n')))
894   // != 0
895   //   after bounds check.
896   if (!CharC && !Str.empty() && isOnlyUsedInZeroEqualityComparison(CI)) {
897     unsigned char Max =
898         *std::max_element(reinterpret_cast<const unsigned char *>(Str.begin()),
899                           reinterpret_cast<const unsigned char *>(Str.end()));
900 
901     // Make sure the bit field we're about to create fits in a register on the
902     // target.
903     // FIXME: On a 64 bit architecture this prevents us from using the
904     // interesting range of alpha ascii chars. We could do better by emitting
905     // two bitfields or shifting the range by 64 if no lower chars are used.
906     if (!DL.fitsInLegalInteger(Max + 1))
907       return nullptr;
908 
909     // For the bit field use a power-of-2 type with at least 8 bits to avoid
910     // creating unnecessary illegal types.
911     unsigned char Width = NextPowerOf2(std::max((unsigned char)7, Max));
912 
913     // Now build the bit field.
914     APInt Bitfield(Width, 0);
915     for (char C : Str)
916       Bitfield.setBit((unsigned char)C);
917     Value *BitfieldC = B.getInt(Bitfield);
918 
919     // Adjust width of "C" to the bitfield width, then mask off the high bits.
920     Value *C = B.CreateZExtOrTrunc(CI->getArgOperand(1), BitfieldC->getType());
921     C = B.CreateAnd(C, B.getIntN(Width, 0xFF));
922 
923     // First check that the bit field access is within bounds.
924     Value *Bounds = B.CreateICmp(ICmpInst::ICMP_ULT, C, B.getIntN(Width, Width),
925                                  "memchr.bounds");
926 
927     // Create code that checks if the given bit is set in the field.
928     Value *Shl = B.CreateShl(B.getIntN(Width, 1ULL), C);
929     Value *Bits = B.CreateIsNotNull(B.CreateAnd(Shl, BitfieldC), "memchr.bits");
930 
931     // Finally merge both checks and cast to pointer type. The inttoptr
932     // implicitly zexts the i1 to intptr type.
933     return B.CreateIntToPtr(B.CreateLogicalAnd(Bounds, Bits, "memchr"),
934                             CI->getType());
935   }
936 
937   // Check if all arguments are constants.  If so, we can constant fold.
938   if (!CharC)
939     return nullptr;
940 
941   // Compute the offset.
942   size_t I = Str.find(CharC->getSExtValue() & 0xFF);
943   if (I == StringRef::npos) // Didn't find the char.  memchr returns null.
944     return Constant::getNullValue(CI->getType());
945 
946   // memchr(s+n,c,l) -> gep(s+n+i,c)
947   return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "memchr");
948 }
949 
950 static Value *optimizeMemCmpConstantSize(CallInst *CI, Value *LHS, Value *RHS,
951                                          uint64_t Len, IRBuilderBase &B,
952                                          const DataLayout &DL) {
953   if (Len == 0) // memcmp(s1,s2,0) -> 0
954     return Constant::getNullValue(CI->getType());
955 
956   // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
957   if (Len == 1) {
958     Value *LHSV =
959         B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(LHS, B), "lhsc"),
960                      CI->getType(), "lhsv");
961     Value *RHSV =
962         B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(RHS, B), "rhsc"),
963                      CI->getType(), "rhsv");
964     return B.CreateSub(LHSV, RHSV, "chardiff");
965   }
966 
967   // memcmp(S1,S2,N/8)==0 -> (*(intN_t*)S1 != *(intN_t*)S2)==0
968   // TODO: The case where both inputs are constants does not need to be limited
969   // to legal integers or equality comparison. See block below this.
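  // For example (illustrative), on a target where i32 is a legal integer type,
  // "memcmp(p, q, 4) == 0" becomes two 4-byte loads compared with icmp ne and
  // zero-extended to memcmp's return type, provided each pointer is either
  // sufficiently aligned or constant data.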
970   if (DL.isLegalInteger(Len * 8) && isOnlyUsedInZeroEqualityComparison(CI)) {
971     IntegerType *IntType = IntegerType::get(CI->getContext(), Len * 8);
972     unsigned PrefAlignment = DL.getPrefTypeAlignment(IntType);
973 
974     // First, see if we can fold either argument to a constant.
975     Value *LHSV = nullptr;
976     if (auto *LHSC = dyn_cast<Constant>(LHS)) {
977       LHSC = ConstantExpr::getBitCast(LHSC, IntType->getPointerTo());
978       LHSV = ConstantFoldLoadFromConstPtr(LHSC, IntType, DL);
979     }
980     Value *RHSV = nullptr;
981     if (auto *RHSC = dyn_cast<Constant>(RHS)) {
982       RHSC = ConstantExpr::getBitCast(RHSC, IntType->getPointerTo());
983       RHSV = ConstantFoldLoadFromConstPtr(RHSC, IntType, DL);
984     }
985 
986     // Don't generate unaligned loads. If either source is constant data,
987     // alignment doesn't matter for that source because there is no load.
988     if ((LHSV || getKnownAlignment(LHS, DL, CI) >= PrefAlignment) &&
989         (RHSV || getKnownAlignment(RHS, DL, CI) >= PrefAlignment)) {
990       if (!LHSV) {
991         Type *LHSPtrTy =
992             IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
993         LHSV = B.CreateLoad(IntType, B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
994       }
995       if (!RHSV) {
996         Type *RHSPtrTy =
997             IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
998         RHSV = B.CreateLoad(IntType, B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
999       }
1000       return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
1001     }
1002   }
1003 
1004   // Constant folding: memcmp(x, y, Len) -> constant (all arguments are const).
1005   // TODO: This is limited to i8 arrays.
1006   StringRef LHSStr, RHSStr;
1007   if (getConstantStringInfo(LHS, LHSStr) &&
1008       getConstantStringInfo(RHS, RHSStr)) {
1009     // Make sure we're not reading out-of-bounds memory.
1010     if (Len > LHSStr.size() || Len > RHSStr.size())
1011       return nullptr;
1012     // Fold the memcmp and normalize the result.  This way we get consistent
1013     // results across multiple platforms.
1014     uint64_t Ret = 0;
1015     int Cmp = memcmp(LHSStr.data(), RHSStr.data(), Len);
1016     if (Cmp < 0)
1017       Ret = -1;
1018     else if (Cmp > 0)
1019       Ret = 1;
1020     return ConstantInt::get(CI->getType(), Ret);
1021   }
1022 
1023   return nullptr;
1024 }
1025 
1026 // Most simplifications for memcmp also apply to bcmp.
1027 Value *LibCallSimplifier::optimizeMemCmpBCmpCommon(CallInst *CI,
1028                                                    IRBuilderBase &B) {
1029   Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
1030   Value *Size = CI->getArgOperand(2);
1031 
1032   if (LHS == RHS) // memcmp(s,s,x) -> 0
1033     return Constant::getNullValue(CI->getType());
1034 
1035   annotateNonNullAndDereferenceable(CI, {0, 1}, Size, DL);
1036   // Handle constant lengths.
1037   ConstantInt *LenC = dyn_cast<ConstantInt>(Size);
1038   if (!LenC)
1039     return nullptr;
1040 
1041   // memcmp(d,s,0) -> 0
1042   if (LenC->getZExtValue() == 0)
1043     return Constant::getNullValue(CI->getType());
1044 
1045   if (Value *Res =
1046           optimizeMemCmpConstantSize(CI, LHS, RHS, LenC->getZExtValue(), B, DL))
1047     return Res;
1048   return nullptr;
1049 }
1050 
1051 Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilderBase &B) {
1052   if (Value *V = optimizeMemCmpBCmpCommon(CI, B))
1053     return V;
1054 
1055   // memcmp(x, y, Len) == 0 -> bcmp(x, y, Len) == 0
1056   // bcmp can be more efficient than memcmp because it only has to know that
1057   // there is a difference, not how different one is from the other.
1058   if (TLI->has(LibFunc_bcmp) && isOnlyUsedInZeroEqualityComparison(CI)) {
1059     Value *LHS = CI->getArgOperand(0);
1060     Value *RHS = CI->getArgOperand(1);
1061     Value *Size = CI->getArgOperand(2);
1062     return emitBCmp(LHS, RHS, Size, B, DL, TLI);
1063   }
1064 
1065   return nullptr;
1066 }
1067 
1068 Value *LibCallSimplifier::optimizeBCmp(CallInst *CI, IRBuilderBase &B) {
1069   return optimizeMemCmpBCmpCommon(CI, B);
1070 }
1071 
1072 Value *LibCallSimplifier::optimizeMemCpy(CallInst *CI, IRBuilderBase &B) {
1073   Value *Size = CI->getArgOperand(2);
1074   annotateNonNullAndDereferenceable(CI, {0, 1}, Size, DL);
1075   if (isa<IntrinsicInst>(CI))
1076     return nullptr;
1077 
1078   // memcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n)
1079   CallInst *NewCI = B.CreateMemCpy(CI->getArgOperand(0), Align(1),
1080                                    CI->getArgOperand(1), Align(1), Size);
1081   NewCI->setAttributes(CI->getAttributes());
1082   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1083   return CI->getArgOperand(0);
1084 }
1085 
1086 Value *LibCallSimplifier::optimizeMemCCpy(CallInst *CI, IRBuilderBase &B) {
1087   Value *Dst = CI->getArgOperand(0);
1088   Value *Src = CI->getArgOperand(1);
1089   ConstantInt *StopChar = dyn_cast<ConstantInt>(CI->getArgOperand(2));
1090   ConstantInt *N = dyn_cast<ConstantInt>(CI->getArgOperand(3));
1091   StringRef SrcStr;
1092   if (CI->use_empty() && Dst == Src)
1093     return Dst;
1094   // memccpy(d, s, c, 0) -> nullptr
1095   if (N) {
1096     if (N->isNullValue())
1097       return Constant::getNullValue(CI->getType());
1098     if (!getConstantStringInfo(Src, SrcStr, /*Offset=*/0,
1099                                /*TrimAtNul=*/false) ||
1100         !StopChar)
1101       return nullptr;
1102   } else {
1103     return nullptr;
1104   }
1105 
1106   // Wrap arg 'c' of type int to char
1107   size_t Pos = SrcStr.find(StopChar->getSExtValue() & 0xFF);
1108   if (Pos == StringRef::npos) {
1109     if (N->getZExtValue() <= SrcStr.size()) {
1110       B.CreateMemCpy(Dst, Align(1), Src, Align(1), CI->getArgOperand(3));
1111       return Constant::getNullValue(CI->getType());
1112     }
1113     return nullptr;
1114   }
1115 
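  // Otherwise the stop character occurs at offset Pos, so only Pos + 1 bytes
  // (capped at N) need to be copied; e.g. (illustrative) memccpy(d, "abcde",
  // 'c', 5) copies "abc" and returns d + 3.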
1116   Value *NewN =
1117       ConstantInt::get(N->getType(), std::min(uint64_t(Pos + 1), N->getZExtValue()));
1118   // memccpy -> llvm.memcpy
1119   B.CreateMemCpy(Dst, Align(1), Src, Align(1), NewN);
1120   return Pos + 1 <= N->getZExtValue()
1121              ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, NewN)
1122              : Constant::getNullValue(CI->getType());
1123 }
1124 
1125 Value *LibCallSimplifier::optimizeMemPCpy(CallInst *CI, IRBuilderBase &B) {
1126   Value *Dst = CI->getArgOperand(0);
1127   Value *N = CI->getArgOperand(2);
1128   // mempcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n), x + n
1129   CallInst *NewCI =
1130       B.CreateMemCpy(Dst, Align(1), CI->getArgOperand(1), Align(1), N);
1131   // Propagate attributes, but memcpy has no return value, so make sure that
1132   // any return attributes that no longer apply to the void result are removed.
1133   // TODO: Attach return value attributes to the 1st operand to preserve them?
1134   NewCI->setAttributes(CI->getAttributes());
1135   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1136   return B.CreateInBoundsGEP(B.getInt8Ty(), Dst, N);
1137 }
1138 
1139 Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilderBase &B) {
1140   Value *Size = CI->getArgOperand(2);
1141   annotateNonNullAndDereferenceable(CI, {0, 1}, Size, DL);
1142   if (isa<IntrinsicInst>(CI))
1143     return nullptr;
1144 
1145   // memmove(x, y, n) -> llvm.memmove(align 1 x, align 1 y, n)
1146   CallInst *NewCI = B.CreateMemMove(CI->getArgOperand(0), Align(1),
1147                                     CI->getArgOperand(1), Align(1), Size);
1148   NewCI->setAttributes(CI->getAttributes());
1149   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1150   return CI->getArgOperand(0);
1151 }
1152 
1153 Value *LibCallSimplifier::optimizeMemSet(CallInst *CI, IRBuilderBase &B) {
1154   Value *Size = CI->getArgOperand(2);
1155   annotateNonNullAndDereferenceable(CI, 0, Size, DL);
1156   if (isa<IntrinsicInst>(CI))
1157     return nullptr;
1158 
1159   // memset(p, v, n) -> llvm.memset(align 1 p, v, n)
1160   Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
1161   CallInst *NewCI = B.CreateMemSet(CI->getArgOperand(0), Val, Size, Align(1));
1162   NewCI->setAttributes(CI->getAttributes());
1163   NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1164   return CI->getArgOperand(0);
1165 }
1166 
1167 Value *LibCallSimplifier::optimizeRealloc(CallInst *CI, IRBuilderBase &B) {
1168   if (isa<ConstantPointerNull>(CI->getArgOperand(0)))
1169     return emitMalloc(CI->getArgOperand(1), B, DL, TLI);
1170 
1171   return nullptr;
1172 }
1173 
1174 //===----------------------------------------------------------------------===//
1175 // Math Library Optimizations
1176 //===----------------------------------------------------------------------===//
1177 
1178 // Replace a libcall \p CI with a call to intrinsic \p IID
1179 static Value *replaceUnaryCall(CallInst *CI, IRBuilderBase &B,
1180                                Intrinsic::ID IID) {
1181   // Propagate fast-math flags from the existing call to the new call.
1182   IRBuilderBase::FastMathFlagGuard Guard(B);
1183   B.setFastMathFlags(CI->getFastMathFlags());
1184 
1185   Module *M = CI->getModule();
1186   Value *V = CI->getArgOperand(0);
1187   Function *F = Intrinsic::getDeclaration(M, IID, CI->getType());
1188   CallInst *NewCall = B.CreateCall(F, V);
1189   NewCall->takeName(CI);
1190   return NewCall;
1191 }
1192 
1193 /// Return a variant of Val with float type.
1194 /// Currently this works in two cases: If Val is an FPExtension of a float
1195 /// value to something bigger, simply return the operand.
1196 /// If Val is a ConstantFP but can be converted to a float ConstantFP without
1197 /// loss of precision do so.
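/// For example (illustrative), the double constant 1.5 converts to float
/// exactly and a float constant is returned, while 0.1 (not exactly
/// representable as a float) yields nullptr.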
1198 static Value *valueHasFloatPrecision(Value *Val) {
1199   if (FPExtInst *Cast = dyn_cast<FPExtInst>(Val)) {
1200     Value *Op = Cast->getOperand(0);
1201     if (Op->getType()->isFloatTy())
1202       return Op;
1203   }
1204   if (ConstantFP *Const = dyn_cast<ConstantFP>(Val)) {
1205     APFloat F = Const->getValueAPF();
1206     bool losesInfo;
1207     (void)F.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
1208                     &losesInfo);
1209     if (!losesInfo)
1210       return ConstantFP::get(Const->getContext(), F);
1211   }
1212   return nullptr;
1213 }
1214 
1215 /// Shrink double -> float functions.
1216 static Value *optimizeDoubleFP(CallInst *CI, IRBuilderBase &B,
1217                                bool isBinary, bool isPrecise = false) {
1218   Function *CalleeFn = CI->getCalledFunction();
1219   if (!CI->getType()->isDoubleTy() || !CalleeFn)
1220     return nullptr;
1221 
1222   // If not all the uses of the function are converted to float, then bail out.
1223   // This matters if the precision of the result is more important than the
1224   // precision of the arguments.
1225   if (isPrecise)
1226     for (User *U : CI->users()) {
1227       FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
1228       if (!Cast || !Cast->getType()->isFloatTy())
1229         return nullptr;
1230     }
1231 
1232   // If this is something like 'g((double) float)', convert to 'gf(float)'.
1233   Value *V[2];
1234   V[0] = valueHasFloatPrecision(CI->getArgOperand(0));
1235   V[1] = isBinary ? valueHasFloatPrecision(CI->getArgOperand(1)) : nullptr;
1236   if (!V[0] || (isBinary && !V[1]))
1237     return nullptr;
1238 
1239   // If call isn't an intrinsic, check that it isn't within a function with the
1240   // same name as the float version of this call, otherwise the result is an
1241   // infinite loop.  For example, from MinGW-w64:
1242   //
1243   // float expf(float val) { return (float) exp((double) val); }
1244   StringRef CalleeName = CalleeFn->getName();
1245   bool IsIntrinsic = CalleeFn->isIntrinsic();
1246   if (!IsIntrinsic) {
1247     StringRef CallerName = CI->getFunction()->getName();
1248     if (!CallerName.empty() && CallerName.back() == 'f' &&
1249         CallerName.size() == (CalleeName.size() + 1) &&
1250         CallerName.startswith(CalleeName))
1251       return nullptr;
1252   }
1253 
1254   // Propagate the math semantics from the current function to the new function.
1255   IRBuilderBase::FastMathFlagGuard Guard(B);
1256   B.setFastMathFlags(CI->getFastMathFlags());
1257 
1258   // g((double) float) -> (double) gf(float)
1259   Value *R;
1260   if (IsIntrinsic) {
1261     Module *M = CI->getModule();
1262     Intrinsic::ID IID = CalleeFn->getIntrinsicID();
1263     Function *Fn = Intrinsic::getDeclaration(M, IID, B.getFloatTy());
1264     R = isBinary ? B.CreateCall(Fn, V) : B.CreateCall(Fn, V[0]);
1265   } else {
1266     AttributeList CalleeAttrs = CalleeFn->getAttributes();
1267     R = isBinary ? emitBinaryFloatFnCall(V[0], V[1], CalleeName, B, CalleeAttrs)
1268                  : emitUnaryFloatFnCall(V[0], CalleeName, B, CalleeAttrs);
1269   }
1270   return B.CreateFPExt(R, B.getDoubleTy());
1271 }
1272 
1273 /// Shrink double -> float for unary functions.
1274 static Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilderBase &B,
1275                                     bool isPrecise = false) {
1276   return optimizeDoubleFP(CI, B, false, isPrecise);
1277 }
1278 
1279 /// Shrink double -> float for binary functions.
1280 static Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilderBase &B,
1281                                      bool isPrecise = false) {
1282   return optimizeDoubleFP(CI, B, true, isPrecise);
1283 }
1284 
1285 // cabs(z) -> sqrt((creal(z)*creal(z)) + (cimag(z)*cimag(z)))
1286 Value *LibCallSimplifier::optimizeCAbs(CallInst *CI, IRBuilderBase &B) {
1287   if (!CI->isFast())
1288     return nullptr;
1289 
1290   // Propagate fast-math flags from the existing call to new instructions.
1291   IRBuilderBase::FastMathFlagGuard Guard(B);
1292   B.setFastMathFlags(CI->getFastMathFlags());
1293 
1294   Value *Real, *Imag;
1295   if (CI->getNumArgOperands() == 1) {
1296     Value *Op = CI->getArgOperand(0);
1297     assert(Op->getType()->isArrayTy() && "Unexpected signature for cabs!");
1298     Real = B.CreateExtractValue(Op, 0, "real");
1299     Imag = B.CreateExtractValue(Op, 1, "imag");
1300   } else {
1301     assert(CI->getNumArgOperands() == 2 && "Unexpected signature for cabs!");
1302     Real = CI->getArgOperand(0);
1303     Imag = CI->getArgOperand(1);
1304   }
1305 
1306   Value *RealReal = B.CreateFMul(Real, Real);
1307   Value *ImagImag = B.CreateFMul(Imag, Imag);
1308 
1309   Function *FSqrt = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::sqrt,
1310                                               CI->getType());
1311   return B.CreateCall(FSqrt, B.CreateFAdd(RealReal, ImagImag), "cabs");
1312 }
1313 
1314 static Value *optimizeTrigReflections(CallInst *Call, LibFunc Func,
1315                                       IRBuilderBase &B) {
1316   if (!isa<FPMathOperator>(Call))
1317     return nullptr;
1318 
1319   IRBuilderBase::FastMathFlagGuard Guard(B);
1320   B.setFastMathFlags(Call->getFastMathFlags());
1321 
1322   // TODO: Can this be shared to also handle LLVM intrinsics?
1323   Value *X;
1324   switch (Func) {
1325   case LibFunc_sin:
1326   case LibFunc_sinf:
1327   case LibFunc_sinl:
1328   case LibFunc_tan:
1329   case LibFunc_tanf:
1330   case LibFunc_tanl:
1331     // sin(-X) --> -sin(X)
1332     // tan(-X) --> -tan(X)
1333     if (match(Call->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X)))))
1334       return B.CreateFNeg(B.CreateCall(Call->getCalledFunction(), X));
1335     break;
1336   case LibFunc_cos:
1337   case LibFunc_cosf:
1338   case LibFunc_cosl:
1339     // cos(-X) --> cos(X)
1340     if (match(Call->getArgOperand(0), m_FNeg(m_Value(X))))
1341       return B.CreateCall(Call->getCalledFunction(), X, "cos");
1342     break;
1343   default:
1344     break;
1345   }
1346   return nullptr;
1347 }
1348 
1349 static Value *getPow(Value *InnerChain[33], unsigned Exp, IRBuilderBase &B) {
1350   // Multiplications calculated using Addition Chains.
1351   // Refer: http://wwwhomes.uni-bielefeld.de/achim/addition_chain.html
1352 
1353   assert(Exp != 0 && "Incorrect exponent 0 not handled");
1354 
1355   if (InnerChain[Exp])
1356     return InnerChain[Exp];
1357 
1358   static const unsigned AddChain[33][2] = {
1359       {0, 0}, // Unused.
1360       {0, 0}, // Unused (base case = pow1).
1361       {1, 1}, // Unused (pre-computed).
1362       {1, 2},  {2, 2},   {2, 3},  {3, 3},   {2, 5},  {4, 4},
1363       {1, 8},  {5, 5},   {1, 10}, {6, 6},   {4, 9},  {7, 7},
1364       {3, 12}, {8, 8},   {8, 9},  {2, 16},  {1, 18}, {10, 10},
1365       {6, 15}, {11, 11}, {3, 20}, {12, 12}, {8, 17}, {13, 13},
1366       {3, 24}, {14, 14}, {4, 25}, {15, 15}, {3, 28}, {16, 16},
1367   };
1368 
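  // For example (illustrative), Exp == 13 uses the entry {4, 9}: pow13 is
  // computed as pow4 * pow9, with those factors themselves built from (and
  // cached in) InnerChain.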
1369   InnerChain[Exp] = B.CreateFMul(getPow(InnerChain, AddChain[Exp][0], B),
1370                                  getPow(InnerChain, AddChain[Exp][1], B));
1371   return InnerChain[Exp];
1372 }
1373 
1374 // Return a properly extended integer (DstWidth bits wide) if the operation is
1375 // an itofp.
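// For example (illustrative), with DstWidth == 32 an i16 operand of a sitofp
// is sign-extended to i32 and an i16 operand of a uitofp is zero-extended,
// while an i64 source is rejected because its value may not fit.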
1376 static Value *getIntToFPVal(Value *I2F, IRBuilderBase &B, unsigned DstWidth) {
1377   if (isa<SIToFPInst>(I2F) || isa<UIToFPInst>(I2F)) {
1378     Value *Op = cast<Instruction>(I2F)->getOperand(0);
1379     // Make sure that the exponent fits inside an "int" of size DstWidth,
1380     // thus avoiding any range issues that the FP representation does not have.
1381     unsigned BitWidth = Op->getType()->getPrimitiveSizeInBits();
1382     if (BitWidth < DstWidth ||
1383         (BitWidth == DstWidth && isa<SIToFPInst>(I2F)))
1384       return isa<SIToFPInst>(I2F) ? B.CreateSExt(Op, B.getIntNTy(DstWidth))
1385                                   : B.CreateZExt(Op, B.getIntNTy(DstWidth));
1386   }
1387 
1388   return nullptr;
1389 }
1390 
1391 /// Use exp{,2}(x * y) for pow(exp{,2}(x), y);
1392 /// ldexp(1.0, x) for pow(2.0, itofp(x)); exp2(n * x) for pow(2.0 ** n, x);
1393 /// exp10(x) for pow(10.0, x); exp2(log2(n) * x) for pow(n, x).
1394 Value *LibCallSimplifier::replacePowWithExp(CallInst *Pow, IRBuilderBase &B) {
1395   Value *Base = Pow->getArgOperand(0), *Expo = Pow->getArgOperand(1);
1396   AttributeList Attrs; // Attributes are only meaningful on the original call
1397   Module *Mod = Pow->getModule();
1398   Type *Ty = Pow->getType();
1399   bool Ignored;
1400 
1401   // Evaluate special cases related to a nested function as the base.
1402 
1403   // pow(exp(x), y) -> exp(x * y)
1404   // pow(exp2(x), y) -> exp2(x * y)
  // If exp{,2}() is used only once, it is better to fold two transcendental
  // math functions into one.  If it is used again, exp{,2}() would still have
  // to be called with the original argument, so both transcendental functions
  // would be kept.  However, this transformation is only safe with fully
  // relaxed math semantics, since, besides rounding differences, it changes
  // overflow and underflow behavior quite dramatically.  For example:
1411   //   pow(exp(1000), 0.001) = pow(inf, 0.001) = inf
1412   // Whereas:
1413   //   exp(1000 * 0.001) = exp(1)
1414   // TODO: Loosen the requirement for fully relaxed math semantics.
1415   // TODO: Handle exp10() when more targets have it available.
1416   CallInst *BaseFn = dyn_cast<CallInst>(Base);
1417   if (BaseFn && BaseFn->hasOneUse() && BaseFn->isFast() && Pow->isFast()) {
1418     LibFunc LibFn;
1419 
1420     Function *CalleeFn = BaseFn->getCalledFunction();
1421     if (CalleeFn &&
1422         TLI->getLibFunc(CalleeFn->getName(), LibFn) && TLI->has(LibFn)) {
1423       StringRef ExpName;
1424       Intrinsic::ID ID;
1425       Value *ExpFn;
1426       LibFunc LibFnFloat, LibFnDouble, LibFnLongDouble;
1427 
1428       switch (LibFn) {
1429       default:
1430         return nullptr;
1431       case LibFunc_expf:  case LibFunc_exp:  case LibFunc_expl:
1432         ExpName = TLI->getName(LibFunc_exp);
1433         ID = Intrinsic::exp;
1434         LibFnFloat = LibFunc_expf;
1435         LibFnDouble = LibFunc_exp;
1436         LibFnLongDouble = LibFunc_expl;
1437         break;
1438       case LibFunc_exp2f: case LibFunc_exp2: case LibFunc_exp2l:
1439         ExpName = TLI->getName(LibFunc_exp2);
1440         ID = Intrinsic::exp2;
1441         LibFnFloat = LibFunc_exp2f;
1442         LibFnDouble = LibFunc_exp2;
1443         LibFnLongDouble = LibFunc_exp2l;
1444         break;
1445       }
1446 
1447       // Create new exp{,2}() with the product as its argument.
1448       Value *FMul = B.CreateFMul(BaseFn->getArgOperand(0), Expo, "mul");
1449       ExpFn = BaseFn->doesNotAccessMemory()
1450               ? B.CreateCall(Intrinsic::getDeclaration(Mod, ID, Ty),
1451                              FMul, ExpName)
1452               : emitUnaryFloatFnCall(FMul, TLI, LibFnDouble, LibFnFloat,
1453                                      LibFnLongDouble, B,
1454                                      BaseFn->getAttributes());
1455 
      // Since the new exp{,2}() is different from the original one, dead code
      // elimination cannot be trusted to remove the original call, as it may
      // have side effects (e.g., errno).  When pow() is the only consumer of
      // the original exp{,2}(), it has to be erased explicitly.
1460       substituteInParent(BaseFn, ExpFn);
1461       return ExpFn;
1462     }
1463   }
1464 
1465   // Evaluate special cases related to a constant base.
1466 
1467   const APFloat *BaseF;
1468   if (!match(Pow->getArgOperand(0), m_APFloat(BaseF)))
1469     return nullptr;
1470 
1471   // pow(2.0, itofp(x)) -> ldexp(1.0, x)
1472   if (match(Base, m_SpecificFP(2.0)) &&
1473       (isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo)) &&
1474       hasFloatFn(TLI, Ty, LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl)) {
1475     if (Value *ExpoI = getIntToFPVal(Expo, B, TLI->getIntSize()))
1476       return emitBinaryFloatFnCall(ConstantFP::get(Ty, 1.0), ExpoI, TLI,
1477                                    LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl,
1478                                    B, Attrs);
1479   }
1480 
1481   // pow(2.0 ** n, x) -> exp2(n * x)
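  // This also handles reciprocal bases, e.g. pow(0.25, x) -> exp2(-2 * x).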
1482   if (hasFloatFn(TLI, Ty, LibFunc_exp2, LibFunc_exp2f, LibFunc_exp2l)) {
1483     APFloat BaseR = APFloat(1.0);
1484     BaseR.convert(BaseF->getSemantics(), APFloat::rmTowardZero, &Ignored);
1485     BaseR = BaseR / *BaseF;
1486     bool IsInteger = BaseF->isInteger(), IsReciprocal = BaseR.isInteger();
1487     const APFloat *NF = IsReciprocal ? &BaseR : BaseF;
1488     APSInt NI(64, false);
1489     if ((IsInteger || IsReciprocal) &&
1490         NF->convertToInteger(NI, APFloat::rmTowardZero, &Ignored) ==
1491             APFloat::opOK &&
1492         NI > 1 && NI.isPowerOf2()) {
1493       double N = NI.logBase2() * (IsReciprocal ? -1.0 : 1.0);
1494       Value *FMul = B.CreateFMul(Expo, ConstantFP::get(Ty, N), "mul");
1495       if (Pow->doesNotAccessMemory())
1496         return B.CreateCall(Intrinsic::getDeclaration(Mod, Intrinsic::exp2, Ty),
1497                             FMul, "exp2");
1498       else
1499         return emitUnaryFloatFnCall(FMul, TLI, LibFunc_exp2, LibFunc_exp2f,
1500                                     LibFunc_exp2l, B, Attrs);
1501     }
1502   }
1503 
1504   // pow(10.0, x) -> exp10(x)
1505   // TODO: There is no exp10() intrinsic yet, but some day there shall be one.
1506   if (match(Base, m_SpecificFP(10.0)) &&
1507       hasFloatFn(TLI, Ty, LibFunc_exp10, LibFunc_exp10f, LibFunc_exp10l))
1508     return emitUnaryFloatFnCall(Expo, TLI, LibFunc_exp10, LibFunc_exp10f,
1509                                 LibFunc_exp10l, B, Attrs);
1510 
1511   // pow(x, y) -> exp2(log2(x) * y)
1512   if (Pow->hasApproxFunc() && Pow->hasNoNaNs() && BaseF->isFiniteNonZero() &&
1513       !BaseF->isNegative()) {
1514     // pow(1, inf) is defined to be 1 but exp2(log2(1) * inf) evaluates to NaN.
1515     // Luckily optimizePow has already handled the x == 1 case.
1516     assert(!match(Base, m_FPOne()) &&
1517            "pow(1.0, y) should have been simplified earlier!");
1518 
1519     Value *Log = nullptr;
1520     if (Ty->isFloatTy())
1521       Log = ConstantFP::get(Ty, std::log2(BaseF->convertToFloat()));
1522     else if (Ty->isDoubleTy())
1523       Log = ConstantFP::get(Ty, std::log2(BaseF->convertToDouble()));
1524 
1525     if (Log) {
1526       Value *FMul = B.CreateFMul(Log, Expo, "mul");
1527       if (Pow->doesNotAccessMemory())
1528         return B.CreateCall(Intrinsic::getDeclaration(Mod, Intrinsic::exp2, Ty),
1529                             FMul, "exp2");
1530       else if (hasFloatFn(TLI, Ty, LibFunc_exp2, LibFunc_exp2f, LibFunc_exp2l))
1531         return emitUnaryFloatFnCall(FMul, TLI, LibFunc_exp2, LibFunc_exp2f,
1532                                     LibFunc_exp2l, B, Attrs);
1533     }
1534   }
1535 
1536   return nullptr;
1537 }
1538 
1539 static Value *getSqrtCall(Value *V, AttributeList Attrs, bool NoErrno,
1540                           Module *M, IRBuilderBase &B,
1541                           const TargetLibraryInfo *TLI) {
1542   // If errno is never set, then use the intrinsic for sqrt().
1543   if (NoErrno) {
1544     Function *SqrtFn =
1545         Intrinsic::getDeclaration(M, Intrinsic::sqrt, V->getType());
1546     return B.CreateCall(SqrtFn, V, "sqrt");
1547   }
1548 
1549   // Otherwise, use the libcall for sqrt().
1550   if (hasFloatFn(TLI, V->getType(), LibFunc_sqrt, LibFunc_sqrtf, LibFunc_sqrtl))
1551     // TODO: We also should check that the target can in fact lower the sqrt()
1552     // libcall. We currently have no way to ask this question, so we ask if
1553     // the target has a sqrt() libcall, which is not exactly the same.
1554     return emitUnaryFloatFnCall(V, TLI, LibFunc_sqrt, LibFunc_sqrtf,
1555                                 LibFunc_sqrtl, B, Attrs);
1556 
1557   return nullptr;
1558 }
1559 
1560 /// Use square root in place of pow(x, +/-0.5).
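/// Without the nsz/ninf flags this expands pow(X, 0.5) to
///   X == -inf ? +inf : fabs(sqrt(X))
/// and pow(X, -0.5) additionally takes the reciprocal of that result.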
1561 Value *LibCallSimplifier::replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B) {
1562   Value *Sqrt, *Base = Pow->getArgOperand(0), *Expo = Pow->getArgOperand(1);
1563   AttributeList Attrs; // Attributes are only meaningful on the original call
1564   Module *Mod = Pow->getModule();
1565   Type *Ty = Pow->getType();
1566 
1567   const APFloat *ExpoF;
1568   if (!match(Expo, m_APFloat(ExpoF)) ||
1569       (!ExpoF->isExactlyValue(0.5) && !ExpoF->isExactlyValue(-0.5)))
1570     return nullptr;
1571 
1572   // Converting pow(X, -0.5) to 1/sqrt(X) may introduce an extra rounding step,
1573   // so that requires fast-math-flags (afn or reassoc).
1574   if (ExpoF->isNegative() && (!Pow->hasApproxFunc() && !Pow->hasAllowReassoc()))
1575     return nullptr;
1576 
1577   // If we have a pow() library call (accesses memory) and we can't guarantee
1578   // that the base is not an infinity, give up:
1579   // pow(-Inf, 0.5) is optionally required to have a result of +Inf (not setting
1580   // errno), but sqrt(-Inf) is required by various standards to set errno.
1581   if (!Pow->doesNotAccessMemory() && !Pow->hasNoInfs() &&
1582       !isKnownNeverInfinity(Base, TLI))
1583     return nullptr;
1584 
1585   Sqrt = getSqrtCall(Base, Attrs, Pow->doesNotAccessMemory(), Mod, B, TLI);
1586   if (!Sqrt)
1587     return nullptr;
1588 
1589   // Handle signed zero base by expanding to fabs(sqrt(x)).
1590   if (!Pow->hasNoSignedZeros()) {
1591     Function *FAbsFn = Intrinsic::getDeclaration(Mod, Intrinsic::fabs, Ty);
1592     Sqrt = B.CreateCall(FAbsFn, Sqrt, "abs");
1593   }
1594 
1595   // Handle non finite base by expanding to
1596   // (x == -infinity ? +infinity : sqrt(x)).
1597   if (!Pow->hasNoInfs()) {
1598     Value *PosInf = ConstantFP::getInfinity(Ty),
1599           *NegInf = ConstantFP::getInfinity(Ty, true);
1600     Value *FCmp = B.CreateFCmpOEQ(Base, NegInf, "isinf");
1601     Sqrt = B.CreateSelect(FCmp, PosInf, Sqrt);
1602   }
1603 
1604   // If the exponent is negative, then get the reciprocal.
1605   if (ExpoF->isNegative())
1606     Sqrt = B.CreateFDiv(ConstantFP::get(Ty, 1.0), Sqrt, "reciprocal");
1607 
1608   return Sqrt;
1609 }
1610 
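// Emit a call to the llvm.powi intrinsic with the given base and (already
// integer-typed) exponent.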
1611 static Value *createPowWithIntegerExponent(Value *Base, Value *Expo, Module *M,
1612                                            IRBuilderBase &B) {
1613   Value *Args[] = {Base, Expo};
1614   Type *Types[] = {Base->getType(), Expo->getType()};
1615   Function *F = Intrinsic::getDeclaration(M, Intrinsic::powi, Types);
1616   return B.CreateCall(F, Args);
1617 }
1618 
1619 Value *LibCallSimplifier::optimizePow(CallInst *Pow, IRBuilderBase &B) {
1620   Value *Base = Pow->getArgOperand(0);
1621   Value *Expo = Pow->getArgOperand(1);
1622   Function *Callee = Pow->getCalledFunction();
1623   StringRef Name = Callee->getName();
1624   Type *Ty = Pow->getType();
1625   Module *M = Pow->getModule();
1626   bool AllowApprox = Pow->hasApproxFunc();
1627   bool Ignored;
1628 
1629   // Propagate the math semantics from the call to any created instructions.
1630   IRBuilderBase::FastMathFlagGuard Guard(B);
1631   B.setFastMathFlags(Pow->getFastMathFlags());
1632   // Evaluate special cases related to the base.
1633 
1634   // pow(1.0, x) -> 1.0
1635   if (match(Base, m_FPOne()))
1636     return Base;
1637 
1638   if (Value *Exp = replacePowWithExp(Pow, B))
1639     return Exp;
1640 
1641   // Evaluate special cases related to the exponent.
1642 
1643   // pow(x, -1.0) -> 1.0 / x
1644   if (match(Expo, m_SpecificFP(-1.0)))
1645     return B.CreateFDiv(ConstantFP::get(Ty, 1.0), Base, "reciprocal");
1646 
1647   // pow(x, +/-0.0) -> 1.0
1648   if (match(Expo, m_AnyZeroFP()))
1649     return ConstantFP::get(Ty, 1.0);
1650 
1651   // pow(x, 1.0) -> x
1652   if (match(Expo, m_FPOne()))
1653     return Base;
1654 
1655   // pow(x, 2.0) -> x * x
1656   if (match(Expo, m_SpecificFP(2.0)))
1657     return B.CreateFMul(Base, Base, "square");
1658 
1659   if (Value *Sqrt = replacePowWithSqrt(Pow, B))
1660     return Sqrt;
1661 
1662   // pow(x, n) -> x * x * x * ...
1663   const APFloat *ExpoF;
1664   if (AllowApprox && match(Expo, m_APFloat(ExpoF)) &&
1665       !ExpoF->isExactlyValue(0.5) && !ExpoF->isExactlyValue(-0.5)) {
1666     // We limit to a max of 7 multiplications, thus the maximum exponent is 32.
1667     // If the exponent is an integer+0.5 we generate a call to sqrt and an
1668     // additional fmul.
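    // For example, pow(x, 3.5) becomes (x * x * x) * sqrt(x), and pow(x, -3.5)
    // additionally takes the reciprocal of that product.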
1669     // TODO: This whole transformation should be backend specific (e.g. some
1670     //       backends might prefer libcalls or the limit for the exponent might
1671     //       be different) and it should also consider optimizing for size.
1672     APFloat LimF(ExpoF->getSemantics(), 33),
1673             ExpoA(abs(*ExpoF));
1674     if (ExpoA < LimF) {
1675       // This transformation applies to integer or integer+0.5 exponents only.
1676       // For integer+0.5, we create a sqrt(Base) call.
1677       Value *Sqrt = nullptr;
1678       if (!ExpoA.isInteger()) {
1679         APFloat Expo2 = ExpoA;
1680         // To check if ExpoA is an integer + 0.5, we add it to itself. If there
1681         // is no floating point exception and the result is an integer, then
1682         // ExpoA == integer + 0.5
1683         if (Expo2.add(ExpoA, APFloat::rmNearestTiesToEven) != APFloat::opOK)
1684           return nullptr;
1685 
1686         if (!Expo2.isInteger())
1687           return nullptr;
1688 
1689         Sqrt = getSqrtCall(Base, Pow->getCalledFunction()->getAttributes(),
1690                            Pow->doesNotAccessMemory(), M, B, TLI);
1691         if (!Sqrt)
1692           return nullptr;
1693       }
1694 
1695       // We will memoize intermediate products of the Addition Chain.
1696       Value *InnerChain[33] = {nullptr};
1697       InnerChain[1] = Base;
1698       InnerChain[2] = B.CreateFMul(Base, Base, "square");
1699 
      // convertToDouble() expects double semantics, so convert ExpoA (which
      // may be in float or long double format) to IEEEdouble first.
1702       ExpoA.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &Ignored);
1703       Value *FMul = getPow(InnerChain, ExpoA.convertToDouble(), B);
1704 
1705       // Expand pow(x, y+0.5) to pow(x, y) * sqrt(x).
1706       if (Sqrt)
1707         FMul = B.CreateFMul(FMul, Sqrt);
1708 
1709       // If the exponent is negative, then get the reciprocal.
1710       if (ExpoF->isNegative())
1711         FMul = B.CreateFDiv(ConstantFP::get(Ty, 1.0), FMul, "reciprocal");
1712 
1713       return FMul;
1714     }
1715 
1716     APSInt IntExpo(TLI->getIntSize(), /*isUnsigned=*/false);
1717     // powf(x, n) -> powi(x, n) if n is a constant signed integer value
1718     if (ExpoF->isInteger() &&
1719         ExpoF->convertToInteger(IntExpo, APFloat::rmTowardZero, &Ignored) ==
1720             APFloat::opOK) {
1721       return createPowWithIntegerExponent(
1722           Base, ConstantInt::get(B.getIntNTy(TLI->getIntSize()), IntExpo), M, B);
1723     }
1724   }
1725 
1726   // powf(x, itofp(y)) -> powi(x, y)
1727   if (AllowApprox && (isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo))) {
1728     if (Value *ExpoI = getIntToFPVal(Expo, B, TLI->getIntSize()))
1729       return createPowWithIntegerExponent(Base, ExpoI, M, B);
1730   }
1731 
1732   // Shrink pow() to powf() if the arguments are single precision,
1733   // unless the result is expected to be double precision.
1734   if (UnsafeFPShrink && Name == TLI->getName(LibFunc_pow) &&
1735       hasFloatVersion(Name)) {
1736     if (Value *Shrunk = optimizeBinaryDoubleFP(Pow, B, true))
1737       return Shrunk;
1738   }
1739 
1740   return nullptr;
1741 }
1742 
1743 Value *LibCallSimplifier::optimizeExp2(CallInst *CI, IRBuilderBase &B) {
1744   Function *Callee = CI->getCalledFunction();
1745   AttributeList Attrs; // Attributes are only meaningful on the original call
1746   StringRef Name = Callee->getName();
1747   Value *Ret = nullptr;
1748   if (UnsafeFPShrink && Name == TLI->getName(LibFunc_exp2) &&
1749       hasFloatVersion(Name))
1750     Ret = optimizeUnaryDoubleFP(CI, B, true);
1751 
1752   Type *Ty = CI->getType();
1753   Value *Op = CI->getArgOperand(0);
1754 
1755   // Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x))  if sizeof(x) <= IntSize
1756   // Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x))  if sizeof(x) < IntSize
1757   if ((isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) &&
1758       hasFloatFn(TLI, Ty, LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl)) {
1759     if (Value *Exp = getIntToFPVal(Op, B, TLI->getIntSize()))
1760       return emitBinaryFloatFnCall(ConstantFP::get(Ty, 1.0), Exp, TLI,
1761                                    LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl,
1762                                    B, Attrs);
1763   }
1764 
1765   return Ret;
1766 }
1767 
1768 Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilderBase &B) {
1769   // If we can shrink the call to a float function rather than a double
1770   // function, do that first.
1771   Function *Callee = CI->getCalledFunction();
1772   StringRef Name = Callee->getName();
1773   if ((Name == "fmin" || Name == "fmax") && hasFloatVersion(Name))
1774     if (Value *Ret = optimizeBinaryDoubleFP(CI, B))
1775       return Ret;
1776 
1777   // The LLVM intrinsics minnum/maxnum correspond to fmin/fmax. Canonicalize to
1778   // the intrinsics for improved optimization (for example, vectorization).
1779   // No-signed-zeros is implied by the definitions of fmax/fmin themselves.
1780   // From the C standard draft WG14/N1256:
1781   // "Ideally, fmax would be sensitive to the sign of zero, for example
1782   // fmax(-0.0, +0.0) would return +0; however, implementation in software
1783   // might be impractical."
1784   IRBuilderBase::FastMathFlagGuard Guard(B);
1785   FastMathFlags FMF = CI->getFastMathFlags();
1786   FMF.setNoSignedZeros();
1787   B.setFastMathFlags(FMF);
1788 
1789   Intrinsic::ID IID = Callee->getName().startswith("fmin") ? Intrinsic::minnum
1790                                                            : Intrinsic::maxnum;
1791   Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, CI->getType());
1792   return B.CreateCall(F, { CI->getArgOperand(0), CI->getArgOperand(1) });
1793 }
1794 
1795 Value *LibCallSimplifier::optimizeLog(CallInst *Log, IRBuilderBase &B) {
1796   Function *LogFn = Log->getCalledFunction();
1797   AttributeList Attrs; // Attributes are only meaningful on the original call
1798   StringRef LogNm = LogFn->getName();
1799   Intrinsic::ID LogID = LogFn->getIntrinsicID();
1800   Module *Mod = Log->getModule();
1801   Type *Ty = Log->getType();
1802   Value *Ret = nullptr;
1803 
1804   if (UnsafeFPShrink && hasFloatVersion(LogNm))
1805     Ret = optimizeUnaryDoubleFP(Log, B, true);
1806 
1807   // The earlier call must also be 'fast' in order to do these transforms.
1808   CallInst *Arg = dyn_cast<CallInst>(Log->getArgOperand(0));
1809   if (!Log->isFast() || !Arg || !Arg->isFast() || !Arg->hasOneUse())
1810     return Ret;
1811 
1812   LibFunc LogLb, ExpLb, Exp2Lb, Exp10Lb, PowLb;
1813 
1814   // This is only applicable to log(), log2(), log10().
1815   if (TLI->getLibFunc(LogNm, LogLb))
1816     switch (LogLb) {
1817     case LibFunc_logf:
1818       LogID = Intrinsic::log;
1819       ExpLb = LibFunc_expf;
1820       Exp2Lb = LibFunc_exp2f;
1821       Exp10Lb = LibFunc_exp10f;
1822       PowLb = LibFunc_powf;
1823       break;
1824     case LibFunc_log:
1825       LogID = Intrinsic::log;
1826       ExpLb = LibFunc_exp;
1827       Exp2Lb = LibFunc_exp2;
1828       Exp10Lb = LibFunc_exp10;
1829       PowLb = LibFunc_pow;
1830       break;
1831     case LibFunc_logl:
1832       LogID = Intrinsic::log;
1833       ExpLb = LibFunc_expl;
1834       Exp2Lb = LibFunc_exp2l;
1835       Exp10Lb = LibFunc_exp10l;
1836       PowLb = LibFunc_powl;
1837       break;
1838     case LibFunc_log2f:
1839       LogID = Intrinsic::log2;
1840       ExpLb = LibFunc_expf;
1841       Exp2Lb = LibFunc_exp2f;
1842       Exp10Lb = LibFunc_exp10f;
1843       PowLb = LibFunc_powf;
1844       break;
1845     case LibFunc_log2:
1846       LogID = Intrinsic::log2;
1847       ExpLb = LibFunc_exp;
1848       Exp2Lb = LibFunc_exp2;
1849       Exp10Lb = LibFunc_exp10;
1850       PowLb = LibFunc_pow;
1851       break;
1852     case LibFunc_log2l:
1853       LogID = Intrinsic::log2;
1854       ExpLb = LibFunc_expl;
1855       Exp2Lb = LibFunc_exp2l;
1856       Exp10Lb = LibFunc_exp10l;
1857       PowLb = LibFunc_powl;
1858       break;
1859     case LibFunc_log10f:
1860       LogID = Intrinsic::log10;
1861       ExpLb = LibFunc_expf;
1862       Exp2Lb = LibFunc_exp2f;
1863       Exp10Lb = LibFunc_exp10f;
1864       PowLb = LibFunc_powf;
1865       break;
1866     case LibFunc_log10:
1867       LogID = Intrinsic::log10;
1868       ExpLb = LibFunc_exp;
1869       Exp2Lb = LibFunc_exp2;
1870       Exp10Lb = LibFunc_exp10;
1871       PowLb = LibFunc_pow;
1872       break;
1873     case LibFunc_log10l:
1874       LogID = Intrinsic::log10;
1875       ExpLb = LibFunc_expl;
1876       Exp2Lb = LibFunc_exp2l;
1877       Exp10Lb = LibFunc_exp10l;
1878       PowLb = LibFunc_powl;
1879       break;
1880     default:
1881       return Ret;
1882     }
1883   else if (LogID == Intrinsic::log || LogID == Intrinsic::log2 ||
1884            LogID == Intrinsic::log10) {
1885     if (Ty->getScalarType()->isFloatTy()) {
1886       ExpLb = LibFunc_expf;
1887       Exp2Lb = LibFunc_exp2f;
1888       Exp10Lb = LibFunc_exp10f;
1889       PowLb = LibFunc_powf;
1890     } else if (Ty->getScalarType()->isDoubleTy()) {
1891       ExpLb = LibFunc_exp;
1892       Exp2Lb = LibFunc_exp2;
1893       Exp10Lb = LibFunc_exp10;
1894       PowLb = LibFunc_pow;
1895     } else
1896       return Ret;
1897   } else
1898     return Ret;
1899 
1900   IRBuilderBase::FastMathFlagGuard Guard(B);
1901   B.setFastMathFlags(FastMathFlags::getFast());
1902 
1903   Intrinsic::ID ArgID = Arg->getIntrinsicID();
1904   LibFunc ArgLb = NotLibFunc;
1905   TLI->getLibFunc(*Arg, ArgLb);
1906 
1907   // log(pow(x,y)) -> y*log(x)
1908   if (ArgLb == PowLb || ArgID == Intrinsic::pow) {
1909     Value *LogX =
1910         Log->doesNotAccessMemory()
1911             ? B.CreateCall(Intrinsic::getDeclaration(Mod, LogID, Ty),
1912                            Arg->getOperand(0), "log")
1913             : emitUnaryFloatFnCall(Arg->getOperand(0), LogNm, B, Attrs);
1914     Value *MulY = B.CreateFMul(Arg->getArgOperand(1), LogX, "mul");
1915     // Since pow() may have side effects, e.g. errno,
1916     // dead code elimination may not be trusted to remove it.
1917     substituteInParent(Arg, MulY);
1918     return MulY;
1919   }
1920 
1921   // log(exp{,2,10}(y)) -> y*log({e,2,10})
1922   // TODO: There is no exp10() intrinsic yet.
  if (ArgLb == ExpLb || ArgLb == Exp2Lb || ArgLb == Exp10Lb ||
      ArgID == Intrinsic::exp || ArgID == Intrinsic::exp2) {
1925     Constant *Eul;
1926     if (ArgLb == ExpLb || ArgID == Intrinsic::exp)
1927       // FIXME: Add more precise value of e for long double.
1928       Eul = ConstantFP::get(Log->getType(), numbers::e);
1929     else if (ArgLb == Exp2Lb || ArgID == Intrinsic::exp2)
1930       Eul = ConstantFP::get(Log->getType(), 2.0);
1931     else
1932       Eul = ConstantFP::get(Log->getType(), 10.0);
1933     Value *LogE = Log->doesNotAccessMemory()
1934                       ? B.CreateCall(Intrinsic::getDeclaration(Mod, LogID, Ty),
1935                                      Eul, "log")
1936                       : emitUnaryFloatFnCall(Eul, LogNm, B, Attrs);
1937     Value *MulY = B.CreateFMul(Arg->getArgOperand(0), LogE, "mul");
1938     // Since exp() may have side effects, e.g. errno,
1939     // dead code elimination may not be trusted to remove it.
1940     substituteInParent(Arg, MulY);
1941     return MulY;
1942   }
1943 
1944   return Ret;
1945 }
1946 
1947 Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilderBase &B) {
1948   Function *Callee = CI->getCalledFunction();
1949   Value *Ret = nullptr;
  // TODO: Once we have a way (other than checking for the existence of the
1951   // libcall) to tell whether our target can lower @llvm.sqrt, relax the
1952   // condition below.
1953   if (TLI->has(LibFunc_sqrtf) && (Callee->getName() == "sqrt" ||
1954                                   Callee->getIntrinsicID() == Intrinsic::sqrt))
1955     Ret = optimizeUnaryDoubleFP(CI, B, true);
1956 
1957   if (!CI->isFast())
1958     return Ret;
1959 
1960   Instruction *I = dyn_cast<Instruction>(CI->getArgOperand(0));
1961   if (!I || I->getOpcode() != Instruction::FMul || !I->isFast())
1962     return Ret;
1963 
1964   // We're looking for a repeated factor in a multiplication tree,
1965   // so we can do this fold: sqrt(x * x) -> fabs(x);
1966   // or this fold: sqrt((x * x) * y) -> fabs(x) * sqrt(y).
1967   Value *Op0 = I->getOperand(0);
1968   Value *Op1 = I->getOperand(1);
1969   Value *RepeatOp = nullptr;
1970   Value *OtherOp = nullptr;
1971   if (Op0 == Op1) {
1972     // Simple match: the operands of the multiply are identical.
1973     RepeatOp = Op0;
1974   } else {
1975     // Look for a more complicated pattern: one of the operands is itself
1976     // a multiply, so search for a common factor in that multiply.
1977     // Note: We don't bother looking any deeper than this first level or for
1978     // variations of this pattern because instcombine's visitFMUL and/or the
1979     // reassociation pass should give us this form.
1980     Value *OtherMul0, *OtherMul1;
1981     if (match(Op0, m_FMul(m_Value(OtherMul0), m_Value(OtherMul1)))) {
1982       // Pattern: sqrt((x * y) * z)
1983       if (OtherMul0 == OtherMul1 && cast<Instruction>(Op0)->isFast()) {
1984         // Matched: sqrt((x * x) * z)
1985         RepeatOp = OtherMul0;
1986         OtherOp = Op1;
1987       }
1988     }
1989   }
1990   if (!RepeatOp)
1991     return Ret;
1992 
1993   // Fast math flags for any created instructions should match the sqrt
1994   // and multiply.
1995   IRBuilderBase::FastMathFlagGuard Guard(B);
1996   B.setFastMathFlags(I->getFastMathFlags());
1997 
1998   // If we found a repeated factor, hoist it out of the square root and
1999   // replace it with the fabs of that factor.
2000   Module *M = Callee->getParent();
2001   Type *ArgType = I->getType();
2002   Function *Fabs = Intrinsic::getDeclaration(M, Intrinsic::fabs, ArgType);
2003   Value *FabsCall = B.CreateCall(Fabs, RepeatOp, "fabs");
2004   if (OtherOp) {
2005     // If we found a non-repeated factor, we still need to get its square
2006     // root. We then multiply that by the value that was simplified out
2007     // of the square root calculation.
2008     Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, ArgType);
2009     Value *SqrtCall = B.CreateCall(Sqrt, OtherOp, "sqrt");
2010     return B.CreateFMul(FabsCall, SqrtCall);
2011   }
2012   return FabsCall;
2013 }
2014 
2015 // TODO: Generalize to handle any trig function and its inverse.
2016 Value *LibCallSimplifier::optimizeTan(CallInst *CI, IRBuilderBase &B) {
2017   Function *Callee = CI->getCalledFunction();
2018   Value *Ret = nullptr;
2019   StringRef Name = Callee->getName();
2020   if (UnsafeFPShrink && Name == "tan" && hasFloatVersion(Name))
2021     Ret = optimizeUnaryDoubleFP(CI, B, true);
2022 
2023   Value *Op1 = CI->getArgOperand(0);
2024   auto *OpC = dyn_cast<CallInst>(Op1);
2025   if (!OpC)
2026     return Ret;
2027 
2028   // Both calls must be 'fast' in order to remove them.
2029   if (!CI->isFast() || !OpC->isFast())
2030     return Ret;
2031 
2032   // tan(atan(x)) -> x
2033   // tanf(atanf(x)) -> x
2034   // tanl(atanl(x)) -> x
2035   LibFunc Func;
2036   Function *F = OpC->getCalledFunction();
2037   if (F && TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
2038       ((Func == LibFunc_atan && Callee->getName() == "tan") ||
2039        (Func == LibFunc_atanf && Callee->getName() == "tanf") ||
2040        (Func == LibFunc_atanl && Callee->getName() == "tanl")))
2041     Ret = OpC->getArgOperand(0);
2042   return Ret;
2043 }
2044 
2045 static bool isTrigLibCall(CallInst *CI) {
2046   // We can only hope to do anything useful if we can ignore things like errno
2047   // and floating-point exceptions.
2048   // We already checked the prototype.
2049   return CI->hasFnAttr(Attribute::NoUnwind) &&
2050          CI->hasFnAttr(Attribute::ReadNone);
2051 }
2052 
2053 static void insertSinCosCall(IRBuilderBase &B, Function *OrigCallee, Value *Arg,
2054                              bool UseFloat, Value *&Sin, Value *&Cos,
2055                              Value *&SinCos) {
2056   Type *ArgTy = Arg->getType();
2057   Type *ResTy;
2058   StringRef Name;
2059 
2060   Triple T(OrigCallee->getParent()->getTargetTriple());
2061   if (UseFloat) {
2062     Name = "__sincospif_stret";
2063 
2064     assert(T.getArch() != Triple::x86 && "x86 messy and unsupported for now");
2065     // x86_64 can't use {float, float} since that would be returned in both
2066     // xmm0 and xmm1, which isn't what a real struct would do.
2067     ResTy = T.getArch() == Triple::x86_64
2068                 ? static_cast<Type *>(FixedVectorType::get(ArgTy, 2))
2069                 : static_cast<Type *>(StructType::get(ArgTy, ArgTy));
2070   } else {
2071     Name = "__sincospi_stret";
2072     ResTy = StructType::get(ArgTy, ArgTy);
2073   }
2074 
2075   Module *M = OrigCallee->getParent();
2076   FunctionCallee Callee =
2077       M->getOrInsertFunction(Name, OrigCallee->getAttributes(), ResTy, ArgTy);
2078 
2079   if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
2080     // If the argument is an instruction, it must dominate all uses so put our
2081     // sincos call there.
2082     B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
2083   } else {
2084     // Otherwise (e.g. for a constant) the beginning of the function is as
2085     // good a place as any.
2086     BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
2087     B.SetInsertPoint(&EntryBB, EntryBB.begin());
2088   }
2089 
2090   SinCos = B.CreateCall(Callee, Arg, "sincospi");
2091 
2092   if (SinCos->getType()->isStructTy()) {
2093     Sin = B.CreateExtractValue(SinCos, 0, "sinpi");
2094     Cos = B.CreateExtractValue(SinCos, 1, "cospi");
2095   } else {
2096     Sin = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 0),
2097                                  "sinpi");
2098     Cos = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 1),
2099                                  "cospi");
2100   }
2101 }
2102 
2103 Value *LibCallSimplifier::optimizeSinCosPi(CallInst *CI, IRBuilderBase &B) {
2104   // Make sure the prototype is as expected, otherwise the rest of the
2105   // function is probably invalid and likely to abort.
2106   if (!isTrigLibCall(CI))
2107     return nullptr;
2108 
2109   Value *Arg = CI->getArgOperand(0);
2110   SmallVector<CallInst *, 1> SinCalls;
2111   SmallVector<CallInst *, 1> CosCalls;
2112   SmallVector<CallInst *, 1> SinCosCalls;
2113 
2114   bool IsFloat = Arg->getType()->isFloatTy();
2115 
  // Look for all compatible sinpi, cospi and sincospi calls with the same
  // argument. If both the sin and cos flavors are actually used, we can make
  // the substitution.
2119   Function *F = CI->getFunction();
2120   for (User *U : Arg->users())
2121     classifyArgUse(U, F, IsFloat, SinCalls, CosCalls, SinCosCalls);
2122 
2123   // It's only worthwhile if both sinpi and cospi are actually used.
2124   if (SinCalls.empty() || CosCalls.empty())
2125     return nullptr;
2126 
2127   Value *Sin, *Cos, *SinCos;
2128   insertSinCosCall(B, CI->getCalledFunction(), Arg, IsFloat, Sin, Cos, SinCos);
2129 
2130   auto replaceTrigInsts = [this](SmallVectorImpl<CallInst *> &Calls,
2131                                  Value *Res) {
2132     for (CallInst *C : Calls)
2133       replaceAllUsesWith(C, Res);
2134   };
2135 
2136   replaceTrigInsts(SinCalls, Sin);
2137   replaceTrigInsts(CosCalls, Cos);
2138   replaceTrigInsts(SinCosCalls, SinCos);
2139 
2140   return nullptr;
2141 }
2142 
2143 void LibCallSimplifier::classifyArgUse(
2144     Value *Val, Function *F, bool IsFloat,
2145     SmallVectorImpl<CallInst *> &SinCalls,
2146     SmallVectorImpl<CallInst *> &CosCalls,
2147     SmallVectorImpl<CallInst *> &SinCosCalls) {
2148   CallInst *CI = dyn_cast<CallInst>(Val);
2149 
2150   if (!CI || CI->use_empty())
2151     return;
2152 
2153   // Don't consider calls in other functions.
2154   if (CI->getFunction() != F)
2155     return;
2156 
2157   Function *Callee = CI->getCalledFunction();
2158   LibFunc Func;
2159   if (!Callee || !TLI->getLibFunc(*Callee, Func) || !TLI->has(Func) ||
2160       !isTrigLibCall(CI))
2161     return;
2162 
2163   if (IsFloat) {
2164     if (Func == LibFunc_sinpif)
2165       SinCalls.push_back(CI);
2166     else if (Func == LibFunc_cospif)
2167       CosCalls.push_back(CI);
2168     else if (Func == LibFunc_sincospif_stret)
2169       SinCosCalls.push_back(CI);
2170   } else {
2171     if (Func == LibFunc_sinpi)
2172       SinCalls.push_back(CI);
2173     else if (Func == LibFunc_cospi)
2174       CosCalls.push_back(CI);
2175     else if (Func == LibFunc_sincospi_stret)
2176       SinCosCalls.push_back(CI);
2177   }
2178 }
2179 
2180 //===----------------------------------------------------------------------===//
2181 // Integer Library Call Optimizations
2182 //===----------------------------------------------------------------------===//
2183 
2184 Value *LibCallSimplifier::optimizeFFS(CallInst *CI, IRBuilderBase &B) {
2185   // ffs(x) -> x != 0 ? (i32)llvm.cttz(x)+1 : 0
2186   Value *Op = CI->getArgOperand(0);
2187   Type *ArgType = Op->getType();
2188   Function *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
2189                                           Intrinsic::cttz, ArgType);
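  // cttz is called with is_zero_undef = true; the x == 0 case is handled by
  // the select below.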
2190   Value *V = B.CreateCall(F, {Op, B.getTrue()}, "cttz");
2191   V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1));
2192   V = B.CreateIntCast(V, B.getInt32Ty(), false);
2193 
2194   Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType));
2195   return B.CreateSelect(Cond, V, B.getInt32(0));
2196 }
2197 
2198 Value *LibCallSimplifier::optimizeFls(CallInst *CI, IRBuilderBase &B) {
2199   // fls(x) -> (i32)(sizeInBits(x) - llvm.ctlz(x, false))
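  // ctlz is called with is_zero_undef = false so that fls(0) correctly
  // evaluates to 0 without needing a select.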
2200   Value *Op = CI->getArgOperand(0);
2201   Type *ArgType = Op->getType();
2202   Function *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
2203                                           Intrinsic::ctlz, ArgType);
2204   Value *V = B.CreateCall(F, {Op, B.getFalse()}, "ctlz");
2205   V = B.CreateSub(ConstantInt::get(V->getType(), ArgType->getIntegerBitWidth()),
2206                   V);
2207   return B.CreateIntCast(V, CI->getType(), false);
2208 }
2209 
2210 Value *LibCallSimplifier::optimizeAbs(CallInst *CI, IRBuilderBase &B) {
2211   // abs(x) -> x <s 0 ? -x : x
2212   // The negation has 'nsw' because abs of INT_MIN is undefined.
2213   Value *X = CI->getArgOperand(0);
2214   Value *IsNeg = B.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
2215   Value *NegX = B.CreateNSWNeg(X, "neg");
2216   return B.CreateSelect(IsNeg, NegX, X);
2217 }
2218 
2219 Value *LibCallSimplifier::optimizeIsDigit(CallInst *CI, IRBuilderBase &B) {
2220   // isdigit(c) -> (c-'0') <u 10
2221   Value *Op = CI->getArgOperand(0);
2222   Op = B.CreateSub(Op, B.getInt32('0'), "isdigittmp");
2223   Op = B.CreateICmpULT(Op, B.getInt32(10), "isdigit");
2224   return B.CreateZExt(Op, CI->getType());
2225 }
2226 
2227 Value *LibCallSimplifier::optimizeIsAscii(CallInst *CI, IRBuilderBase &B) {
2228   // isascii(c) -> c <u 128
2229   Value *Op = CI->getArgOperand(0);
2230   Op = B.CreateICmpULT(Op, B.getInt32(128), "isascii");
2231   return B.CreateZExt(Op, CI->getType());
2232 }
2233 
2234 Value *LibCallSimplifier::optimizeToAscii(CallInst *CI, IRBuilderBase &B) {
2235   // toascii(c) -> c & 0x7f
2236   return B.CreateAnd(CI->getArgOperand(0),
2237                      ConstantInt::get(CI->getType(), 0x7F));
2238 }
2239 
2240 Value *LibCallSimplifier::optimizeAtoi(CallInst *CI, IRBuilderBase &B) {
2241   StringRef Str;
2242   if (!getConstantStringInfo(CI->getArgOperand(0), Str))
2243     return nullptr;
2244 
2245   return convertStrToNumber(CI, Str, 10);
2246 }
2247 
2248 Value *LibCallSimplifier::optimizeStrtol(CallInst *CI, IRBuilderBase &B) {
2249   StringRef Str;
2250   if (!getConstantStringInfo(CI->getArgOperand(0), Str))
2251     return nullptr;
2252 
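  // Only handle calls that pass a null end pointer; a non-null second
  // argument would also have to be written through.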
2253   if (!isa<ConstantPointerNull>(CI->getArgOperand(1)))
2254     return nullptr;
2255 
2256   if (ConstantInt *CInt = dyn_cast<ConstantInt>(CI->getArgOperand(2))) {
2257     return convertStrToNumber(CI, Str, CInt->getSExtValue());
2258   }
2259 
2260   return nullptr;
2261 }
2262 
2263 //===----------------------------------------------------------------------===//
2264 // Formatting and IO Library Call Optimizations
2265 //===----------------------------------------------------------------------===//
2266 
2267 static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg);
2268 
2269 Value *LibCallSimplifier::optimizeErrorReporting(CallInst *CI, IRBuilderBase &B,
2270                                                  int StreamArg) {
2271   Function *Callee = CI->getCalledFunction();
2272   // Error reporting calls should be cold, mark them as such.
2273   // This applies even to non-builtin calls: it is only a hint and applies to
2274   // functions that the frontend might not understand as builtins.
2275 
2276   // This heuristic was suggested in:
2277   // Improving Static Branch Prediction in a Compiler
2278   // Brian L. Deitrich, Ben-Chung Cheng, Wen-mei W. Hwu
2279   // Proceedings of PACT'98, Oct. 1998, IEEE
2280   if (!CI->hasFnAttr(Attribute::Cold) &&
2281       isReportingError(Callee, CI, StreamArg)) {
2282     CI->addFnAttr(Attribute::Cold);
2283   }
2284 
2285   return nullptr;
2286 }
2287 
2288 static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg) {
2289   if (!Callee || !Callee->isDeclaration())
2290     return false;
2291 
2292   if (StreamArg < 0)
2293     return true;
2294 
2295   // These functions might be considered cold, but only if their stream
2296   // argument is stderr.
2297 
2298   if (StreamArg >= (int)CI->getNumArgOperands())
2299     return false;
2300   LoadInst *LI = dyn_cast<LoadInst>(CI->getArgOperand(StreamArg));
2301   if (!LI)
2302     return false;
2303   GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand());
2304   if (!GV || !GV->isDeclaration())
2305     return false;
2306   return GV->getName() == "stderr";
2307 }
2308 
2309 Value *LibCallSimplifier::optimizePrintFString(CallInst *CI, IRBuilderBase &B) {
2310   // Check for a fixed format string.
2311   StringRef FormatStr;
2312   if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
2313     return nullptr;
2314 
2315   // Empty format string -> noop.
2316   if (FormatStr.empty()) // Tolerate printf's declared void.
2317     return CI->use_empty() ? (Value *)CI : ConstantInt::get(CI->getType(), 0);
2318 
  // Do not do any of the following transformations if the printf return value
  // is used; in general the printf return value is not compatible with either
  // putchar() or puts().
2322   if (!CI->use_empty())
2323     return nullptr;
2324 
2325   // printf("x") -> putchar('x'), even for "%" and "%%".
2326   if (FormatStr.size() == 1 || FormatStr == "%%")
2327     return emitPutChar(B.getInt32(FormatStr[0]), B, TLI);
2328 
2329   // Try to remove call or emit putchar/puts.
2330   if (FormatStr == "%s" && CI->getNumArgOperands() > 1) {
2331     StringRef OperandStr;
2332     if (!getConstantStringInfo(CI->getOperand(1), OperandStr))
2333       return nullptr;
2334     // printf("%s", "") --> NOP
2335     if (OperandStr.empty())
2336       return (Value *)CI;
2337     // printf("%s", "a") --> putchar('a')
2338     if (OperandStr.size() == 1)
2339       return emitPutChar(B.getInt32(OperandStr[0]), B, TLI);
2340     // printf("%s", str"\n") --> puts(str)
2341     if (OperandStr.back() == '\n') {
2342       OperandStr = OperandStr.drop_back();
2343       Value *GV = B.CreateGlobalString(OperandStr, "str");
2344       return emitPutS(GV, B, TLI);
2345     }
2346     return nullptr;
2347   }
2348 
2349   // printf("foo\n") --> puts("foo")
2350   if (FormatStr.back() == '\n' &&
2351       FormatStr.find('%') == StringRef::npos) { // No format characters.
2352     // Create a string literal with no \n on it.  We expect the constant merge
2353     // pass to be run after this pass, to merge duplicate strings.
2354     FormatStr = FormatStr.drop_back();
2355     Value *GV = B.CreateGlobalString(FormatStr, "str");
2356     return emitPutS(GV, B, TLI);
2357   }
2358 
2359   // Optimize specific format strings.
2360   // printf("%c", chr) --> putchar(chr)
2361   if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
2362       CI->getArgOperand(1)->getType()->isIntegerTy())
2363     return emitPutChar(CI->getArgOperand(1), B, TLI);
2364 
2365   // printf("%s\n", str) --> puts(str)
2366   if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
2367       CI->getArgOperand(1)->getType()->isPointerTy())
2368     return emitPutS(CI->getArgOperand(1), B, TLI);
2369   return nullptr;
2370 }
2371 
2372 Value *LibCallSimplifier::optimizePrintF(CallInst *CI, IRBuilderBase &B) {
2373 
2374   Function *Callee = CI->getCalledFunction();
2375   FunctionType *FT = Callee->getFunctionType();
2376   if (Value *V = optimizePrintFString(CI, B)) {
2377     return V;
2378   }
2379 
2380   // printf(format, ...) -> iprintf(format, ...) if no floating point
2381   // arguments.
2382   if (TLI->has(LibFunc_iprintf) && !callHasFloatingPointArgument(CI)) {
2383     Module *M = B.GetInsertBlock()->getParent()->getParent();
2384     FunctionCallee IPrintFFn =
2385         M->getOrInsertFunction("iprintf", FT, Callee->getAttributes());
2386     CallInst *New = cast<CallInst>(CI->clone());
2387     New->setCalledFunction(IPrintFFn);
2388     B.Insert(New);
2389     return New;
2390   }
2391 
  // printf(format, ...) -> __small_printf(format, ...) if no 128-bit floating
  // point arguments.
2394   if (TLI->has(LibFunc_small_printf) && !callHasFP128Argument(CI)) {
2395     Module *M = B.GetInsertBlock()->getParent()->getParent();
2396     auto SmallPrintFFn =
2397         M->getOrInsertFunction(TLI->getName(LibFunc_small_printf),
2398                                FT, Callee->getAttributes());
2399     CallInst *New = cast<CallInst>(CI->clone());
2400     New->setCalledFunction(SmallPrintFFn);
2401     B.Insert(New);
2402     return New;
2403   }
2404 
2405   annotateNonNullNoUndefBasedOnAccess(CI, 0);
2406   return nullptr;
2407 }
2408 
2409 Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI,
2410                                                 IRBuilderBase &B) {
2411   // Check for a fixed format string.
2412   StringRef FormatStr;
2413   if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
2414     return nullptr;
2415 
2416   // If we just have a format string (nothing else crazy) transform it.
2417   Value *Dest = CI->getArgOperand(0);
2418   if (CI->getNumArgOperands() == 2) {
2419     // Make sure there's no % in the constant array.  We could try to handle
2420     // %% -> % in the future if we cared.
2421     if (FormatStr.find('%') != StringRef::npos)
2422       return nullptr; // we found a format specifier, bail out.
2423 
2424     // sprintf(str, fmt) -> llvm.memcpy(align 1 str, align 1 fmt, strlen(fmt)+1)
2425     B.CreateMemCpy(
2426         Dest, Align(1), CI->getArgOperand(1), Align(1),
2427         ConstantInt::get(DL.getIntPtrType(CI->getContext()),
2428                          FormatStr.size() + 1)); // Copy the null byte.
2429     return ConstantInt::get(CI->getType(), FormatStr.size());
2430   }
2431 
2432   // The remaining optimizations require the format string to be "%s" or "%c"
2433   // and have an extra operand.
2434   if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
2435       CI->getNumArgOperands() < 3)
2436     return nullptr;
2437 
2438   // Decode the second character of the format string.
2439   if (FormatStr[1] == 'c') {
2440     // sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
2441     if (!CI->getArgOperand(2)->getType()->isIntegerTy())
2442       return nullptr;
2443     Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
2444     Value *Ptr = castToCStr(Dest, B);
2445     B.CreateStore(V, Ptr);
2446     Ptr = B.CreateGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
2447     B.CreateStore(B.getInt8(0), Ptr);
2448 
2449     return ConstantInt::get(CI->getType(), 1);
2450   }
2451 
2452   if (FormatStr[1] == 's') {
2453     // sprintf(dest, "%s", str) -> llvm.memcpy(align 1 dest, align 1 str,
2454     // strlen(str)+1)
2455     if (!CI->getArgOperand(2)->getType()->isPointerTy())
2456       return nullptr;
2457 
2458     if (CI->use_empty())
2459       // sprintf(dest, "%s", str) -> strcpy(dest, str)
2460       return emitStrCpy(Dest, CI->getArgOperand(2), B, TLI);
2461 
2462     uint64_t SrcLen = GetStringLength(CI->getArgOperand(2));
2463     if (SrcLen) {
2464       B.CreateMemCpy(
2465           Dest, Align(1), CI->getArgOperand(2), Align(1),
2466           ConstantInt::get(DL.getIntPtrType(CI->getContext()), SrcLen));
      // Return the number of characters written, not counting the
      // terminating null character.
2468       return ConstantInt::get(CI->getType(), SrcLen - 1);
2469     } else if (Value *V = emitStpCpy(Dest, CI->getArgOperand(2), B, TLI)) {
2470       // sprintf(dest, "%s", str) -> stpcpy(dest, str) - dest
2471       // Handle mismatched pointer types (goes away with typeless pointers?).
2472       V = B.CreatePointerCast(V, Dest->getType());
2473       Value *PtrDiff = B.CreatePtrDiff(V, Dest);
2474       return B.CreateIntCast(PtrDiff, CI->getType(), false);
2475     }
2476 
2477     bool OptForSize = CI->getFunction()->hasOptSize() ||
2478                       llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI,
2479                                                   PGSOQueryType::IRPass);
2480     if (OptForSize)
2481       return nullptr;
2482 
2483     Value *Len = emitStrLen(CI->getArgOperand(2), B, DL, TLI);
2484     if (!Len)
2485       return nullptr;
2486     Value *IncLen =
2487         B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
2488     B.CreateMemCpy(Dest, Align(1), CI->getArgOperand(2), Align(1), IncLen);
2489 
2490     // The sprintf result is the unincremented number of bytes in the string.
2491     return B.CreateIntCast(Len, CI->getType(), false);
2492   }
2493   return nullptr;
2494 }
2495 
2496 Value *LibCallSimplifier::optimizeSPrintF(CallInst *CI, IRBuilderBase &B) {
2497   Function *Callee = CI->getCalledFunction();
2498   FunctionType *FT = Callee->getFunctionType();
2499   if (Value *V = optimizeSPrintFString(CI, B)) {
2500     return V;
2501   }
2502 
2503   // sprintf(str, format, ...) -> siprintf(str, format, ...) if no floating
2504   // point arguments.
2505   if (TLI->has(LibFunc_siprintf) && !callHasFloatingPointArgument(CI)) {
2506     Module *M = B.GetInsertBlock()->getParent()->getParent();
2507     FunctionCallee SIPrintFFn =
2508         M->getOrInsertFunction("siprintf", FT, Callee->getAttributes());
2509     CallInst *New = cast<CallInst>(CI->clone());
2510     New->setCalledFunction(SIPrintFFn);
2511     B.Insert(New);
2512     return New;
2513   }
2514 
  // sprintf(str, format, ...) -> __small_sprintf(str, format, ...) if no
  // 128-bit floating point arguments.
2517   if (TLI->has(LibFunc_small_sprintf) && !callHasFP128Argument(CI)) {
2518     Module *M = B.GetInsertBlock()->getParent()->getParent();
2519     auto SmallSPrintFFn =
2520         M->getOrInsertFunction(TLI->getName(LibFunc_small_sprintf),
2521                                FT, Callee->getAttributes());
2522     CallInst *New = cast<CallInst>(CI->clone());
2523     New->setCalledFunction(SmallSPrintFFn);
2524     B.Insert(New);
2525     return New;
2526   }
2527 
2528   annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
2529   return nullptr;
2530 }
2531 
2532 Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI,
2533                                                  IRBuilderBase &B) {
  // Check for a constant size argument.
2535   ConstantInt *Size = dyn_cast<ConstantInt>(CI->getArgOperand(1));
2536   if (!Size)
2537     return nullptr;
2538 
2539   uint64_t N = Size->getZExtValue();
2540   // Check for a fixed format string.
2541   StringRef FormatStr;
2542   if (!getConstantStringInfo(CI->getArgOperand(2), FormatStr))
2543     return nullptr;
2544 
2545   // If we just have a format string (nothing else crazy) transform it.
2546   if (CI->getNumArgOperands() == 3) {
2547     // Make sure there's no % in the constant array.  We could try to handle
2548     // %% -> % in the future if we cared.
2549     if (FormatStr.find('%') != StringRef::npos)
2550       return nullptr; // we found a format specifier, bail out.
2551 
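    // A size of 0 only reports the length that would have been written, and a
    // size smaller than the string plus its terminator cannot be folded
    // without truncating.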
2552     if (N == 0)
2553       return ConstantInt::get(CI->getType(), FormatStr.size());
2554     else if (N < FormatStr.size() + 1)
2555       return nullptr;
2556 
2557     // snprintf(dst, size, fmt) -> llvm.memcpy(align 1 dst, align 1 fmt,
2558     // strlen(fmt)+1)
2559     B.CreateMemCpy(
2560         CI->getArgOperand(0), Align(1), CI->getArgOperand(2), Align(1),
2561         ConstantInt::get(DL.getIntPtrType(CI->getContext()),
2562                          FormatStr.size() + 1)); // Copy the null byte.
2563     return ConstantInt::get(CI->getType(), FormatStr.size());
2564   }
2565 
2566   // The remaining optimizations require the format string to be "%s" or "%c"
2567   // and have an extra operand.
2568   if (FormatStr.size() == 2 && FormatStr[0] == '%' &&
2569       CI->getNumArgOperands() == 4) {
2570 
2571     // Decode the second character of the format string.
2572     if (FormatStr[1] == 'c') {
2573       if (N == 0)
2574         return ConstantInt::get(CI->getType(), 1);
2575       else if (N == 1)
2576         return nullptr;
2577 
2578       // snprintf(dst, size, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
2579       if (!CI->getArgOperand(3)->getType()->isIntegerTy())
2580         return nullptr;
2581       Value *V = B.CreateTrunc(CI->getArgOperand(3), B.getInt8Ty(), "char");
2582       Value *Ptr = castToCStr(CI->getArgOperand(0), B);
2583       B.CreateStore(V, Ptr);
2584       Ptr = B.CreateGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
2585       B.CreateStore(B.getInt8(0), Ptr);
2586 
2587       return ConstantInt::get(CI->getType(), 1);
2588     }
2589 
2590     if (FormatStr[1] == 's') {
      // snprintf(dest, size, "%s", str) -> llvm.memcpy(dest, str, len+1, 1)
2592       StringRef Str;
2593       if (!getConstantStringInfo(CI->getArgOperand(3), Str))
2594         return nullptr;
2595 
2596       if (N == 0)
2597         return ConstantInt::get(CI->getType(), Str.size());
2598       else if (N < Str.size() + 1)
2599         return nullptr;
2600 
2601       B.CreateMemCpy(CI->getArgOperand(0), Align(1), CI->getArgOperand(3),
2602                      Align(1), ConstantInt::get(CI->getType(), Str.size() + 1));
2603 
2604       // The snprintf result is the unincremented number of bytes in the string.
2605       return ConstantInt::get(CI->getType(), Str.size());
2606     }
2607   }
2608   return nullptr;
2609 }
2610 
2611 Value *LibCallSimplifier::optimizeSnPrintF(CallInst *CI, IRBuilderBase &B) {
2612   if (Value *V = optimizeSnPrintFString(CI, B)) {
2613     return V;
2614   }
2615 
2616   if (isKnownNonZero(CI->getOperand(1), DL))
2617     annotateNonNullNoUndefBasedOnAccess(CI, 0);
2618   return nullptr;
2619 }
2620 
2621 Value *LibCallSimplifier::optimizeFPrintFString(CallInst *CI,
2622                                                 IRBuilderBase &B) {
2623   optimizeErrorReporting(CI, B, 0);
2624 
2625   // All the optimizations depend on the format string.
2626   StringRef FormatStr;
2627   if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
2628     return nullptr;
2629 
  // Do not do any of the following transformations if the fprintf return
  // value is used; in general the fprintf return value is not compatible
  // with fwrite(), fputc() or fputs().
2633   if (!CI->use_empty())
2634     return nullptr;
2635 
2636   // fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
2637   if (CI->getNumArgOperands() == 2) {
2638     // Could handle %% -> % if we cared.
2639     if (FormatStr.find('%') != StringRef::npos)
2640       return nullptr; // We found a format specifier.
2641 
2642     return emitFWrite(
2643         CI->getArgOperand(1),
2644         ConstantInt::get(DL.getIntPtrType(CI->getContext()), FormatStr.size()),
2645         CI->getArgOperand(0), B, DL, TLI);
2646   }
2647 
2648   // The remaining optimizations require the format string to be "%s" or "%c"
2649   // and have an extra operand.
2650   if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
2651       CI->getNumArgOperands() < 3)
2652     return nullptr;
2653 
2654   // Decode the second character of the format string.
2655   if (FormatStr[1] == 'c') {
2656     // fprintf(F, "%c", chr) --> fputc(chr, F)
2657     if (!CI->getArgOperand(2)->getType()->isIntegerTy())
2658       return nullptr;
2659     return emitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI);
2660   }
2661 
2662   if (FormatStr[1] == 's') {
2663     // fprintf(F, "%s", str) --> fputs(str, F)
2664     if (!CI->getArgOperand(2)->getType()->isPointerTy())
2665       return nullptr;
2666     return emitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI);
2667   }
2668   return nullptr;
2669 }
2670 
2671 Value *LibCallSimplifier::optimizeFPrintF(CallInst *CI, IRBuilderBase &B) {
2672   Function *Callee = CI->getCalledFunction();
2673   FunctionType *FT = Callee->getFunctionType();
2674   if (Value *V = optimizeFPrintFString(CI, B)) {
2675     return V;
2676   }
2677 
2678   // fprintf(stream, format, ...) -> fiprintf(stream, format, ...) if no
2679   // floating point arguments.
2680   if (TLI->has(LibFunc_fiprintf) && !callHasFloatingPointArgument(CI)) {
2681     Module *M = B.GetInsertBlock()->getParent()->getParent();
2682     FunctionCallee FIPrintFFn =
2683         M->getOrInsertFunction("fiprintf", FT, Callee->getAttributes());
2684     CallInst *New = cast<CallInst>(CI->clone());
2685     New->setCalledFunction(FIPrintFFn);
2686     B.Insert(New);
2687     return New;
2688   }
2689 
2690   // fprintf(stream, format, ...) -> __small_fprintf(stream, format, ...) if no
2691   // 128-bit floating point arguments.
2692   if (TLI->has(LibFunc_small_fprintf) && !callHasFP128Argument(CI)) {
2693     Module *M = B.GetInsertBlock()->getParent()->getParent();
2694     auto SmallFPrintFFn =
2695         M->getOrInsertFunction(TLI->getName(LibFunc_small_fprintf),
2696                                FT, Callee->getAttributes());
2697     CallInst *New = cast<CallInst>(CI->clone());
2698     New->setCalledFunction(SmallFPrintFFn);
2699     B.Insert(New);
2700     return New;
2701   }
2702 
2703   return nullptr;
2704 }
2705 
2706 Value *LibCallSimplifier::optimizeFWrite(CallInst *CI, IRBuilderBase &B) {
2707   optimizeErrorReporting(CI, B, 3);
2708 
2709   // Get the element size and count.
2710   ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
2711   ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
2712   if (SizeC && CountC) {
2713     uint64_t Bytes = SizeC->getZExtValue() * CountC->getZExtValue();
2714 
2715     // If this is writing zero records, remove the call (it's a noop).
2716     if (Bytes == 0)
2717       return ConstantInt::get(CI->getType(), 0);
2718 
2719     // If this is writing one byte, turn it into fputc.
    // This optimization is only valid if the return value is unused.
2721     if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
2722       Value *Char = B.CreateLoad(B.getInt8Ty(),
2723                                  castToCStr(CI->getArgOperand(0), B), "char");
2724       Value *NewCI = emitFPutC(Char, CI->getArgOperand(3), B, TLI);
2725       return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
2726     }
2727   }
2728 
2729   return nullptr;
2730 }
2731 
2732 Value *LibCallSimplifier::optimizeFPuts(CallInst *CI, IRBuilderBase &B) {
2733   optimizeErrorReporting(CI, B, 1);
2734 
  // Don't rewrite fputs to fwrite when optimizing for size because fwrite
  // takes more arguments and thus requires extra MOVs.
2737   bool OptForSize = CI->getFunction()->hasOptSize() ||
2738                     llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI,
2739                                                 PGSOQueryType::IRPass);
2740   if (OptForSize)
2741     return nullptr;
2742 
2743   // We can't optimize if return value is used.
2744   if (!CI->use_empty())
2745     return nullptr;
2746 
2747   // fputs(s,F) --> fwrite(s,strlen(s),1,F)
2748   uint64_t Len = GetStringLength(CI->getArgOperand(0));
2749   if (!Len)
2750     return nullptr;
2751 
2752   // Known to have no uses (see above).
2753   return emitFWrite(
2754       CI->getArgOperand(0),
2755       ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len - 1),
2756       CI->getArgOperand(1), B, DL, TLI);
2757 }
2758 
2759 Value *LibCallSimplifier::optimizePuts(CallInst *CI, IRBuilderBase &B) {
2760   annotateNonNullNoUndefBasedOnAccess(CI, 0);
2761   if (!CI->use_empty())
2762     return nullptr;
2763 
2764   // Check for a constant string.
2765   // puts("") -> putchar('\n')
2766   StringRef Str;
2767   if (getConstantStringInfo(CI->getArgOperand(0), Str) && Str.empty())
2768     return emitPutChar(B.getInt32('\n'), B, TLI);
2769 
2770   return nullptr;
2771 }
2772 
2773 Value *LibCallSimplifier::optimizeBCopy(CallInst *CI, IRBuilderBase &B) {
2774   // bcopy(src, dst, n) -> llvm.memmove(dst, src, n)
2775   return B.CreateMemMove(CI->getArgOperand(1), Align(1), CI->getArgOperand(0),
2776                          Align(1), CI->getArgOperand(2));
2777 }
2778 
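/// Return true if the target library provides a float variant of \p FuncName,
/// formed by appending 'f' (e.g. sinf for sin).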
2779 bool LibCallSimplifier::hasFloatVersion(StringRef FuncName) {
2780   LibFunc Func;
2781   SmallString<20> FloatFuncName = FuncName;
2782   FloatFuncName += 'f';
2783   if (TLI->getLibFunc(FloatFuncName, Func))
2784     return TLI->has(Func);
2785   return false;
2786 }
2787 
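/// Dispatch a recognized string/memory library call (str*, mem*, bcmp, bcopy,
/// wcslen, realloc) to the corresponding simplifier. Returns the replacement
/// value, or nullptr if no simplification was possible.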
2788 Value *LibCallSimplifier::optimizeStringMemoryLibCall(CallInst *CI,
2789                                                       IRBuilderBase &Builder) {
2790   LibFunc Func;
2791   Function *Callee = CI->getCalledFunction();
2792   // Check for string/memory library functions.
2793   if (TLI->getLibFunc(*Callee, Func) && TLI->has(Func)) {
2794     // Make sure we never change the calling convention.
2795     assert(
2796         (ignoreCallingConv(Func) ||
2797          TargetLibraryInfoImpl::isCallingConvCCompatible(CI)) &&
2798         "Optimizing string/memory libcall would change the calling convention");
2799     switch (Func) {
2800     case LibFunc_strcat:
2801       return optimizeStrCat(CI, Builder);
2802     case LibFunc_strncat:
2803       return optimizeStrNCat(CI, Builder);
2804     case LibFunc_strchr:
2805       return optimizeStrChr(CI, Builder);
2806     case LibFunc_strrchr:
2807       return optimizeStrRChr(CI, Builder);
2808     case LibFunc_strcmp:
2809       return optimizeStrCmp(CI, Builder);
2810     case LibFunc_strncmp:
2811       return optimizeStrNCmp(CI, Builder);
2812     case LibFunc_strcpy:
2813       return optimizeStrCpy(CI, Builder);
2814     case LibFunc_stpcpy:
2815       return optimizeStpCpy(CI, Builder);
2816     case LibFunc_strncpy:
2817       return optimizeStrNCpy(CI, Builder);
2818     case LibFunc_strlen:
2819       return optimizeStrLen(CI, Builder);
2820     case LibFunc_strpbrk:
2821       return optimizeStrPBrk(CI, Builder);
2822     case LibFunc_strndup:
2823       return optimizeStrNDup(CI, Builder);
2824     case LibFunc_strtol:
2825     case LibFunc_strtod:
2826     case LibFunc_strtof:
2827     case LibFunc_strtoul:
2828     case LibFunc_strtoll:
2829     case LibFunc_strtold:
2830     case LibFunc_strtoull:
2831       return optimizeStrTo(CI, Builder);
2832     case LibFunc_strspn:
2833       return optimizeStrSpn(CI, Builder);
2834     case LibFunc_strcspn:
2835       return optimizeStrCSpn(CI, Builder);
2836     case LibFunc_strstr:
2837       return optimizeStrStr(CI, Builder);
2838     case LibFunc_memchr:
2839       return optimizeMemChr(CI, Builder);
2840     case LibFunc_memrchr:
2841       return optimizeMemRChr(CI, Builder);
2842     case LibFunc_bcmp:
2843       return optimizeBCmp(CI, Builder);
2844     case LibFunc_memcmp:
2845       return optimizeMemCmp(CI, Builder);
2846     case LibFunc_memcpy:
2847       return optimizeMemCpy(CI, Builder);
2848     case LibFunc_memccpy:
2849       return optimizeMemCCpy(CI, Builder);
2850     case LibFunc_mempcpy:
2851       return optimizeMemPCpy(CI, Builder);
2852     case LibFunc_memmove:
2853       return optimizeMemMove(CI, Builder);
2854     case LibFunc_memset:
2855       return optimizeMemSet(CI, Builder);
2856     case LibFunc_realloc:
2857       return optimizeRealloc(CI, Builder);
2858     case LibFunc_wcslen:
2859       return optimizeWcslen(CI, Builder);
2860     case LibFunc_bcopy:
2861       return optimizeBCopy(CI, Builder);
2862     default:
2863       break;
2864     }
2865   }
2866   return nullptr;
2867 }
2868 
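/// Dispatch a recognized floating-point math library call to the corresponding
/// simplifier. Calls that require strict FP semantics are left untouched.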
2869 Value *LibCallSimplifier::optimizeFloatingPointLibCall(CallInst *CI,
2870                                                        LibFunc Func,
2871                                                        IRBuilderBase &Builder) {
2872   // Don't optimize calls that require strict floating point semantics.
2873   if (CI->isStrictFP())
2874     return nullptr;
2875 
2876   if (Value *V = optimizeTrigReflections(CI, Func, Builder))
2877     return V;
2878 
2879   switch (Func) {
2880   case LibFunc_sinpif:
2881   case LibFunc_sinpi:
2882   case LibFunc_cospif:
2883   case LibFunc_cospi:
2884     return optimizeSinCosPi(CI, Builder);
2885   case LibFunc_powf:
2886   case LibFunc_pow:
2887   case LibFunc_powl:
2888     return optimizePow(CI, Builder);
2889   case LibFunc_exp2l:
2890   case LibFunc_exp2:
2891   case LibFunc_exp2f:
2892     return optimizeExp2(CI, Builder);
2893   case LibFunc_fabsf:
2894   case LibFunc_fabs:
2895   case LibFunc_fabsl:
2896     return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
2897   case LibFunc_sqrtf:
2898   case LibFunc_sqrt:
2899   case LibFunc_sqrtl:
2900     return optimizeSqrt(CI, Builder);
2901   case LibFunc_logf:
2902   case LibFunc_log:
2903   case LibFunc_logl:
2904   case LibFunc_log10f:
2905   case LibFunc_log10:
2906   case LibFunc_log10l:
2907   case LibFunc_log1pf:
2908   case LibFunc_log1p:
2909   case LibFunc_log1pl:
2910   case LibFunc_log2f:
2911   case LibFunc_log2:
2912   case LibFunc_log2l:
2913   case LibFunc_logbf:
2914   case LibFunc_logb:
2915   case LibFunc_logbl:
2916     return optimizeLog(CI, Builder);
2917   case LibFunc_tan:
2918   case LibFunc_tanf:
2919   case LibFunc_tanl:
2920     return optimizeTan(CI, Builder);
2921   case LibFunc_ceil:
2922     return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
2923   case LibFunc_floor:
2924     return replaceUnaryCall(CI, Builder, Intrinsic::floor);
2925   case LibFunc_round:
2926     return replaceUnaryCall(CI, Builder, Intrinsic::round);
2927   case LibFunc_roundeven:
2928     return replaceUnaryCall(CI, Builder, Intrinsic::roundeven);
2929   case LibFunc_nearbyint:
2930     return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
2931   case LibFunc_rint:
2932     return replaceUnaryCall(CI, Builder, Intrinsic::rint);
2933   case LibFunc_trunc:
2934     return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
2935   case LibFunc_acos:
2936   case LibFunc_acosh:
2937   case LibFunc_asin:
2938   case LibFunc_asinh:
2939   case LibFunc_atan:
2940   case LibFunc_atanh:
2941   case LibFunc_cbrt:
2942   case LibFunc_cosh:
2943   case LibFunc_exp:
2944   case LibFunc_exp10:
2945   case LibFunc_expm1:
2946   case LibFunc_cos:
2947   case LibFunc_sin:
2948   case LibFunc_sinh:
2949   case LibFunc_tanh:
2950     if (UnsafeFPShrink && hasFloatVersion(CI->getCalledFunction()->getName()))
2951       return optimizeUnaryDoubleFP(CI, Builder, true);
2952     return nullptr;
2953   case LibFunc_copysign:
2954     if (hasFloatVersion(CI->getCalledFunction()->getName()))
2955       return optimizeBinaryDoubleFP(CI, Builder);
2956     return nullptr;
2957   case LibFunc_fminf:
2958   case LibFunc_fmin:
2959   case LibFunc_fminl:
2960   case LibFunc_fmaxf:
2961   case LibFunc_fmax:
2962   case LibFunc_fmaxl:
2963     return optimizeFMinFMax(CI, Builder);
2964   case LibFunc_cabs:
2965   case LibFunc_cabsf:
2966   case LibFunc_cabsl:
2967     return optimizeCAbs(CI, Builder);
2968   default:
2969     return nullptr;
2970   }
2971 }
2972 
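/// Top-level entry point: try to simplify the given library call, returning
/// the replacement value or nullptr. Handles intrinsics, fortified (_chk)
/// calls, and recognized library functions, in that order.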
2973 Value *LibCallSimplifier::optimizeCall(CallInst *CI, IRBuilderBase &Builder) {
  // TODO: Split out the code below that operates on FP calls so that
  //       we can allow non-FP calls with the StrictFP attribute to be
  //       optimized.
2977   if (CI->isNoBuiltin())
2978     return nullptr;
2979 
2980   LibFunc Func;
2981   Function *Callee = CI->getCalledFunction();
2982   bool IsCallingConvC = TargetLibraryInfoImpl::isCallingConvCCompatible(CI);
2983 
2984   SmallVector<OperandBundleDef, 2> OpBundles;
2985   CI->getOperandBundlesAsDefs(OpBundles);
2986 
2987   IRBuilderBase::OperandBundlesGuard Guard(Builder);
2988   Builder.setDefaultOperandBundles(OpBundles);
2989 
2990   // Command-line parameter overrides instruction attribute.
2991   // This can't be moved to optimizeFloatingPointLibCall() because it may be
2992   // used by the intrinsic optimizations.
2993   if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
2994     UnsafeFPShrink = EnableUnsafeFPShrink;
2995   else if (isa<FPMathOperator>(CI) && CI->isFast())
2996     UnsafeFPShrink = true;
2997 
2998   // First, check for intrinsics.
2999   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
3000     if (!IsCallingConvC)
3001       return nullptr;
3002     // The FP intrinsics have corresponding constrained versions so we don't
3003     // need to check for the StrictFP attribute here.
3004     switch (II->getIntrinsicID()) {
3005     case Intrinsic::pow:
3006       return optimizePow(CI, Builder);
3007     case Intrinsic::exp2:
3008       return optimizeExp2(CI, Builder);
3009     case Intrinsic::log:
3010     case Intrinsic::log2:
3011     case Intrinsic::log10:
3012       return optimizeLog(CI, Builder);
3013     case Intrinsic::sqrt:
3014       return optimizeSqrt(CI, Builder);
3015     case Intrinsic::memset:
3016       return optimizeMemSet(CI, Builder);
3017     case Intrinsic::memcpy:
3018       return optimizeMemCpy(CI, Builder);
3019     case Intrinsic::memmove:
3020       return optimizeMemMove(CI, Builder);
3021     default:
3022       return nullptr;
3023     }
3024   }
3025 
3026   // Also try to simplify calls to fortified library functions.
3027   if (Value *SimplifiedFortifiedCI =
3028           FortifiedSimplifier.optimizeCall(CI, Builder)) {
3029     // Try to further simplify the result.
3030     CallInst *SimplifiedCI = dyn_cast<CallInst>(SimplifiedFortifiedCI);
3031     if (SimplifiedCI && SimplifiedCI->getCalledFunction()) {
      // Transfer CI's uses to SimplifiedCI before re-simplifying, since some
      // of the simplifications below analyze the call's uses.
3034       replaceAllUsesWith(CI, SimplifiedCI);
3035 
3036       // Set insertion point to SimplifiedCI to guarantee we reach all uses
3037       // we might replace later on.
3038       IRBuilderBase::InsertPointGuard Guard(Builder);
3039       Builder.SetInsertPoint(SimplifiedCI);
3040       if (Value *V = optimizeStringMemoryLibCall(SimplifiedCI, Builder)) {
3041         // If we were able to further simplify, remove the now redundant call.
3042         substituteInParent(SimplifiedCI, V);
3043         return V;
3044       }
3045     }
3046     return SimplifiedFortifiedCI;
3047   }
3048 
3049   // Then check for known library functions.
3050   if (TLI->getLibFunc(*Callee, Func) && TLI->has(Func)) {
3051     // We never change the calling convention.
3052     if (!ignoreCallingConv(Func) && !IsCallingConvC)
3053       return nullptr;
3054     if (Value *V = optimizeStringMemoryLibCall(CI, Builder))
3055       return V;
3056     if (Value *V = optimizeFloatingPointLibCall(CI, Func, Builder))
3057       return V;
3058     switch (Func) {
3059     case LibFunc_ffs:
3060     case LibFunc_ffsl:
3061     case LibFunc_ffsll:
3062       return optimizeFFS(CI, Builder);
3063     case LibFunc_fls:
3064     case LibFunc_flsl:
3065     case LibFunc_flsll:
3066       return optimizeFls(CI, Builder);
3067     case LibFunc_abs:
3068     case LibFunc_labs:
3069     case LibFunc_llabs:
3070       return optimizeAbs(CI, Builder);
3071     case LibFunc_isdigit:
3072       return optimizeIsDigit(CI, Builder);
3073     case LibFunc_isascii:
3074       return optimizeIsAscii(CI, Builder);
3075     case LibFunc_toascii:
3076       return optimizeToAscii(CI, Builder);
3077     case LibFunc_atoi:
3078     case LibFunc_atol:
3079     case LibFunc_atoll:
3080       return optimizeAtoi(CI, Builder);
3081     case LibFunc_strtol:
3082     case LibFunc_strtoll:
3083       return optimizeStrtol(CI, Builder);
3084     case LibFunc_printf:
3085       return optimizePrintF(CI, Builder);
3086     case LibFunc_sprintf:
3087       return optimizeSPrintF(CI, Builder);
3088     case LibFunc_snprintf:
3089       return optimizeSnPrintF(CI, Builder);
3090     case LibFunc_fprintf:
3091       return optimizeFPrintF(CI, Builder);
3092     case LibFunc_fwrite:
3093       return optimizeFWrite(CI, Builder);
3094     case LibFunc_fputs:
3095       return optimizeFPuts(CI, Builder);
3096     case LibFunc_puts:
3097       return optimizePuts(CI, Builder);
3098     case LibFunc_perror:
3099       return optimizeErrorReporting(CI, Builder);
3100     case LibFunc_vfprintf:
3101     case LibFunc_fiprintf:
3102       return optimizeErrorReporting(CI, Builder, 0);
3103     default:
3104       return nullptr;
3105     }
3106   }
3107   return nullptr;
3108 }
3109 
3110 LibCallSimplifier::LibCallSimplifier(
3111     const DataLayout &DL, const TargetLibraryInfo *TLI,
3112     OptimizationRemarkEmitter &ORE,
3113     BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
3114     function_ref<void(Instruction *, Value *)> Replacer,
3115     function_ref<void(Instruction *)> Eraser)
3116     : FortifiedSimplifier(TLI), DL(DL), TLI(TLI), ORE(ORE), BFI(BFI), PSI(PSI),
3117       UnsafeFPShrink(false), Replacer(Replacer), Eraser(Eraser) {}
3118 
3119 void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) {
3120   // Indirect through the replacer used in this instance.
3121   Replacer(I, With);
3122 }
3123 
3124 void LibCallSimplifier::eraseFromParent(Instruction *I) {
3125   Eraser(I);
3126 }
3127 
3128 // TODO:
3129 //   Additional cases that we need to add to this file:
3130 //
3131 // cbrt:
3132 //   * cbrt(expN(X))  -> expN(x/3)
3133 //   * cbrt(sqrt(x))  -> pow(x,1/6)
3134 //   * cbrt(cbrt(x))  -> pow(x,1/9)
3135 //
3136 // exp, expf, expl:
3137 //   * exp(log(x))  -> x
3138 //
3139 // log, logf, logl:
3140 //   * log(exp(x))   -> x
3141 //   * log(exp(y))   -> y*log(e)
3142 //   * log(exp10(y)) -> y*log(10)
3143 //   * log(sqrt(x))  -> 0.5*log(x)
3144 //
3145 // pow, powf, powl:
3146 //   * pow(sqrt(x),y) -> pow(x,y*0.5)
3147 //   * pow(pow(x,y),z)-> pow(x,y*z)
3148 //
3149 // signbit:
3150 //   * signbit(cnst) -> cnst'
//   * signbit(nncst) -> 0 (if nncst is a non-negative constant)
3152 //
3153 // sqrt, sqrtf, sqrtl:
3154 //   * sqrt(expN(x))  -> expN(x*0.5)
3155 //   * sqrt(Nroot(x)) -> pow(x,1/(2*N))
3156 //   * sqrt(pow(x,y)) -> pow(|x|,y*0.5)
3157 //
3158 
3159 //===----------------------------------------------------------------------===//
3160 // Fortified Library Call Optimizations
3161 //===----------------------------------------------------------------------===//
3162 
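/// Return true if a fortified (_chk) call can be lowered to its unchecked
/// counterpart, i.e. the runtime object-size check is provably redundant or
/// the object size is unknown (-1).
///
/// \param ObjSizeOp index of the object-size argument.
/// \param SizeOp    optional index of the length argument being written.
/// \param StrOp     optional index of a string argument whose constant length
///                  bounds the amount written.
/// \param FlagOp    optional index of a flag argument; a non-zero or unknown
///                  flag disables the fold.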
3163 bool
3164 FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
3165                                                     unsigned ObjSizeOp,
3166                                                     Optional<unsigned> SizeOp,
3167                                                     Optional<unsigned> StrOp,
3168                                                     Optional<unsigned> FlagOp) {
3169   // If this function takes a flag argument, the implementation may use it to
3170   // perform extra checks. Don't fold into the non-checking variant.
3171   if (FlagOp) {
3172     ConstantInt *Flag = dyn_cast<ConstantInt>(CI->getArgOperand(*FlagOp));
3173     if (!Flag || !Flag->isZero())
3174       return false;
3175   }
3176 
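  // If the length being written is the same value as the object size, the
  // check can never fail, so the call is trivially foldable.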
3177   if (SizeOp && CI->getArgOperand(ObjSizeOp) == CI->getArgOperand(*SizeOp))
3178     return true;
3179 
3180   if (ConstantInt *ObjSizeCI =
3181           dyn_cast<ConstantInt>(CI->getArgOperand(ObjSizeOp))) {
3182     if (ObjSizeCI->isMinusOne())
3183       return true;
3184     // If the object size wasn't -1 (unknown), bail out if we were asked to.
3185     if (OnlyLowerUnknownSize)
3186       return false;
3187     if (StrOp) {
3188       uint64_t Len = GetStringLength(CI->getArgOperand(*StrOp));
      // If GetStringLength returned 0, the string length is unknown, so we
      // can't remove the check.
3191       if (Len)
3192         annotateDereferenceableBytes(CI, *StrOp, Len);
3193       else
3194         return false;
3195       return ObjSizeCI->getZExtValue() >= Len;
3196     }
3197 
3198     if (SizeOp) {
3199       if (ConstantInt *SizeCI =
3200               dyn_cast<ConstantInt>(CI->getArgOperand(*SizeOp)))
3201         return ObjSizeCI->getZExtValue() >= SizeCI->getZExtValue();
3202     }
3203   }
3204   return false;
3205 }
3206 
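// __memcpy_chk(dst, src, len, objsize) --> llvm.memcpy(dst, src, len)
// Like memcpy, the folded form evaluates to dst.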
3207 Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
3208                                                      IRBuilderBase &B) {
3209   if (isFortifiedCallFoldable(CI, 3, 2)) {
3210     CallInst *NewCI =
3211         B.CreateMemCpy(CI->getArgOperand(0), Align(1), CI->getArgOperand(1),
3212                        Align(1), CI->getArgOperand(2));
3213     NewCI->setAttributes(CI->getAttributes());
3214     NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3215     return CI->getArgOperand(0);
3216   }
3217   return nullptr;
3218 }
3219 
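// __memmove_chk(dst, src, len, objsize) --> llvm.memmove(dst, src, len)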
3220 Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
3221                                                       IRBuilderBase &B) {
3222   if (isFortifiedCallFoldable(CI, 3, 2)) {
3223     CallInst *NewCI =
3224         B.CreateMemMove(CI->getArgOperand(0), Align(1), CI->getArgOperand(1),
3225                         Align(1), CI->getArgOperand(2));
3226     NewCI->setAttributes(CI->getAttributes());
3227     NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3228     return CI->getArgOperand(0);
3229   }
3230   return nullptr;
3231 }
3232 
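// __memset_chk(dst, c, len, objsize) --> llvm.memset(dst, c, len)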
3233 Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
3234                                                      IRBuilderBase &B) {
3235   if (isFortifiedCallFoldable(CI, 3, 2)) {
3236     Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
3237     CallInst *NewCI = B.CreateMemSet(CI->getArgOperand(0), Val,
3238                                      CI->getArgOperand(2), Align(1));
3239     NewCI->setAttributes(CI->getAttributes());
3240     NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3241     return CI->getArgOperand(0);
3242   }
3243   return nullptr;
3244 }
3245 
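// __mempcpy_chk(dst, src, len, objsize) --> mempcpy(dst, src, len)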
3246 Value *FortifiedLibCallSimplifier::optimizeMemPCpyChk(CallInst *CI,
3247                                                       IRBuilderBase &B) {
3248   const DataLayout &DL = CI->getModule()->getDataLayout();
3249   if (isFortifiedCallFoldable(CI, 3, 2))
3250     if (Value *Call = emitMemPCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3251                                   CI->getArgOperand(2), B, DL, TLI)) {
3252       CallInst *NewCI = cast<CallInst>(Call);
3253       NewCI->setAttributes(CI->getAttributes());
3254       NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3255       return NewCI;
3256     }
3257   return nullptr;
3258 }
3259 
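// __st[rp]cpy_chk(dst, src, objsize) --> st[rp]cpy(dst, src) when the copy is
// known to fit, or __memcpy_chk with the known source length otherwise.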
3260 Value *FortifiedLibCallSimplifier::optimizeStrpCpyChk(CallInst *CI,
3261                                                       IRBuilderBase &B,
3262                                                       LibFunc Func) {
3263   const DataLayout &DL = CI->getModule()->getDataLayout();
3264   Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1),
3265         *ObjSize = CI->getArgOperand(2);
3266 
3267   // __stpcpy_chk(x,x,...)  -> x+strlen(x)
3268   if (Func == LibFunc_stpcpy_chk && !OnlyLowerUnknownSize && Dst == Src) {
3269     Value *StrLen = emitStrLen(Src, B, DL, TLI);
3270     return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
3271   }
3272 
  // If a) we don't have any length information, or b) we know this will
  // fit, then just lower to a plain st[rp]cpy. Otherwise we'll keep our
  // st[rp]cpy_chk call, which may fail at runtime if the copy is too long.
  // TODO: It might be nice to get a maximum length when the possible
  // string lengths vary.
3278   if (isFortifiedCallFoldable(CI, 2, None, 1)) {
3279     if (Func == LibFunc_strcpy_chk)
3280       return emitStrCpy(Dst, Src, B, TLI);
3281     else
3282       return emitStpCpy(Dst, Src, B, TLI);
3283   }
3284 
3285   if (OnlyLowerUnknownSize)
3286     return nullptr;
3287 
  // Maybe we can still fold __st[rp]cpy_chk to __memcpy_chk.
3289   uint64_t Len = GetStringLength(Src);
3290   if (Len)
3291     annotateDereferenceableBytes(CI, 1, Len);
3292   else
3293     return nullptr;
3294 
3295   Type *SizeTTy = DL.getIntPtrType(CI->getContext());
3296   Value *LenV = ConstantInt::get(SizeTTy, Len);
3297   Value *Ret = emitMemCpyChk(Dst, Src, LenV, ObjSize, B, DL, TLI);
3298   // If the function was an __stpcpy_chk, and we were able to fold it into
3299   // a __memcpy_chk, we still need to return the correct end pointer.
3300   if (Ret && Func == LibFunc_stpcpy_chk)
3301     return B.CreateGEP(B.getInt8Ty(), Dst, ConstantInt::get(SizeTTy, Len - 1));
3302   return Ret;
3303 }
3304 
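// __strlen_chk(s, objsize) --> strlen(s)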
3305 Value *FortifiedLibCallSimplifier::optimizeStrLenChk(CallInst *CI,
3306                                                      IRBuilderBase &B) {
3307   if (isFortifiedCallFoldable(CI, 1, None, 0))
3308     return emitStrLen(CI->getArgOperand(0), B, CI->getModule()->getDataLayout(),
3309                       TLI);
3310   return nullptr;
3311 }
3312 
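// __st[rp]ncpy_chk(dst, src, n, objsize) --> st[rp]ncpy(dst, src, n)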
3313 Value *FortifiedLibCallSimplifier::optimizeStrpNCpyChk(CallInst *CI,
3314                                                        IRBuilderBase &B,
3315                                                        LibFunc Func) {
3316   if (isFortifiedCallFoldable(CI, 3, 2)) {
3317     if (Func == LibFunc_strncpy_chk)
3318       return emitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3319                                CI->getArgOperand(2), B, TLI);
3320     else
3321       return emitStpNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3322                          CI->getArgOperand(2), B, TLI);
3323   }
3324 
3325   return nullptr;
3326 }
3327 
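// __memccpy_chk(dst, src, c, n, objsize) --> memccpy(dst, src, c, n)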
3328 Value *FortifiedLibCallSimplifier::optimizeMemCCpyChk(CallInst *CI,
3329                                                       IRBuilderBase &B) {
3330   if (isFortifiedCallFoldable(CI, 4, 3))
3331     return emitMemCCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3332                        CI->getArgOperand(2), CI->getArgOperand(3), B, TLI);
3333 
3334   return nullptr;
3335 }
3336 
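// __snprintf_chk(dst, n, flag, objsize, fmt, ...) --> snprintf(dst, n, fmt, ...)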
3337 Value *FortifiedLibCallSimplifier::optimizeSNPrintfChk(CallInst *CI,
3338                                                        IRBuilderBase &B) {
3339   if (isFortifiedCallFoldable(CI, 3, 1, None, 2)) {
3340     SmallVector<Value *, 8> VariadicArgs(drop_begin(CI->args(), 5));
3341     return emitSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
3342                         CI->getArgOperand(4), VariadicArgs, B, TLI);
3343   }
3344 
3345   return nullptr;
3346 }
3347 
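// __sprintf_chk(dst, flag, objsize, fmt, ...) --> sprintf(dst, fmt, ...)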
3348 Value *FortifiedLibCallSimplifier::optimizeSPrintfChk(CallInst *CI,
3349                                                       IRBuilderBase &B) {
3350   if (isFortifiedCallFoldable(CI, 2, None, None, 1)) {
3351     SmallVector<Value *, 8> VariadicArgs(drop_begin(CI->args(), 4));
3352     return emitSPrintf(CI->getArgOperand(0), CI->getArgOperand(3), VariadicArgs,
3353                        B, TLI);
3354   }
3355 
3356   return nullptr;
3357 }
3358 
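// __strcat_chk(dst, src, objsize) --> strcat(dst, src)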
3359 Value *FortifiedLibCallSimplifier::optimizeStrCatChk(CallInst *CI,
3360                                                      IRBuilderBase &B) {
3361   if (isFortifiedCallFoldable(CI, 2))
3362     return emitStrCat(CI->getArgOperand(0), CI->getArgOperand(1), B, TLI);
3363 
3364   return nullptr;
3365 }
3366 
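// __strlcat_chk(dst, src, n, objsize) --> strlcat(dst, src, n)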
3367 Value *FortifiedLibCallSimplifier::optimizeStrLCat(CallInst *CI,
3368                                                    IRBuilderBase &B) {
3369   if (isFortifiedCallFoldable(CI, 3))
3370     return emitStrLCat(CI->getArgOperand(0), CI->getArgOperand(1),
3371                        CI->getArgOperand(2), B, TLI);
3372 
3373   return nullptr;
3374 }
3375 
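// __strncat_chk(dst, src, n, objsize) --> strncat(dst, src, n)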
3376 Value *FortifiedLibCallSimplifier::optimizeStrNCatChk(CallInst *CI,
3377                                                       IRBuilderBase &B) {
3378   if (isFortifiedCallFoldable(CI, 3))
3379     return emitStrNCat(CI->getArgOperand(0), CI->getArgOperand(1),
3380                        CI->getArgOperand(2), B, TLI);
3381 
3382   return nullptr;
3383 }
3384 
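// __strlcpy_chk(dst, src, n, objsize) --> strlcpy(dst, src, n)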
3385 Value *FortifiedLibCallSimplifier::optimizeStrLCpyChk(CallInst *CI,
3386                                                       IRBuilderBase &B) {
3387   if (isFortifiedCallFoldable(CI, 3))
3388     return emitStrLCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3389                        CI->getArgOperand(2), B, TLI);
3390 
3391   return nullptr;
3392 }
3393 
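// __vsnprintf_chk(dst, n, flag, objsize, fmt, ap) --> vsnprintf(dst, n, fmt, ap)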
3394 Value *FortifiedLibCallSimplifier::optimizeVSNPrintfChk(CallInst *CI,
3395                                                         IRBuilderBase &B) {
3396   if (isFortifiedCallFoldable(CI, 3, 1, None, 2))
3397     return emitVSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
3398                          CI->getArgOperand(4), CI->getArgOperand(5), B, TLI);
3399 
3400   return nullptr;
3401 }
3402 
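// __vsprintf_chk(dst, flag, objsize, fmt, ap) --> vsprintf(dst, fmt, ap)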
3403 Value *FortifiedLibCallSimplifier::optimizeVSPrintfChk(CallInst *CI,
3404                                                        IRBuilderBase &B) {
3405   if (isFortifiedCallFoldable(CI, 2, None, None, 1))
3406     return emitVSPrintf(CI->getArgOperand(0), CI->getArgOperand(3),
3407                         CI->getArgOperand(4), B, TLI);
3408 
3409   return nullptr;
3410 }
3411 
3412 Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI,
3413                                                 IRBuilderBase &Builder) {
3414   // FIXME: We shouldn't be changing "nobuiltin" or TLI unavailable calls here.
3415   // Some clang users checked for _chk libcall availability using:
3416   //   __has_builtin(__builtin___memcpy_chk)
3417   // When compiling with -fno-builtin, this is always true.
3418   // When passing -ffreestanding/-mkernel, which both imply -fno-builtin, we
3419   // end up with fortified libcalls, which isn't acceptable in a freestanding
3420   // environment which only provides their non-fortified counterparts.
3421   //
3422   // Until we change clang and/or teach external users to check for availability
3423   // differently, disregard the "nobuiltin" attribute and TLI::has.
3424   //
3425   // PR23093.
3426 
3427   LibFunc Func;
3428   Function *Callee = CI->getCalledFunction();
3429   bool IsCallingConvC = TargetLibraryInfoImpl::isCallingConvCCompatible(CI);
3430 
3431   SmallVector<OperandBundleDef, 2> OpBundles;
3432   CI->getOperandBundlesAsDefs(OpBundles);
3433 
3434   IRBuilderBase::OperandBundlesGuard Guard(Builder);
3435   Builder.setDefaultOperandBundles(OpBundles);
3436 
  // First, check that this is a known library function and that the prototype
  // is correct.
3439   if (!TLI->getLibFunc(*Callee, Func))
3440     return nullptr;
3441 
3442   // We never change the calling convention.
3443   if (!ignoreCallingConv(Func) && !IsCallingConvC)
3444     return nullptr;
3445 
3446   switch (Func) {
3447   case LibFunc_memcpy_chk:
3448     return optimizeMemCpyChk(CI, Builder);
3449   case LibFunc_mempcpy_chk:
3450     return optimizeMemPCpyChk(CI, Builder);
3451   case LibFunc_memmove_chk:
3452     return optimizeMemMoveChk(CI, Builder);
3453   case LibFunc_memset_chk:
3454     return optimizeMemSetChk(CI, Builder);
3455   case LibFunc_stpcpy_chk:
3456   case LibFunc_strcpy_chk:
3457     return optimizeStrpCpyChk(CI, Builder, Func);
3458   case LibFunc_strlen_chk:
3459     return optimizeStrLenChk(CI, Builder);
3460   case LibFunc_stpncpy_chk:
3461   case LibFunc_strncpy_chk:
3462     return optimizeStrpNCpyChk(CI, Builder, Func);
3463   case LibFunc_memccpy_chk:
3464     return optimizeMemCCpyChk(CI, Builder);
3465   case LibFunc_snprintf_chk:
3466     return optimizeSNPrintfChk(CI, Builder);
3467   case LibFunc_sprintf_chk:
3468     return optimizeSPrintfChk(CI, Builder);
3469   case LibFunc_strcat_chk:
3470     return optimizeStrCatChk(CI, Builder);
3471   case LibFunc_strlcat_chk:
3472     return optimizeStrLCat(CI, Builder);
3473   case LibFunc_strncat_chk:
3474     return optimizeStrNCatChk(CI, Builder);
3475   case LibFunc_strlcpy_chk:
3476     return optimizeStrLCpyChk(CI, Builder);
3477   case LibFunc_vsnprintf_chk:
3478     return optimizeVSNPrintfChk(CI, Builder);
3479   case LibFunc_vsprintf_chk:
3480     return optimizeVSPrintfChk(CI, Builder);
3481   default:
3482     break;
3483   }
3484   return nullptr;
3485 }
3486 
3487 FortifiedLibCallSimplifier::FortifiedLibCallSimplifier(
3488     const TargetLibraryInfo *TLI, bool OnlyLowerUnknownSize)
3489     : TLI(TLI), OnlyLowerUnknownSize(OnlyLowerUnknownSize) {}
3490