//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass specialises functions with constant parameters. Constant
// parameters like function pointers and constant globals are propagated to
// the callee by specializing the function. The main benefit of this pass at
// the moment is that indirect calls are transformed into direct calls, which
// creates inlining opportunities that the inliner would otherwise not have
// had. That is why function specialisation is deliberately run before the
// inliner in the optimisation pipeline. Otherwise, we would only benefit from
// constant argument passing, which is a valid use-case too, but has not been
// explored much in terms of performance uplifts, cost-model and compile-time
// impact.
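//
// As an illustrative (hypothetical) example of the indirect-call benefit,
// consider a function that calls through a function-pointer argument:
//
//     define internal i32 @compute(i32 %x, i32 (i32)* %binop) {
//       %r = call i32 %binop(i32 %x)
//       ret i32 %r
//     }
//
//     %y = call i32 @compute(i32 %v, i32 (i32)* @plus_one)
//
// Specializing @compute on @plus_one produces a clone in which the indirect
// call becomes a direct call to @plus_one, which the inliner can then
// consider.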
//
// Current limitations:
// - It does not yet handle integer ranges. We do support "literal constants",
//   but this is off by default behind an option.
// - Only one argument per function is specialised,
// - The cost-model could be refined further (it mainly focuses on inlining
//   benefits),
// - We are not yet caching analysis results, but profiling and checking where
//   extra compile time is spent didn't suggest this to be a problem.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by the option
//   `func-specialization-max-iters`. Thus, increasing the number of iterations
//   will linearly increase the number of times recursive functions get
//   specialized; see also the discussion in https://reviews.llvm.org/D106426
//   for details. Perhaps there is a more compile-time friendly way to
//   control/limit the number of specialisations for recursive functions.
// - Don't transform the function if function specialization does not trigger;
//   the SCCPSolver may make IR changes.
//
// References:
// - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
//   it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxClonesThreshold(
    "func-specialization-max-clones", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have fewer than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// TODO: This needs checking to see the impact on compile-times, which is why
// this is off by default for now.
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

namespace {
// Bookkeeping struct to pass data from the analysis and profitability phase
// to the actual transform helper functions.
struct ArgInfo {
  Function *Fn;         // The function to perform specialisation on.
  Argument *Formal;     // The Formal argument being analysed.
  Constant *Actual;     // A corresponding actual constant argument.
  InstructionCost Gain; // Profitability: Gain = Bonus - Cost.

  // Flag if this will be a partial specialization, in which case we will need
  // to keep the original function around in addition to the added
  // specializations.
  bool Partial = false;

  ArgInfo(Function *F, Argument *A, Constant *C, InstructionCost G)
      : Fn(F), Formal(A), Actual(C), Gain(G) {}
};
} // Anonymous namespace

using FuncList = SmallVectorImpl<Function *>;
using ConstList = SmallVectorImpl<Constant *>;

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is overdefined, i.e. it is neither unknown/undef
// nor a single constant value.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

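// Return the value of the single non-volatile store to \p Alloca if that
// value is a constant and all other uses of \p Alloca are \p Call itself or
// bitcasts that feed \p Call; otherwise return nullptr.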
static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // Bail out if this is a second store or the store is volatile.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments, because after a first iteration of specialisation a
// reduced example may look like this:
//
//     define internal void @RecursiveFn(i32* arg1) {
//       %temp = alloca i32, align 4
//       store i32 2, i32* %temp, align 4
//       call void @RecursiveFn.1(i32* nonnull %temp)
//       ret void
//     }
//
// Before the next iteration we need to propagate the constant like so,
// which allows further specialization in subsequent iterations:
//
//     @funcspec.arg = internal constant i32 2
//
//     define internal void @RecursiveFn(i32* arg1) {
//       call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//       ret void
//     }
//
static void constantArgPropagation(FuncList &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument tracked functions to see if there
  // are any new constant values for the call instruction via
  // stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to the solver's worklist.
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

namespace {
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

  SmallPtrSet<Function *, 2> SpecializedFuncs;
  SmallVector<Instruction *> ReplacedWithConstant;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool specializeFunctions(FuncList &Candidates, FuncList &WorkList) {
    bool Changed = false;
    for (auto *F : Candidates) {
      if (!isCandidateFunction(F))
        continue;

      auto Cost = getSpecializationCost(F);
      if (!Cost.isValid()) {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
        continue;
      }

      LLVM_DEBUG(dbgs() << "FnSpecialization: Specialization cost for "
                        << F->getName() << " is " << Cost << "\n");

      auto ConstArgs = calculateGains(F, Cost);
      if (ConstArgs.empty()) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
        continue;
      }

      for (auto &CA : ConstArgs) {
        specializeFunction(CA, WorkList);
        Changed = true;
      }
    }

    updateSpecializedFuncs(Candidates, WorkList);
    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  void removeDeadInstructions() {
    for (auto *I : ReplacedWithConstant) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead instruction "
                        << *I << "\n");
      I->eraseFromParent();
    }
    ReplacedWithConstant.clear();
  }

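  /// Try to replace all uses of \p V with the constant (or undef) value the
  /// solver has inferred for it. If \p V is an instruction that becomes dead
  /// as a result, it is queued in ReplacedWithConstant for later removal.
  ///
  /// \returns true if a replacement was made.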
  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing " << *V
                      << "\nFnSpecialization: with " << *Const << "\n");

    // Record uses of V to avoid visiting irrelevant uses of const later.
    SmallVector<Instruction *> UseInsts;
    for (auto *U : V->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          UseInsts.push_back(I);

    V->replaceAllUsesWith(Const);

    for (auto *I : UseInsts)
      Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        ReplacedWithConstant.push_back(I);
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether it's worthwhile to specialize function \p F
  /// based on the known constant values its arguments can take on, i.e. it
  /// calculates a gain and returns a list of actual arguments that are deemed
  /// profitable to specialize. Specialization is performed on the first
  /// interesting argument. Specializations based on additional arguments will
  /// be evaluated on following iterations of the main IPSCCP solve loop.
  SmallVector<ArgInfo> calculateGains(Function *F, InstructionCost Cost) {
    SmallVector<ArgInfo> Worklist;
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &FormalArg : F->args()) {
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      bool IsPartial = true;
      SmallVector<Constant *> ActualArgs;
      if (!isArgumentInteresting(&FormalArg, ActualArgs, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument "
                          << FormalArg.getNameOrAsOperand()
                          << " is not interesting\n");
        continue;
      }

      for (auto *ActualArg : ActualArgs) {
        InstructionCost Gain =
            ForceFunctionSpecialization
                ? 1
                : getSpecializationBonus(&FormalArg, ActualArg) - Cost;

        if (Gain <= 0)
          continue;
        Worklist.push_back({F, &FormalArg, ActualArg, Gain});
      }

      if (Worklist.empty())
        continue;

      // Sort the candidates in descending order.
      llvm::stable_sort(Worklist, [](const ArgInfo &L, const ArgInfo &R) {
        return L.Gain > R.Gain;
      });

      // Truncate the worklist to 'MaxClonesThreshold' candidates if
      // necessary.
      if (Worklist.size() > MaxClonesThreshold) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Number of candidates exceeds "
                          << "the maximum number of clones threshold.\n"
                          << "FnSpecialization: Truncating worklist to "
                          << MaxClonesThreshold << " candidates.\n");
        Worklist.erase(Worklist.begin() + MaxClonesThreshold,
                       Worklist.end());
      }

      if (IsPartial || Worklist.size() < ActualArgs.size())
        for (auto &ActualArg : Worklist)
          ActualArg.Partial = true;

      LLVM_DEBUG(
        dbgs() << "FnSpecialization: Specializations for function "
               << F->getName() << "\n";
        for (auto &C : Worklist) {
          dbgs() << "FnSpecialization:   FormalArg = "
                 << C.Formal->getNameOrAsOperand() << ", ActualArg = "
                 << C.Actual->getNameOrAsOperand() << ", Gain = "
                 << C.Gain << "\n";
        }
      );

      // FIXME: Only one argument per function.
      break;
    }
    return Worklist;
  }

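  /// Return true if \p F should be considered for specialization: it has not
  /// itself been produced by specialization, is not optimized for size, is
  /// reachable according to the solver, and is not marked alwaysinline.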
  bool isCandidateFunction(Function *F) {
    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // There is no point in specializing a function that will be inlined anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");
    return true;
  }

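  /// Specialize function \p AI.Fn for the constant \p AI.Actual bound to
  /// argument \p AI.Formal: clone the function, rewrite the relevant call
  /// sites to call the clone, and add the clone to \p WorkList. If this is a
  /// full (non-partial) specialization, the original function is marked
  /// unreachable.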
  void specializeFunction(ArgInfo &AI, FuncList &WorkList) {
    Function *Clone = cloneCandidateFunction(AI.Fn);
    Argument *ClonedArg = Clone->getArg(AI.Formal->getArgNo());

    // Rewrite calls to the function so that they call the clone instead.
    rewriteCallSites(AI.Fn, Clone, *ClonedArg, AI.Actual);

    // Initialize the lattice state of the arguments of the function clone,
    // marking the argument on which we specialized the function constant
    // with the given value.
    Solver.markArgInFuncSpecialization(AI.Fn, ClonedArg, AI.Actual);

    // Record the new clone in the list of specialized functions.
    WorkList.push_back(Clone);
    NbFunctionsSpecialized++;

    // If the function has been completely specialized, the original function
    // is no longer needed. Mark it unreachable.
    if (!AI.Partial)
      Solver.markFunctionUnreachable(AI.Fn);
  }

  /// Compute and return the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, we
    // shouldn't specialize it. Set the specialization cost to Invalid.
    // Likewise, if the function is so small that it is likely to be fully
    // inlined, we shouldn't specialize it either.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function, multiplied by a penalty that grows with
    // the number of specializations already made.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If U is not an instruction, we do not know how to evaluate it. Keep the
    // minimum possible cost for now so that it doesn't affect specialization.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if the user is inside a loop, weighting it by
    // AvgLoopIterationCount raised to the loop depth.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for constant: "
                      << C->getNameOrAsOperand() << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization:   User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();

      LLVM_DEBUG(dbgs() << "FnSpecialization:   Inlining bonus " << Bonus
                        << " for user " << *U << "\n");
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants. If the values in
  /// \p Constants represent the complete set of values that \p A can take on,
  /// the function will be completely specialized, and the \p IsPartial flag is
  /// set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A, ConstList &Constants,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Nothing to do, argument "
                        << A->getNameOrAsOperand()
                        << " is already constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently it won't specialize if there are over the threshold of
    // calls using the same argument, e.g. foo(a) x 4 and foo(b) x 1, but it
    // might be beneficial to take the occurrences into account in the cost
    // model, so we would need to find the unique constants.
    //
    // TODO 2: this does not yet support integer ranges, i.e. non-singleton
    // constant ranges.
    //
    IsPartial = !getPossibleConstants(A, Constants);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting argument "
                      << A->getNameOrAsOperand() << "\n");
    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A, ConstList &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has the MinSize attribute set, that call site won't
      // be specialized.
      if (CS.hasFnAttr(Attribute::MinSize)) {
        AllConstant = false;
        continue;
      }

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return false;

      // For now, constant expressions are fine, but only if their first
      // operand is a function (e.g. a bitcast of a function).
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return false;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return false;

        if (!GV->getValueType()->isSingleValueType())
          return false;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at the
  /// position of \p Arg is equal to constant \p C. The calls are rewritten to
  /// call function \p Clone instead.
  ///
  /// Callsites that have been marked with the MinSize function attribute won't
  /// be specialized and rewritten.
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing call sites of "
                      << F->getName() << " with "
                      << Clone->getName() << "\n");

    for (auto *CS : CallSitesToRewrite) {
      LLVM_DEBUG(dbgs() << "FnSpecialization:   "
                        << CS->getFunction()->getName() << " ->"
                        << *CS << "\n");
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }

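  /// Register the newly created clones in \p WorkList with the solver and the
  /// specializer: record them so they are not specialized again, mark them as
  /// (argument-)tracked and executable, append them to \p Candidates for the
  /// next round, and try to replace arguments that the solver has resolved to
  /// constants.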
  void updateSpecializedFuncs(FuncList &Candidates, FuncList &WorkList) {
    for (auto *F : WorkList) {
      SpecializedFuncs.insert(F);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (F->hasExactDefinition() && !F->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(F);

      Solver.addArgumentTrackedFunction(F);
      Candidates.push_back(F);
      Solver.markBlockExecutable(&F->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : F->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getNameOrAsOperand() << "\n");
    }
  }
};
} // namespace

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // If there are no tracked functions, there is nothing to do: don't run the
  // solver; just remove any ssa_copy intrinsics that may have been introduced
  // and return.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // That we don't run the solver unnecessarily is checked in the
      // regression test nothing-to-do.ll, so if this debug message is
      // changed, that test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> WorkList;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, WorkList)) {
    LLVM_DEBUG(dbgs() << "FnSpecialization: Finished iteration " << I << "\n");

    // Run the solver for the specialized functions.
    RunSCCPSolver(WorkList);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    WorkList.clear();
    Changed = true;
  }

  LLVM_DEBUG(dbgs() << "FnSpecialization: Number of specializations = "
                    << NumFuncSpecialized << "\n");

  // Clean up the IR by removing dead instructions and ssa_copy intrinsics.
  FS.removeDeadInstructions();
  removeSSACopy(M);
  return Changed;
}