//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass specializes functions that are called with constant arguments
// (e.g. function pointers and constant globals). These constants are
// propagated into the callee by cloning, i.e. specializing, the function.
//
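// As an illustrative sketch (hypothetical IR, not a real test case), given a
// call that passes a known function pointer:
//
//     define internal i32 @compute(i32 (i32)* %binop, i32 %x) { ... }
//     ...
//     %r = call i32 @compute(i32 (i32)* @plus, i32 %x)
//
// the pass can create a clone of @compute in which %binop is known to be
// @plus, so that the indirect call inside the clone becomes a direct call
// which later passes can inline.
//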
// Current limitations:
// - It does not yet handle integer ranges.
// - Only one argument per function is specialized.
// - The cost-model could be further refined.
// - We are not yet caching analysis results.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing the number of iterations
//   linearly increases the number of times recursive functions get
//   specialized; see also the discussion in https://reviews.llvm.org/D106426
//   for details. Perhaps there is a more compile-time friendly way to
//   control/limit the number of specialisations for recursive functions.
// - Don't transform the function if no function specialization happens.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxConstantsThreshold(
    "func-specialization-max-constants", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have fewer than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// TODO: This needs checking to see the impact on compile-times, which is why
// this is off by default for now.
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

namespace {
// Bookkeeping struct to pass data from the analysis and profitability phase
// to the actual transform helper functions.
struct ArgInfo {
  Function *Fn;         // The function to perform specialisation on.
  Argument *Arg;        // The formal argument being analysed.
  Constant *Const;      // A corresponding actual constant argument.
  InstructionCost Gain; // Profitability: Gain = Bonus - Cost.

  // Flag if this will be a partial specialization, in which case we will need
  // to keep the original function around in addition to the added
  // specializations.
  bool Partial = false;

  ArgInfo(Function *F, Argument *A, Constant *C, InstructionCost G)
      : Fn(F), Arg(A), Const(C), Gain(G) {}
};
} // Anonymous namespace

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is overdefined, i.e. it is neither unknown/undef
// nor a (single-element) constant.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

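// If \p Alloca is only ever written once with a constant value, and its only
// other users are \p Call (or bitcasts that feed \p Call), return that stored
// constant; otherwise return nullptr.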
static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // Bail out if we see more than one store or a volatile store.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialisation, a
// reduced example may look like this:
//
//     define internal void @RecursiveFn(i32* arg1) {
//       %temp = alloca i32, align 4
//       store i32 2, i32* %temp, align 4
//       call void @RecursiveFn.1(i32* nonnull %temp)
//       ret void
//     }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in the following iterations:
//
//     @funcspec.arg = internal constant i32 2
//
//     define internal void @RecursiveFn(i32* arg1) {
//       call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//       ret void
//     }
//
static void constantArgPropagation(SmallVectorImpl<Function *> &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument tracked functions to see if there
  // are any new constant values for the call instruction via
  // stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

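      // Hoist the constant into a new internal global so that the constant
      // value, rather than the stack slot, is what flows into the callee.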
      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to Solver Worklist
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

namespace {
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

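  /// Specializations created so far; such functions are not considered for
  /// specialization again.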
  SmallPtrSet<Function *, 2> SpecializedFuncs;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool
  specializeFunctions(SmallVectorImpl<Function *> &FuncDecls,
                      SmallVectorImpl<Function *> &CurrentSpecializations) {
    bool Changed = false;
    for (auto *F : FuncDecls) {
      if (!isCandidateFunction(F, CurrentSpecializations))
        continue;

      auto Cost = getSpecializationCost(F);
      if (!Cost.isValid()) {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
        continue;
      }

      auto ConstArgs = calculateGains(F, Cost);
      if (ConstArgs.empty()) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
        continue;
      }

      for (auto &CA : ConstArgs) {
        specializeFunction(CA, CurrentSpecializations);
        Changed = true;
      }
    }

    for (auto *SpecializedFunc : CurrentSpecializations) {
      SpecializedFuncs.insert(SpecializedFunc);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (SpecializedFunc->hasExactDefinition() &&
          !SpecializedFunc->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(SpecializedFunc);
      Solver.addArgumentTrackedFunction(SpecializedFunc);
      FuncDecls.push_back(SpecializedFunc);
      Solver.markBlockExecutable(&SpecializedFunc->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : SpecializedFunc->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getName() << "\n");
    }

    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

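  /// If the solver has inferred a constant (or undef) lattice value for \p V,
  /// replace all uses of \p V with that value, revisit the affected users in
  /// the solver, and erase \p V if it is an instruction that is now safe to
  /// remove.
  ///
  /// \returns true if a replacement was made.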
  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
    V->replaceAllUsesWith(Const);

    for (auto *U : Const->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        I->eraseFromParent();
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether it's worthwhile to specialize function \p F
  /// based on the known constant values its arguments can take on, i.e. it
  /// calculates a gain and returns a list of actual arguments that are deemed
  /// profitable to specialize. Specialization is performed on the first
  /// interesting argument. Specializations based on additional arguments will
  /// be evaluated on following iterations of the main IPSCCP solve loop.
  SmallVector<ArgInfo> calculateGains(Function *F, InstructionCost Cost) {
    SmallVector<ArgInfo> Worklist;
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &FormalArg : F->args()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing arg: "
                        << FormalArg.getName() << "\n");
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      bool IsPartial = true;
      SmallVector<Constant *> ActualConstArg;
      if (!isArgumentInteresting(&FormalArg, ActualConstArg, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument is not interesting\n");
        continue;
      }

      for (auto *ActualArg : ActualConstArg) {
        InstructionCost Gain =
            ForceFunctionSpecialization
                ? 1
                : getSpecializationBonus(&FormalArg, ActualArg) - Cost;

        if (Gain <= 0)
          continue;
        Worklist.push_back({F, &FormalArg, ActualArg, Gain});
      }

      if (Worklist.empty())
        continue;

      // Sort the candidates in descending order.
      llvm::stable_sort(Worklist, [](const ArgInfo &L, const ArgInfo &R) {
        return L.Gain > R.Gain;
      });

      // TODO: truncate the worklist to 'MaxConstantsThreshold' candidates if
      // necessary.
      if (Worklist.size() > MaxConstantsThreshold) {
        Worklist.clear();
        continue;
      }

      if (IsPartial || Worklist.size() < ActualConstArg.size())
        for (auto &ActualArg : Worklist)
          ActualArg.Partial = true;

      LLVM_DEBUG(dbgs() << "Sorted list of candidates by gain:\n";
                 for (auto &C : Worklist) {
                   dbgs() << "- Function = " << C.Fn->getName() << ", ";
                   dbgs() << "FormalArg = " << C.Arg->getName() << ", ";
                   dbgs() << "ActualArg = " << C.Const->getName() << ", ";
                   dbgs() << "Gain = " << C.Gain << "\n";
                 });

      // FIXME: Only one argument per function.
      break;
    }
    return Worklist;
  }

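  /// Return true if \p F should be considered for specialization at all: it is
  /// not itself a previously created specialization, it is not being optimized
  /// for size, its entry block is executable, and it is not always_inline.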
  bool isCandidateFunction(Function *F,
                           SmallVectorImpl<Function *> &Specializations) {
    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // It's wasteful to specialize a function that would get inlined anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");
    return true;
  }

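  /// Clone \p AI.Fn, rewrite matching call sites to call the clone, and seed
  /// the solver with the constant \p AI.Const for the cloned argument. For a
  /// full (non-partial) specialization, additionally mark the original
  /// function as unreachable.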
  void specializeFunction(ArgInfo &AI,
                          SmallVectorImpl<Function *> &Specializations) {
    Function *Clone = cloneCandidateFunction(AI.Fn);
    Argument *ClonedArg = Clone->getArg(AI.Arg->getArgNo());

    // Rewrite calls to the function so that they call the clone instead.
    rewriteCallSites(AI.Fn, Clone, *ClonedArg, AI.Const);

    // Initialize the lattice state of the arguments of the function clone,
    // marking the argument on which we specialized the function constant
    // with the given value.
    Solver.markArgInFuncSpecialization(AI.Fn, ClonedArg, AI.Const);

    // Record the new specialization.
    Specializations.push_back(Clone);
    NbFunctionsSpecialized++;

    // If the function has been completely specialized, the original function
    // is no longer needed. Mark it unreachable.
    if (!AI.Partial)
      Solver.markFunctionUnreachable(AI.Fn);
  }

  /// Compute and return the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function,
    // don't specialize it and set the specialization cost to Invalid.
    // Similarly, if the function is so small that it is likely to be fully
    // inlined, specializing it isn't worthwhile either.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function and penalty for specializing more functions.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

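  /// Estimate the bonus contributed by user \p U of a to-be-constant value:
  /// the size/latency cost of \p U (recursing through memory reads and casts
  /// into their users), scaled up by AvgLoopIterationCount for each loop level
  /// \p U is nested in.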
  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If it is not an instruction we do not know how to evaluate it.
    // Keep the minimum possible cost for now so that it doesn't affect
    // specialization.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if it is inside a loop.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for: " << *A
                      << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization: User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants. If the values in
  /// \p Constants represent the complete set of values that \p A can take on,
  /// the function will be completely specialized, and the \p IsPartial flag is
  /// set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A,
                             SmallVectorImpl<Constant *> &Constants,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: nothing to do, arg is already "
                        << "constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently we won't specialize if the number of calls passing a
    // constant for this argument exceeds the threshold, even when many of them
    // pass the same constant, e.g. foo(a) x 4 and foo(b) x 1. It might be
    // beneficial to take the number of occurrences into account in the cost
    // model, which would require collecting the unique constants.
    //
    // TODO 2: this currently does not support constant ranges, i.e. integer
    // ranges.
    //
    IsPartial = !getPossibleConstants(A, Constants);
    LLVM_DEBUG(dbgs() << "FnSpecialization: interesting arg: " << *A << "\n");
    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A,
                            SmallVectorImpl<Constant *> &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has attribute minsize set, that callsite won't be
      // specialized.
      if (CS.hasFnAttr(Attribute::MinSize)) {
        AllConstant = false;
        continue;
      }

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return false;

      // For now, constant expressions are fine, but only if their operand is a
      // function (e.g. a cast of a function pointer).
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return false;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return false;

        if (!GV->getValueType()->isSingleValueType())
          return false;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at index \p
  /// ArgNo is equal to constant \p C. The calls are rewritten to call function
  /// \p Clone instead.
  ///
  /// Callsites that have been marked with the MinSize function attribute won't
  /// be specialized and rewritten.
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }
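    // Rewrite a call if it passes the constant we are specializing on, or if
    // it is a recursive call inside the clone that simply forwards the (now
    // specialized) argument.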
    for (auto *CS : CallSitesToRewrite) {
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }
};
} // namespace

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // If there are no tracked functions there is nothing to do: don't run the
  // solver; just remove any ssa_copy intrinsics that may have been introduced.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // Not running the solver unnecessarily is checked in regression test
      // nothing-to-do.ll, so if this debug message is changed, this regression
      // test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> CurrentSpecializations;
  unsigned I = 0;
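  // Iteratively specialize functions: each round can expose new opportunities
  // (e.g. in the freshly created clones), so keep going until no function is
  // specialized or the maximum number of iterations is reached.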
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, CurrentSpecializations)) {

    // Run the solver for the specialized functions.
    RunSCCPSolver(CurrentSpecializations);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    CurrentSpecializations.clear();
    Changed = true;
  }

  // Clean up the IR by removing ssa_copy intrinsics.
  removeSSACopy(M);
  return Changed;
}