//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This specialises functions with constant parameters. Constant parameters
// like function pointers and constant globals are propagated to the callee by
// specializing the function. The main benefit of this pass at the moment is
// that indirect calls are transformed into direct calls, which creates
// inlining opportunities that the inliner would not have been able to find
// otherwise. That is why function specialisation is run before the inliner in
// the optimisation pipeline; this ordering is deliberate. Otherwise, we would
// only benefit from constant argument passing, which is a valid use case too,
// but one that hasn't been explored much in terms of performance uplift,
// cost model and compile-time impact.
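//
// For illustration, consider a sketch like the following (hand-written IR;
// all names in this example are made up):
//
//     define internal i32 @compute(i32 %x, i32 (i32)* %binop) {
//       %r = call i32 %binop(i32 %x)
//       ret i32 %r
//     }
//
// If a caller passes @square as the second argument, specialization creates a
// clone of @compute in which %binop is replaced by @square, so the indirect
// call becomes a direct call to @square that the inliner can then consider.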
//
// Current limitations:
// - It does not yet handle integer ranges. We do support "literal constants",
//   but that is off by default under an option.
// - Only one argument per function is specialised,
// - The cost model could be refined further (it mainly focuses on inlining
//   benefits),
// - We are not yet caching analysis results, but profiling and checking where
//   extra compile time is spent didn't suggest this to be a problem.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing this value, i.e. the
//   number of iterations, will linearly increase the number of times recursive
//   functions get specialized; see also the discussion in
//   https://reviews.llvm.org/D106426 for details. Perhaps there is a more
//   compile-time friendly way to control/limit the number of specialisations
//   for recursive functions.
// - Don't transform the function if function specialization does not trigger;
//   the SCCPSolver may make IR changes.
//
// References:
// - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
//   it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueLatticeUtils.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxClonesThreshold(
    "func-specialization-max-clones", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have fewer than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// TODO: This needs checking to see the impact on compile-times, which is why
// this is off by default for now.
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

namespace {
// Bookkeeping struct to pass data from the analysis and profitability phase
// to the actual transform helper functions.
struct ArgInfo {
  Function *Fn;         // The function to perform specialisation on.
  Argument *Formal;     // The Formal argument being analysed.
  Constant *Actual;     // A corresponding actual constant argument.
  InstructionCost Gain; // Profitability: Gain = Bonus - Cost.

  // Flag if this will be a partial specialization, in which case we will need
  // to keep the original function around in addition to the added
  // specializations.
  bool Partial = false;

  ArgInfo(Function *F, Argument *A, Constant *C, InstructionCost G)
      : Fn(F), Formal(A), Actual(C), Gain(G) {}
};
} // Anonymous namespace

using FuncList = SmallVectorImpl<Function *>;
using ConstList = SmallVectorImpl<Constant *>;

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is overdefined, i.e. neither unknown/undef nor a
// constant as defined above.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // This is a duplicate store, bail out.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialisation, a
// reduced example may look like this:
//
//     define internal void @RecursiveFn(i32* arg1) {
//       %temp = alloca i32, align 4
//       store i32 2, i32* %temp, align 4
//       call void @RecursiveFn.1(i32* nonnull %temp)
//       ret void
//     }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in the next iterations:
//
//     @funcspec.arg = internal constant i32 2
//
//     define internal void @RecursiveFn(i32* arg1) {
//       call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//       ret void
//     }
//
static void constantArgPropagation(FuncList &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument-tracked functions to see if there are any new
  // constant values for the call instructions via stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to Solver Worklist
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

namespace {
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

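  /// The set of function clones created so far, so that we don't try to
  /// specialize an already specialized function again.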
  SmallPtrSet<Function *, 4> SpecializedFuncs;
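  /// Original functions that have been completely replaced by their
  /// specializations; they are erased by removeDeadFunctions().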
  SmallPtrSet<Function *, 4> FullySpecialized;
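  /// Instructions that have been folded to a constant and are now dead; they
  /// are erased by removeDeadInstructions().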
  SmallVector<Instruction *> ReplacedWithConstant;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  ~FunctionSpecializer() {
    // Eliminate dead code.
    removeDeadInstructions();
    removeDeadFunctions();
  }

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool specializeFunctions(FuncList &Candidates, FuncList &WorkList) {
    bool Changed = false;
    for (auto *F : Candidates) {
      if (!isCandidateFunction(F))
        continue;

      auto Cost = getSpecializationCost(F);
      if (!Cost.isValid()) {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
        continue;
      }

      LLVM_DEBUG(dbgs() << "FnSpecialization: Specialization cost for "
                        << F->getName() << " is " << Cost << "\n");

      auto ConstArgs = calculateGains(F, Cost);
      if (ConstArgs.empty()) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
        continue;
      }

      for (auto &CA : ConstArgs) {
        specializeFunction(CA, WorkList);
        Changed = true;
      }
    }

    updateSpecializedFuncs(Candidates, WorkList);
    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  void removeDeadInstructions() {
    for (auto *I : ReplacedWithConstant) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead instruction "
                        << *I << "\n");
      I->eraseFromParent();
    }
    ReplacedWithConstant.clear();
  }

  void removeDeadFunctions() {
    for (auto *F : FullySpecialized) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead function "
                        << F->getName() << "\n");
      F->eraseFromParent();
    }
    FullySpecialized.clear();
  }

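  /// Try to replace \p V with the constant (or undef) value the solver has
  /// inferred for it, and queue the then-dead instruction for removal.
  ///
  /// \returns true if \p V was replaced.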
  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing " << *V
                      << "\nFnSpecialization: with " << *Const << "\n");

    // Record uses of V to avoid visiting irrelevant uses of const later.
    SmallVector<Instruction *> UseInsts;
    for (auto *U : V->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          UseInsts.push_back(I);

    V->replaceAllUsesWith(Const);

    for (auto *I : UseInsts)
      Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        ReplacedWithConstant.push_back(I);
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether it's worthwhile to specialize function \p F
  /// based on the known constant values its arguments can take on, i.e. it
  /// calculates a gain and returns a list of actual arguments that are deemed
  /// profitable to specialize. Specialization is performed on the first
  /// interesting argument. Specializations based on additional arguments will
  /// be evaluated on following iterations of the main IPSCCP solve loop.
  SmallVector<ArgInfo> calculateGains(Function *F, InstructionCost Cost) {
    SmallVector<ArgInfo> Worklist;
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &FormalArg : F->args()) {
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      bool IsPartial = true;
      SmallVector<Constant *> ActualArgs;
      if (!isArgumentInteresting(&FormalArg, ActualArgs, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument "
                          << FormalArg.getNameOrAsOperand()
                          << " is not interesting\n");
        continue;
      }

      for (auto *ActualArg : ActualArgs) {
        InstructionCost Gain =
            ForceFunctionSpecialization
                ? 1
                : getSpecializationBonus(&FormalArg, ActualArg) - Cost;

        if (Gain <= 0)
          continue;
        Worklist.push_back({F, &FormalArg, ActualArg, Gain});
      }

      if (Worklist.empty())
        continue;

      // Sort the candidates in descending order.
      llvm::stable_sort(Worklist, [](const ArgInfo &L, const ArgInfo &R) {
        return L.Gain > R.Gain;
      });

      // Truncate the worklist to 'MaxClonesThreshold' candidates if
      // necessary.
      if (Worklist.size() > MaxClonesThreshold) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Number of candidates exceeds "
                          << "the maximum number of clones threshold.\n"
                          << "FnSpecialization: Truncating worklist to "
                          << MaxClonesThreshold << " candidates.\n");
        Worklist.erase(Worklist.begin() + MaxClonesThreshold,
                       Worklist.end());
      }

      if (IsPartial || Worklist.size() < ActualArgs.size())
        for (auto &ActualArg : Worklist)
          ActualArg.Partial = true;

      LLVM_DEBUG(
        dbgs() << "FnSpecialization: Specializations for function "
               << F->getName() << "\n";
        for (auto &C : Worklist) {
          dbgs() << "FnSpecialization:   FormalArg = "
                 << C.Formal->getNameOrAsOperand() << ", ActualArg = "
                 << C.Actual->getNameOrAsOperand() << ", Gain = "
                 << C.Gain << "\n";
        }
      );

      // FIXME: Only one argument per function.
      break;
    }
    return Worklist;
  }

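  /// Check whether function \p F is worth looking at at all: skip clones we
  /// created ourselves, functions optimized for size, dead functions, and
  /// functions that will be inlined unconditionally anyway.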
  bool isCandidateFunction(Function *F) {
    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // Specializing a function that will always be inlined is a waste of time.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");
    return true;
  }

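  /// Create a clone of \p AI.Fn specialized for the constant actual argument
  /// in \p AI, rewrite the matching call sites to call the clone, and add the
  /// clone to \p WorkList so it is processed in the next iteration.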
  void specializeFunction(ArgInfo &AI, FuncList &WorkList) {
    Function *Clone = cloneCandidateFunction(AI.Fn);
    Argument *ClonedArg = Clone->getArg(AI.Formal->getArgNo());

    // Rewrite calls to the function so that they call the clone instead.
    rewriteCallSites(AI.Fn, Clone, *ClonedArg, AI.Actual);

    // Initialize the lattice state of the arguments of the function clone,
    // marking the argument on which we specialized the function constant
    // with the given value.
    Solver.markArgInFuncSpecialization(AI.Fn, ClonedArg, AI.Actual);

    // Mark all the specialized functions
    WorkList.push_back(Clone);
    NbFunctionsSpecialized++;

    // If the function has been completely specialized, the original function
    // is no longer needed. Mark it unreachable.
    if (AI.Fn->getNumUses() == 0 ||
        all_of(AI.Fn->users(), [&AI](User *U) {
          if (auto *CS = dyn_cast<CallBase>(U))
            return CS->getFunction() == AI.Fn;
          return false;
        })) {
      Solver.markFunctionUnreachable(AI.Fn);
      FullySpecialized.insert(AI.Fn);
    }
  }

  /// Compute and return the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, we
    // shouldn't specialize it. Set the specialization cost to Invalid.
    // Likewise, if the function is small enough that it is likely to be fully
    // inlined, specializing it is unlikely to be worthwhile.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to the cost of all the
    // instructions in the function, multiplied by a penalty that grows with
    // the number of functions specialized so far.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

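  /// Estimate the bonus contributed by user \p U if its operand becomes a
  /// known constant: the cost of \p U itself, plus (for memory reads and
  /// casts) the bonus of its transitive users, scaled by the loop depth of
  /// \p U.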
  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If U is not an instruction, we do not know how to evaluate it.
    // Keep the minimum possible cost for now so that it doesn't affect
    // specialization.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if it is inside a loop.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for constant: "
                      << C->getNameOrAsOperand() << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization:   User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();

      LLVM_DEBUG(dbgs() << "FnSpecialization:   Inlining bonus " << Bonus
                        << " for user " << *U << "\n");
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants. If the values in
  /// \p Constants represent the complete set of values that \p A can take on,
  /// the function will be completely specialized, and the \p IsPartial flag is
  /// set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A, ConstList &Constants,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Nothing to do, argument "
                        << A->getNameOrAsOperand()
                        << " is already constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently it won't specialize if there are more calls using the
    // same argument than the threshold allows, e.g. foo(a) x 4 and foo(b) x 1,
    // but it might be beneficial to take the occurrences into account in the
    // cost model; for that we would need to find the unique constants.
    //
    // TODO 2: this currently does not support constant ranges, i.e. integer
    // ranges.
    //
    IsPartial = !getPossibleConstants(A, Constants);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting argument "
                      << A->getNameOrAsOperand() << "\n");
    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A, ConstList &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has attribute minsize set, that callsite won't be
      // specialized.
      if (CS.hasFnAttr(Attribute::MinSize)) {
        AllConstant = false;
        continue;
      }

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return false;

      // For now, constant expressions are fine, but only if they are
      // (casts of) functions.
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return false;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return false;

        if (!GV->getValueType()->isSingleValueType())
          return false;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at index \p
  /// ArgNo is equal to constant \p C. The calls are rewritten to call function
  /// \p Clone instead.
  ///
  /// Callsites that have been marked with the MinSize function attribute won't
  /// be specialized and rewritten.
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing call sites of "
                      << F->getName() << " with "
                      << Clone->getName() << "\n");

    for (auto *CS : CallSitesToRewrite) {
      LLVM_DEBUG(dbgs() << "FnSpecialization:   "
                        << CS->getFunction()->getName() << " ->"
                        << *CS << "\n");
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }

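  /// Register the newly created specializations in \p WorkList with the
  /// solver, add them to \p Candidates so they are considered for further
  /// specialization, and replace their now-constant arguments.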
  void updateSpecializedFuncs(FuncList &Candidates, FuncList &WorkList) {
    for (auto *F : WorkList) {
      SpecializedFuncs.insert(F);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (F->hasExactDefinition() && !F->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(F);

      Solver.addArgumentTrackedFunction(F);
      Candidates.push_back(F);
      Solver.markBlockExecutable(&F->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : F->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getNameOrAsOperand() << "\n");
    }
  }
};
} // namespace

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // No tracked functions, so nothing to do: don't run the solver; just remove
  // any ssa_copy intrinsics that may have been introduced.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // That the solver is not run unnecessarily is checked in the regression
      // test nothing-to-do.ll, so if this debug message is changed, that
      // regression test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> WorkList;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, WorkList)) {
    LLVM_DEBUG(dbgs() << "FnSpecialization: Finished iteration " << I << "\n");

    // Run the solver for the specialized functions.
    RunSCCPSolver(WorkList);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    WorkList.clear();
    Changed = true;
  }

  LLVM_DEBUG(dbgs() << "FnSpecialization: Number of specializations = "
                    << NumFuncSpecialized << "\n");

  // Remove any ssa_copy intrinsics that may have been introduced.
  removeSSACopy(M);
  return Changed;
}