1 //===- FunctionSpecialization.cpp - Function Specialization ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This specialises functions with constant parameters. Constant parameters
10 // like function pointers and constant globals are propagated to the callee by
11 // specializing the function. The main benefit of this pass at the moment is
// that indirect calls are transformed into direct calls, which exposes
// inlining opportunities that the inliner would not otherwise have had. That
// is why function specialisation is deliberately run before the inliner in
// the optimisation pipeline. Otherwise, we would only benefit from constant
16 // passing, which is a valid use-case too, but hasn't been explored much in
17 // terms of performance uplifts, cost-model and compile-time impact.
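//
// As a simplified, hypothetical example (all IR names below are made up):
//
//     define internal i32 @plus(i32 %x, i32 %y) {
//       %sum = add i32 %x, %y
//       ret i32 %sum
//     }
//
//     define internal i32 @compute(i32 %n, i32 (i32, i32)* %binop) {
//       %res = call i32 %binop(i32 %n, i32 1)
//       ret i32 %res
//     }
//
//     define i32 @caller(i32 %n) {
//       %r = call i32 @compute(i32 %n, i32 (i32, i32)* @plus)
//       ret i32 %r
//     }
//
// Specializing @compute on its %binop argument produces a clone, say
// @compute.1, in which the indirect call becomes a direct call to @plus,
// which the inliner can then consider for inlining.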
18 //
19 // Current limitations:
20 // - It does not yet handle integer ranges. We do support "literal constants",
21 //   but that's off by default under an option.
22 // - Only 1 argument per function is specialised,
23 // - The cost-model could be further looked into (it mainly focuses on inlining
24 //   benefits),
25 // - We are not yet caching analysis results, but profiling and checking where
26 //   extra compile time is spent didn't suggest this to be a problem.
27 //
28 // Ideas:
29 // - With a function specialization attribute for arguments, we could have
30 //   a direct way to steer function specialization, avoiding the cost-model,
31 //   and thus control compile-times / code-size.
32 //
33 // Todos:
34 // - Specializing recursive functions relies on running the transformation a
35 //   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing this value (and with it
//   the number of iterations) linearly increases the number of times recursive
//   functions get specialized; see also the discussion in
//   https://reviews.llvm.org/D106426 for details. Perhaps there is a
40 //   compile-time friendlier way to control/limit the number of specialisations
41 //   for recursive functions.
42 // - Don't transform the function if function specialization does not trigger;
43 //   the SCCPSolver may make IR changes.
44 //
45 // References:
46 // - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
47 //   it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
48 //
49 //===----------------------------------------------------------------------===//
50 
51 #include "llvm/ADT/Statistic.h"
52 #include "llvm/Analysis/CodeMetrics.h"
53 #include "llvm/Analysis/InlineCost.h"
54 #include "llvm/Analysis/LoopInfo.h"
55 #include "llvm/Analysis/TargetTransformInfo.h"
56 #include "llvm/Analysis/ValueLattice.h"
57 #include "llvm/Analysis/ValueLatticeUtils.h"
58 #include "llvm/IR/IntrinsicInst.h"
59 #include "llvm/Transforms/Scalar/SCCP.h"
60 #include "llvm/Transforms/Utils/Cloning.h"
61 #include "llvm/Transforms/Utils/SCCPSolver.h"
62 #include "llvm/Transforms/Utils/SizeOpts.h"
63 #include <cmath>
64 
65 using namespace llvm;
66 
67 #define DEBUG_TYPE "function-specialization"
68 
69 STATISTIC(NumFuncSpecialized, "Number of functions specialized");
70 
71 static cl::opt<bool> ForceFunctionSpecialization(
72     "force-function-specialization", cl::init(false), cl::Hidden,
73     cl::desc("Force function specialization for every call site with a "
74              "constant argument"));
75 
76 static cl::opt<unsigned> FuncSpecializationMaxIters(
77     "func-specialization-max-iters", cl::Hidden,
78     cl::desc("The maximum number of iterations function specialization is run"),
79     cl::init(1));
80 
81 static cl::opt<unsigned> MaxClonesThreshold(
82     "func-specialization-max-clones", cl::Hidden,
83     cl::desc("The maximum number of clones allowed for a single function "
84              "specialization"),
85     cl::init(3));
86 
87 static cl::opt<unsigned> SmallFunctionThreshold(
88     "func-specialization-size-threshold", cl::Hidden,
89     cl::desc("Don't specialize functions that have less than this theshold "
90              "number of instructions"),
91     cl::init(100));
92 
93 static cl::opt<unsigned>
94     AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
95                           cl::desc("Average loop iteration count cost"),
96                           cl::init(10));
97 
98 static cl::opt<bool> SpecializeOnAddresses(
99     "func-specialization-on-address", cl::init(false), cl::Hidden,
100     cl::desc("Enable function specialization on the address of global values"));
101 
102 // Disabled by default as it can significantly increase compilation times.
103 // Running nikic's compile time tracker on x86 with instruction count as the
104 // metric shows 3-4% regression for SPASS while being neutral for all other
105 // benchmarks of the llvm test suite.
106 //
107 // https://llvm-compile-time-tracker.com
108 // https://github.com/nikic/llvm-compile-time-tracker
109 static cl::opt<bool> EnableSpecializationForLiteralConstant(
110     "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
111     cl::desc("Enable specialization of functions that take a literal constant "
112              "as an argument."));
113 
114 namespace {
115 // Bookkeeping struct to pass data from the analysis and profitability phase
116 // to the actual transform helper functions.
117 struct SpecializationInfo {
118   SmallVector<ArgInfo, 8> Args; // Stores the {formal,actual} argument pairs.
119   InstructionCost Gain;         // Profitability: Gain = Bonus - Cost.
120 };
121 } // Anonymous namespace
122 
123 using FuncList = SmallVectorImpl<Function *>;
124 using CallArgBinding = std::pair<CallBase *, Constant *>;
125 using CallSpecBinding = std::pair<CallBase *, SpecializationInfo>;
126 // We are using MapVector because it guarantees deterministic iteration
127 // order across executions.
128 using SpecializationMap = SmallMapVector<CallBase *, SpecializationInfo, 8>;
129 
130 // Helper to check if \p LV is either a constant or a constant
131 // range with a single element. This should cover exactly the same cases as the
132 // old ValueLatticeElement::isConstant() and is intended to be used in the
133 // transition to ValueLatticeElement.
134 static bool isConstant(const ValueLatticeElement &LV) {
135   return LV.isConstant() ||
136          (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
137 }
138 
// Helper to check if \p LV is either overdefined or a constant range with
// more than one element, i.e. not a single constant value.
140 static bool isOverdefined(const ValueLatticeElement &LV) {
141   return !LV.isUnknownOrUndef() && !isConstant(LV);
142 }
143 
144 static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
145   Value *StoreValue = nullptr;
146   for (auto *User : Alloca->users()) {
147     // We can't use llvm::isAllocaPromotable() as that would fail because of
148     // the usage in the CallInst, which is what we check here.
149     if (User == Call)
150       continue;
151     if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
152       if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
153         return nullptr;
154       continue;
155     }
156 
157     if (auto *Store = dyn_cast<StoreInst>(User)) {
      // Bail if this is not the only store, or if the store is volatile.
159       if (StoreValue || Store->isVolatile())
160         return nullptr;
161       StoreValue = Store->getValueOperand();
162       continue;
163     }
164     // Bail if there is any other unknown usage.
165     return nullptr;
166   }
167   return dyn_cast_or_null<Constant>(StoreValue);
168 }
169 
// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if the call argument \p Val
// is such an alloca, or \p Val itself if it already is a constant int.
173 static Constant *getConstantStackValue(CallInst *Call, Value *Val,
174                                        SCCPSolver &Solver) {
175   if (!Val)
176     return nullptr;
177   Val = Val->stripPointerCasts();
178   if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
179     return ConstVal;
180   auto *Alloca = dyn_cast<AllocaInst>(Val);
181   if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
182     return nullptr;
183   return getPromotableAlloca(Alloca, Call);
184 }
185 
186 // To support specializing recursive functions, it is important to propagate
187 // constant arguments because after a first iteration of specialisation, a
188 // reduced example may look like this:
189 //
//     define internal void @RecursiveFn(i32* %arg1) {
//       %temp = alloca i32, align 4
//       store i32 2, i32* %temp, align 4
//       call void @RecursiveFn.1(i32* nonnull %temp)
//       ret void
//     }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in subsequent iterations:
//
//     @funcspec.arg = internal constant i32 2
//
//     define internal void @RecursiveFn(i32* %arg1) {
//       call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//       ret void
//     }
206 //
207 static void constantArgPropagation(FuncList &WorkList, Module &M,
208                                    SCCPSolver &Solver) {
  // Iterate over the argument-tracked functions to see if there are any new
  // constant values for their call instructions via stack variables.
212   for (auto *F : WorkList) {
213     // TODO: Generalize for any read only arguments.
214     if (F->arg_size() != 1)
215       continue;
216 
217     auto &Arg = *F->arg_begin();
218     if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
219       continue;
220 
221     for (auto *User : F->users()) {
222       auto *Call = dyn_cast<CallInst>(User);
223       if (!Call)
224         break;
225       auto *ArgOp = Call->getArgOperand(0);
226       auto *ArgOpType = ArgOp->getType();
227       auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
228       if (!ConstVal)
229         break;
230 
231       Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
232                                      GlobalValue::InternalLinkage, ConstVal,
233                                      "funcspec.arg");
234 
235       if (ArgOpType != ConstVal->getType())
236         GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());
237 
238       Call->setArgOperand(0, GV);
239 
      // Add the changed CallInst to the solver's worklist.
241       Solver.visitCall(*Call);
242     }
243   }
244 }
245 
246 // ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
247 // interfere with the constantArgPropagation optimization.
248 static void removeSSACopy(Function &F) {
249   for (BasicBlock &BB : F) {
250     for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
251       auto *II = dyn_cast<IntrinsicInst>(&Inst);
252       if (!II)
253         continue;
254       if (II->getIntrinsicID() != Intrinsic::ssa_copy)
255         continue;
256       Inst.replaceAllUsesWith(II->getOperand(0));
257       Inst.eraseFromParent();
258     }
259   }
260 }
261 
262 static void removeSSACopy(Module &M) {
263   for (Function &F : M)
264     removeSSACopy(F);
265 }
266 
267 namespace {
268 class FunctionSpecializer {
269 
270   /// The IPSCCP Solver.
271   SCCPSolver &Solver;
272 
273   /// Analyses used to help determine if a function should be specialized.
274   std::function<AssumptionCache &(Function &)> GetAC;
275   std::function<TargetTransformInfo &(Function &)> GetTTI;
276   std::function<TargetLibraryInfo &(Function &)> GetTLI;
277 
278   SmallPtrSet<Function *, 4> SpecializedFuncs;
279   SmallPtrSet<Function *, 4> FullySpecialized;
280   SmallVector<Instruction *> ReplacedWithConstant;
281 
282 public:
283   FunctionSpecializer(SCCPSolver &Solver,
284                       std::function<AssumptionCache &(Function &)> GetAC,
285                       std::function<TargetTransformInfo &(Function &)> GetTTI,
286                       std::function<TargetLibraryInfo &(Function &)> GetTLI)
287       : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}
288 
289   ~FunctionSpecializer() {
290     // Eliminate dead code.
291     removeDeadInstructions();
292     removeDeadFunctions();
293   }
294 
295   /// Attempt to specialize functions in the module to enable constant
296   /// propagation across function boundaries.
297   ///
298   /// \returns true if at least one function is specialized.
299   bool specializeFunctions(FuncList &Candidates, FuncList &WorkList) {
300     bool Changed = false;
301     for (auto *F : Candidates) {
302       if (!isCandidateFunction(F))
303         continue;
304 
305       auto Cost = getSpecializationCost(F);
306       if (!Cost.isValid()) {
307         LLVM_DEBUG(
308             dbgs() << "FnSpecialization: Invalid specialization cost.\n");
309         continue;
310       }
311 
312       LLVM_DEBUG(dbgs() << "FnSpecialization: Specialization cost for "
313                         << F->getName() << " is " << Cost << "\n");
314 
315       SmallVector<CallSpecBinding, 8> Specializations;
316       if (!calculateGains(F, Cost, Specializations)) {
317         LLVM_DEBUG(dbgs() << "FnSpecialization: No possible constants found\n");
318         continue;
319       }
320 
321       Changed = true;
322       for (auto &Entry : Specializations)
323         specializeFunction(F, Entry.second, WorkList);
324     }
325 
326     updateSpecializedFuncs(Candidates, WorkList);
327     NumFuncSpecialized += NbFunctionsSpecialized;
328     return Changed;
329   }
330 
331   void removeDeadInstructions() {
332     for (auto *I : ReplacedWithConstant) {
333       LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead instruction " << *I
334                         << "\n");
335       I->eraseFromParent();
336     }
337     ReplacedWithConstant.clear();
338   }
339 
340   void removeDeadFunctions() {
341     for (auto *F : FullySpecialized) {
342       LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead function "
343                         << F->getName() << "\n");
344       F->eraseFromParent();
345     }
346     FullySpecialized.clear();
347   }
348 
349   bool tryToReplaceWithConstant(Value *V) {
350     if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
351         V->user_empty())
352       return false;
353 
354     const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
355     if (isOverdefined(IV))
356       return false;
357     auto *Const =
358         isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
359 
360     LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing " << *V
361                       << "\nFnSpecialization: with " << *Const << "\n");
362 
    // Record the uses of V up front, so that after the replacement we only
    // revisit these and not all the other (irrelevant) uses of Const.
364     SmallVector<Instruction *> UseInsts;
365     for (auto *U : V->users())
366       if (auto *I = dyn_cast<Instruction>(U))
367         if (Solver.isBlockExecutable(I->getParent()))
368           UseInsts.push_back(I);
369 
370     V->replaceAllUsesWith(Const);
371 
372     for (auto *I : UseInsts)
373       Solver.visit(I);
374 
375     // Remove the instruction from Block and Solver.
376     if (auto *I = dyn_cast<Instruction>(V)) {
377       if (I->isSafeToRemove()) {
378         ReplacedWithConstant.push_back(I);
379         Solver.removeLatticeValueFor(I);
380       }
381     }
382     return true;
383   }
384 
385 private:
386   // The number of functions specialised, used for collecting statistics and
387   // also in the cost model.
388   unsigned NbFunctionsSpecialized = 0;
389 
390   /// Clone the function \p F and remove the ssa_copy intrinsics added by
391   /// the SCCPSolver in the cloned version.
392   Function *cloneCandidateFunction(Function *F, ValueToValueMapTy &Mappings) {
393     Function *Clone = CloneFunction(F, Mappings);
394     removeSSACopy(*Clone);
395     return Clone;
396   }
397 
398   /// This function decides whether it's worthwhile to specialize function
399   /// \p F based on the known constant values its arguments can take on. It
400   /// only discovers potential specialization opportunities without actually
401   /// applying them.
402   ///
403   /// \returns true if any specializations have been found.
404   bool calculateGains(Function *F, InstructionCost Cost,
405                       SmallVectorImpl<CallSpecBinding> &WorkList) {
406     SpecializationMap Specializations;
407     // Determine if we should specialize the function based on the values the
408     // argument can take on. If specialization is not profitable, we continue
409     // on to the next argument.
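    //
    // For example (illustrative numbers only): with a specialization cost of
    // 500 and a call site whose bonuses for two interesting arguments come to
    // 300 and 400, the accumulated Gain is 300 + 400 - 500 = 200; since the
    // gain is positive the specialization is kept, otherwise it is dropped
    // below.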
410     for (Argument &FormalArg : F->args()) {
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in ActualArgs.
413       SmallVector<CallArgBinding, 8> ActualArgs;
414       if (!isArgumentInteresting(&FormalArg, ActualArgs)) {
415         LLVM_DEBUG(dbgs() << "FnSpecialization: Argument "
416                           << FormalArg.getNameOrAsOperand()
417                           << " is not interesting\n");
418         continue;
419       }
420 
421       for (const auto &Entry : ActualArgs) {
422         CallBase *Call = Entry.first;
423         Constant *ActualArg = Entry.second;
424 
425         auto I = Specializations.insert({Call, SpecializationInfo()});
426         SpecializationInfo &S = I.first->second;
427 
428         if (I.second)
          S.Gain = ForceFunctionSpecialization ? 1 : (0 - Cost);
430         if (!ForceFunctionSpecialization)
431           S.Gain += getSpecializationBonus(&FormalArg, ActualArg);
432         S.Args.push_back({&FormalArg, ActualArg});
433       }
434     }
435 
436     // Remove unprofitable specializations.
437     Specializations.remove_if(
438         [](const auto &Entry) { return Entry.second.Gain <= 0; });
439 
440     // Clear the MapVector and return the underlying vector.
441     WorkList = Specializations.takeVector();
442 
443     // Sort the candidates in descending order.
444     llvm::stable_sort(WorkList, [](const auto &L, const auto &R) {
445       return L.second.Gain > R.second.Gain;
446     });
447 
448     // Truncate the worklist to 'MaxClonesThreshold' candidates if necessary.
449     if (WorkList.size() > MaxClonesThreshold) {
450       LLVM_DEBUG(dbgs() << "FnSpecialization: Number of candidates exceed "
451                         << "the maximum number of clones threshold.\n"
452                         << "FnSpecialization: Truncating worklist to "
453                         << MaxClonesThreshold << " candidates.\n");
454       WorkList.erase(WorkList.begin() + MaxClonesThreshold, WorkList.end());
455     }
456 
457     LLVM_DEBUG(dbgs() << "FnSpecialization: Specializations for function "
458                       << F->getName() << "\n";
459                for (const auto &Entry
460                     : WorkList) {
461                  dbgs() << "FnSpecialization:   Gain = " << Entry.second.Gain
462                         << "\n";
463                  for (const ArgInfo &Arg : Entry.second.Args)
464                    dbgs() << "FnSpecialization:   FormalArg = "
465                           << Arg.Formal->getNameOrAsOperand()
466                           << ", ActualArg = "
467                           << Arg.Actual->getNameOrAsOperand() << "\n";
468                });
469 
470     return !WorkList.empty();
471   }
472 
473   bool isCandidateFunction(Function *F) {
474     // Do not specialize the cloned function again.
475     if (SpecializedFuncs.contains(F))
476       return false;
477 
478     // If we're optimizing the function for size, we shouldn't specialize it.
479     if (F->hasOptSize() ||
480         shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
481       return false;
482 
483     // Exit if the function is not executable. There's no point in specializing
484     // a dead function.
485     if (!Solver.isBlockExecutable(&F->getEntryBlock()))
486       return false;
487 
    // Don't waste time specializing a function that will always be inlined.
489     if (F->hasFnAttribute(Attribute::AlwaysInline))
490       return false;
491 
492     LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
493                       << "\n");
494     return true;
495   }
496 
497   void specializeFunction(Function *F, SpecializationInfo &S,
498                           FuncList &WorkList) {
499     ValueToValueMapTy Mappings;
500     Function *Clone = cloneCandidateFunction(F, Mappings);
501 
502     // Rewrite calls to the function so that they call the clone instead.
503     rewriteCallSites(Clone, S.Args, Mappings);
504 
    // Initialize the lattice state of the arguments of the function clone,
    // marking the arguments on which we specialized the function as constant
    // with the given values.
508     Solver.markArgInFuncSpecialization(Clone, S.Args);
509 
    // Add the clone to the worklist of newly specialized functions.
511     WorkList.push_back(Clone);
512     NbFunctionsSpecialized++;
513 
514     // If the function has been completely specialized, the original function
515     // is no longer needed. Mark it unreachable.
516     if (F->getNumUses() == 0 || all_of(F->users(), [F](User *U) {
517           if (auto *CS = dyn_cast<CallBase>(U))
518             return CS->getFunction() == F;
519           return false;
520         })) {
521       Solver.markFunctionUnreachable(F);
522       FullySpecialized.insert(F);
523     }
524   }
525 
526   /// Compute and return the cost of specializing function \p F.
527   InstructionCost getSpecializationCost(Function *F) {
528     // Compute the code metrics for the function.
529     SmallPtrSet<const Value *, 32> EphValues;
530     CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
531     CodeMetrics Metrics;
532     for (BasicBlock &BB : *F)
533       Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);
534 
    // If the code metrics reveal that we shouldn't duplicate the function, we
    // shouldn't specialize it. Set the specialization cost to Invalid.
    // Likewise, if the function is small enough that it is likely to be fully
    // inlined, there is no point in specializing it.
539     if (Metrics.notDuplicatable ||
540         (!ForceFunctionSpecialization &&
541          Metrics.NumInsts < SmallFunctionThreshold)) {
542       InstructionCost C{};
543       C.setInvalid();
544       return C;
545     }
546 
    // Otherwise, set the specialization cost to the cost of all the
    // instructions in the function, multiplied by a penalty that grows with
    // the number of functions specialized so far.
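    //
    // For example (illustrative numbers only): the first specialization of a
    // function with 150 instructions costs 150 * InstrCost * 1; if two other
    // functions had already been specialized in this run, the penalty would
    // be 3 and the cost 150 * InstrCost * 3.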
549     unsigned Penalty = NbFunctionsSpecialized + 1;
550     return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
551   }
552 
553   InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
554                                LoopInfo &LI) {
555     auto *I = dyn_cast_or_null<Instruction>(U);
    // If this is not an instruction, we do not know how to evaluate it.
    // Keep the minimum possible cost for now so that it doesn't affect
    // specialization.
559     if (!I)
560       return std::numeric_limits<unsigned>::min();
561 
562     auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);
563 
564     // Traverse recursively if there are more uses.
565     // TODO: Any other instructions to be added here?
566     if (I->mayReadFromMemory() || I->isCast())
567       for (auto *User : I->users())
568         Cost += getUserBonus(User, TTI, LI);
569 
    // Scale the cost by the loop nesting depth, assuming on average
    // AvgLoopIterationCount iterations per loop level.
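    // For example, with the default func-specialization-avg-iters-cost of 10,
    // a user at loop depth 2 has its contribution scaled by 10^2 = 100, while
    // a user outside any loop is left unscaled.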
571     auto LoopDepth = LI.getLoopDepth(I->getParent());
572     Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
573     return Cost;
574   }
575 
576   /// Compute a bonus for replacing argument \p A with constant \p C.
577   InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
578     Function *F = A->getParent();
579     DominatorTree DT(*F);
580     LoopInfo LI(DT);
581     auto &TTI = (GetTTI)(*F);
582     LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for constant: "
583                       << C->getNameOrAsOperand() << "\n");
584 
585     InstructionCost TotalCost = 0;
586     for (auto *U : A->users()) {
587       TotalCost += getUserBonus(U, TTI, LI);
588       LLVM_DEBUG(dbgs() << "FnSpecialization:   User cost ";
589                  TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
590     }
591 
592     // The below heuristic is only concerned with exposing inlining
593     // opportunities via indirect call promotion. If the argument is not a
594     // (potentially casted) function pointer, give up.
595     Function *CalledFunction = dyn_cast<Function>(C->stripPointerCasts());
596     if (!CalledFunction)
597       return TotalCost;
598 
599     // Get TTI for the called function (used for the inline cost).
600     auto &CalleeTTI = (GetTTI)(*CalledFunction);
601 
602     // Look at all the call sites whose called value is the argument.
603     // Specializing the function on the argument would allow these indirect
604     // calls to be promoted to direct calls. If the indirect call promotion
605     // would likely enable the called function to be inlined, specializing is a
606     // good idea.
607     int Bonus = 0;
608     for (User *U : A->users()) {
609       if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
610         continue;
611       auto *CS = cast<CallBase>(U);
612       if (CS->getCalledOperand() != A)
613         continue;
614 
615       // Get the cost of inlining the called function at this call site. Note
616       // that this is only an estimate. The called function may eventually
617       // change in a way that leads to it not being inlined here, even though
618       // inlining looks profitable now. For example, one of its called
619       // functions may be inlined into it, making the called function too large
620       // to be inlined into this call site.
621       //
622       // We apply a boost for performing indirect call promotion by increasing
623       // the default threshold by the threshold for indirect calls.
624       auto Params = getInlineParams();
625       Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
626       InlineCost IC =
627           getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);
628 
629       // We clamp the bonus for this call to be between zero and the default
630       // threshold.
631       if (IC.isAlways())
632         Bonus += Params.DefaultThreshold;
633       else if (IC.isVariable() && IC.getCostDelta() > 0)
634         Bonus += IC.getCostDelta();
635 
636       LLVM_DEBUG(dbgs() << "FnSpecialization:   Inlining bonus " << Bonus
637                         << " for user " << *U << "\n");
638     }
639 
640     return TotalCost + Bonus;
641   }
642 
643   /// Determine if we should specialize a function based on the incoming values
644   /// of the given argument.
645   ///
646   /// This function implements the goal-directed heuristic. It determines if
647   /// specializing the function based on the incoming values of argument \p A
648   /// would result in any significant optimization opportunities. If
649   /// optimization opportunities exist, the constant values of \p A on which to
650   /// specialize the function are collected in \p Constants.
651   ///
652   /// \returns true if the function should be specialized on the given
653   /// argument.
654   bool isArgumentInteresting(Argument *A,
655                              SmallVectorImpl<CallArgBinding> &Constants) {
656     // For now, don't attempt to specialize functions based on the values of
657     // composite types.
658     if (!A->getType()->isSingleValueType() || A->user_empty())
659       return false;
660 
661     // If the argument isn't overdefined, there's nothing to do. It should
662     // already be constant.
663     if (!Solver.getLatticeValueFor(A).isOverdefined()) {
664       LLVM_DEBUG(dbgs() << "FnSpecialization: Nothing to do, argument "
665                         << A->getNameOrAsOperand()
666                         << " is already constant?\n");
667       return false;
668     }
669 
670     // Collect the constant values that the argument can take on. If the
671     // argument can't take on any constant values, we aren't going to
672     // specialize the function. While it's possible to specialize the function
673     // based on non-constant arguments, there's likely not much benefit to
674     // constant propagation in doing so.
675     //
    // TODO 1: currently it won't specialize if there are more calls using the
    // same argument than the clones threshold, e.g. foo(a) x 4 and foo(b) x 1,
    // but it might be beneficial to take the occurrences into account in the
    // cost model, so we would need to find the unique constants.
    //
    // TODO 2: this does not yet support integer ranges, i.e. constant ranges
    // with more than one element.
682     //
683     getPossibleConstants(A, Constants);
684 
685     if (Constants.empty())
686       return false;
687 
688     LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting argument "
689                       << A->getNameOrAsOperand() << "\n");
690     return true;
691   }
692 
693   /// Collect in \p Constants all the constant values that argument \p A can
694   /// take on.
695   void getPossibleConstants(Argument *A,
696                             SmallVectorImpl<CallArgBinding> &Constants) {
697     Function *F = A->getParent();
698 
699     // Iterate over all the call sites of the argument's parent function.
700     for (User *U : F->users()) {
701       if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
702         continue;
703       auto &CS = *cast<CallBase>(U);
      // If the call site has the attribute MinSize set, it won't be
      // specialized.
706       if (CS.hasFnAttr(Attribute::MinSize))
707         continue;
708 
709       // If the parent of the call site will never be executed, we don't need
710       // to worry about the passed value.
711       if (!Solver.isBlockExecutable(CS.getParent()))
712         continue;
713 
714       auto *V = CS.getArgOperand(A->getArgNo());
715       if (isa<PoisonValue>(V))
716         return;
717 
      // For now, constant expressions are fine, but only if their operand is a
      // function (e.g. a cast of a function pointer).
720       if (auto *CE = dyn_cast<ConstantExpr>(V))
721         if (!isa<Function>(CE->getOperand(0)))
722           return;
723 
724       // TrackValueOfGlobalVariable only tracks scalar global variables.
725       if (auto *GV = dyn_cast<GlobalVariable>(V)) {
726         // Check if we want to specialize on the address of non-constant
727         // global values.
728         if (!GV->isConstant())
729           if (!SpecializeOnAddresses)
730             return;
731 
732         if (!GV->getValueType()->isSingleValueType())
733           return;
734       }
735 
736       if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
737                                EnableSpecializationForLiteralConstant))
738         Constants.push_back({&CS, cast<Constant>(V)});
739     }
740   }
741 
742   /// Rewrite calls to function \p F to call function \p Clone instead.
743   ///
744   /// This function modifies calls to function \p F as long as the actual
745   /// arguments match those in \p Args. Note that for recursive calls we
746   /// need to compare against the cloned formal arguments.
747   ///
748   /// Callsites that have been marked with the MinSize function attribute won't
749   /// be specialized and rewritten.
750   void rewriteCallSites(Function *Clone, const SmallVectorImpl<ArgInfo> &Args,
751                         ValueToValueMapTy &Mappings) {
752     assert(!Args.empty() && "Specialization without arguments");
753     Function *F = Args[0].Formal->getParent();
754 
755     SmallVector<CallBase *, 8> CallSitesToRewrite;
756     for (auto *U : F->users()) {
757       if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
758         continue;
759       auto &CS = *cast<CallBase>(U);
760       if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
761         continue;
762       CallSitesToRewrite.push_back(&CS);
763     }
764 
765     LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing call sites of "
766                       << F->getName() << " with " << Clone->getName() << "\n");
767 
768     for (auto *CS : CallSitesToRewrite) {
769       LLVM_DEBUG(dbgs() << "FnSpecialization:   "
770                         << CS->getFunction()->getName() << " ->" << *CS
771                         << "\n");
772       if (/* recursive call */
773           (CS->getFunction() == Clone &&
774            all_of(Args,
775                   [CS, &Mappings](const ArgInfo &Arg) {
776                     unsigned ArgNo = Arg.Formal->getArgNo();
777                     return CS->getArgOperand(ArgNo) == Mappings[Arg.Formal];
778                   })) ||
779           /* normal call */
780           all_of(Args, [CS](const ArgInfo &Arg) {
781             unsigned ArgNo = Arg.Formal->getArgNo();
782             return CS->getArgOperand(ArgNo) == Arg.Actual;
783           })) {
784         CS->setCalledFunction(Clone);
785         Solver.markOverdefined(CS);
786       }
787     }
788   }
789 
790   void updateSpecializedFuncs(FuncList &Candidates, FuncList &WorkList) {
791     for (auto *F : WorkList) {
792       SpecializedFuncs.insert(F);
793 
794       // Initialize the state of the newly created functions, marking them
795       // argument-tracked and executable.
796       if (F->hasExactDefinition() && !F->hasFnAttribute(Attribute::Naked))
797         Solver.addTrackedFunction(F);
798 
799       Solver.addArgumentTrackedFunction(F);
800       Candidates.push_back(F);
801       Solver.markBlockExecutable(&F->front());
802 
803       // Replace the function arguments for the specialized functions.
804       for (Argument &Arg : F->args())
805         if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
806           LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
807                             << Arg.getNameOrAsOperand() << "\n");
808     }
809   }
810 };
811 } // namespace
812 
813 bool llvm::runFunctionSpecialization(
814     Module &M, const DataLayout &DL,
815     std::function<TargetLibraryInfo &(Function &)> GetTLI,
816     std::function<TargetTransformInfo &(Function &)> GetTTI,
817     std::function<AssumptionCache &(Function &)> GetAC,
818     function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
819   SCCPSolver Solver(DL, GetTLI, M.getContext());
820   FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
821   bool Changed = false;
822 
  // Loop over all functions. For those whose arguments cannot be tracked
  // interprocedurally (e.g. externally visible or address-taken functions),
  // mark the arguments as overdefined.
825   for (Function &F : M) {
826     if (F.isDeclaration())
827       continue;
828     if (F.hasFnAttribute(Attribute::NoDuplicate))
829       continue;
830 
831     LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
832                       << "\n");
833     Solver.addAnalysis(F, GetAnalysis(F));
834 
835     // Determine if we can track the function's arguments. If so, add the
836     // function to the solver's set of argument-tracked functions.
837     if (canTrackArgumentsInterprocedurally(&F)) {
838       LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
839       Solver.addArgumentTrackedFunction(&F);
840       continue;
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                      << "FnSpecialization: Doesn't have local linkage, or "
                      << "has its address taken\n");
846 
847     // Assume the function is called.
848     Solver.markBlockExecutable(&F.front());
849 
850     // Assume nothing about the incoming arguments.
851     for (Argument &AI : F.args())
852       Solver.markOverdefined(&AI);
853   }
854 
855   // Determine if we can track any of the module's global variables. If so, add
856   // the global variables we can track to the solver's set of tracked global
857   // variables.
858   for (GlobalVariable &G : M.globals()) {
859     G.removeDeadConstantUsers();
860     if (canTrackGlobalVariableInterprocedurally(&G))
861       Solver.trackValueOfGlobalVariable(&G);
862   }
863 
864   auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
865   SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
866                                         TrackedFuncs.end());
867 
  // If there are no tracked functions there is nothing to do: don't run the
  // solver, but do remove any ssa_copy intrinsics that may have been
  // introduced.
870   if (TrackedFuncs.empty()) {
871     removeSSACopy(M);
872     return false;
873   }
874 
875   // Solve for constants.
876   auto RunSCCPSolver = [&](auto &WorkList) {
877     bool ResolvedUndefs = true;
878 
879     while (ResolvedUndefs) {
      // That the solver is not run unnecessarily is checked in the regression
      // test nothing-to-do.ll, so if this debug message is changed, that
      // regression test needs updating too.
883       LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");
884 
885       Solver.solve();
886       LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
887       ResolvedUndefs = false;
888       for (Function *F : WorkList)
889         if (Solver.resolvedUndefsIn(*F))
890           ResolvedUndefs = true;
891     }
892 
893     for (auto *F : WorkList) {
894       for (BasicBlock &BB : *F) {
895         if (!Solver.isBlockExecutable(&BB))
896           continue;
897         // FIXME: The solver may make changes to the function here, so set
898         // Changed, even if later function specialization does not trigger.
899         for (auto &I : make_early_inc_range(BB))
900           Changed |= FS.tryToReplaceWithConstant(&I);
901       }
902     }
903   };
904 
905 #ifndef NDEBUG
906   LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
907   for (auto *F : FuncDecls)
908     LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
909 #endif
910 
911   // Initially resolve the constants in all the argument tracked functions.
912   RunSCCPSolver(FuncDecls);
913 
914   SmallVector<Function *, 8> WorkList;
915   unsigned I = 0;
916   while (FuncSpecializationMaxIters != I++ &&
917          FS.specializeFunctions(FuncDecls, WorkList)) {
918     LLVM_DEBUG(dbgs() << "FnSpecialization: Finished iteration " << I << "\n");
919 
920     // Run the solver for the specialized functions.
921     RunSCCPSolver(WorkList);
922 
    // Propagate constant stack values to call arguments so that further
    // specialization is possible in the next iteration.
924     constantArgPropagation(FuncDecls, M, Solver);
925 
926     WorkList.clear();
927     Changed = true;
928   }
929 
930   LLVM_DEBUG(dbgs() << "FnSpecialization: Number of specializations = "
931                     << NumFuncSpecialized << "\n");
932 
933   // Remove any ssa_copy intrinsics that may have been introduced.
934   removeSSACopy(M);
935   return Changed;
936 }
937