//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass specializes functions with constant parameters. Constant
// parameters like function pointers and constant globals are propagated to
// the callee by specializing the function. The main benefit of this pass at
// the moment is that indirect calls are transformed into direct calls, which
// provides inlining opportunities that the inliner would not have been able
// to achieve. That's why function specialization is run before the inliner in
// the optimization pipeline; that is by design. Otherwise, we would only
// benefit from constant argument passing, which is a valid use-case too, but
// hasn't been explored much in terms of performance uplifts, cost-model and
// compile-time impact.
//
// Current limitations:
// - It does not yet handle integer ranges. We do support "literal constants",
//   but that is off by default under an option.
// - The cost-model could be further looked into (it mainly focuses on inlining
//   benefits).
// - We are not yet caching analysis results, but profiling and checking where
//   extra compile time is spent didn't suggest this to be a problem.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing the number of iterations
//   linearly increases the number of times recursive functions get
//   specialized; see also the discussion in https://reviews.llvm.org/D106426
//   for details. Perhaps there is a compile-time friendlier way to
//   control/limit the number of specializations for recursive functions.
// - Don't transform the function if function specialization does not trigger;
//   the SCCPSolver may make IR changes.
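//
// Example: as a rough illustration (not taken from an actual test case), a
// function that forwards an indirect call through a function-pointer argument
//
//   define internal i32 @apply(i32 (i32)* %f, i32 %x) {
//     %r = call i32 %f(i32 %x)        ; indirect call
//     ret i32 %r
//   }
//
// called as `call i32 @apply(i32 (i32)* @square, i32 %n)` gets a clone
// @apply.1 in which the solver knows %f is @square, so the call inside the
// clone becomes the direct (and therefore inlinable) `call i32 @square(i32
// %x)`, and the original call site is rewritten to call @apply.1 instead.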
//
// References:
// - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
//   it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueLatticeUtils.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SCCPSolver.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is "
             "run"),
    cl::init(1));

static cl::opt<unsigned> MaxClonesThreshold(
    "func-specialization-max-clones", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have less than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// Disabled by default as it can significantly increase compilation times.
// Running nikic's compile time tracker on x86 with instruction count as the
// metric shows 3-4% regression for SPASS while being neutral for all other
// benchmarks of the llvm test suite.
//
// https://llvm-compile-time-tracker.com
// https://github.com/nikic/llvm-compile-time-tracker
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

namespace {
// Bookkeeping struct to pass data from the analysis and profitability phase
// to the actual transform helper functions.
struct SpecializationInfo {
  SmallVector<ArgInfo, 8> Args; // Stores the {formal,actual} argument pairs.
  InstructionCost Gain;         // Profitability: Gain = Bonus - Cost.
};
} // Anonymous namespace

using FuncList = SmallVectorImpl<Function *>;
using CallArgBinding = std::pair<CallBase *, Constant *>;
using CallSpecBinding = std::pair<CallBase *, SpecializationInfo>;
// We are using MapVector because it guarantees deterministic iteration
// order across executions.
using SpecializationMap = SmallMapVector<CallBase *, SpecializationInfo, 8>;

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is neither unknown/undef nor a (single-element)
// constant, i.e. it is effectively overdefined.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // This is a duplicate store, bail out.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialization, a
// reduced example may look like this:
//
//   define internal void @RecursiveFn(i32* %arg1) {
//     %temp = alloca i32, align 4
//     store i32 2, i32* %temp, align 4
//     call void @RecursiveFn.1(i32* nonnull %temp)
//     ret void
//   }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in subsequent iterations:
//
//   @funcspec.arg = internal constant i32 2
//
//   define internal void @RecursiveFn(i32* %arg1) {
//     call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//     ret void
//   }
//
static void constantArgPropagation(FuncList &WorkList, Module &M,
                                   SCCPSolver &Solver) {
  // Iterate over the argument tracked functions to see if there
  // are any new constant values for the call instruction via
  // stack variables.
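  // For each such constant found, a new internal global ("funcspec.arg") is
  // created to hold it, the call is rewritten to pass that global instead,
  // and the call is handed back to the solver so the constant can be
  // propagated further in the next iteration.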
  for (auto *F : WorkList) {

    for (auto *User : F->users()) {

      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        continue;

      bool Changed = false;
      for (const Use &U : Call->args()) {
        unsigned Idx = Call->getArgOperandNo(&U);
        Value *ArgOp = Call->getArgOperand(Idx);
        Type *ArgOpType = ArgOp->getType();

        if (!Call->onlyReadsMemory(Idx) || !ArgOpType->isPointerTy())
          continue;

        auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
        if (!ConstVal)
          continue;

        Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                       GlobalValue::InternalLinkage, ConstVal,
                                       "funcspec.arg");
        if (ArgOpType != ConstVal->getType())
          GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOpType);

        Call->setArgOperand(Idx, GV);
        Changed = true;
      }

      // Add the changed CallInst to Solver Worklist
      if (Changed)
        Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

namespace {
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

  SmallPtrSet<Function *, 4> SpecializedFuncs;
  SmallPtrSet<Function *, 4> FullySpecialized;
  SmallVector<Instruction *> ReplacedWithConstant;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  ~FunctionSpecializer() {
    // Eliminate dead code.
    removeDeadInstructions();
    removeDeadFunctions();
  }

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
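  ///
  /// For each candidate this computes the specialization cost, collects the
  /// profitable specializations via calculateGains(), and then clones the
  /// function and rewrites its call sites via specializeFunction().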
  bool specializeFunctions(FuncList &Candidates, FuncList &WorkList) {
    bool Changed = false;
    for (auto *F : Candidates) {
      if (!isCandidateFunction(F))
        continue;

      auto Cost = getSpecializationCost(F);
      if (!Cost.isValid()) {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Invalid specialization cost.\n");
        continue;
      }

      LLVM_DEBUG(dbgs() << "FnSpecialization: Specialization cost for "
                        << F->getName() << " is " << Cost << "\n");

      SmallVector<CallSpecBinding, 8> Specializations;
      if (!calculateGains(F, Cost, Specializations)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: No possible constants found\n");
        continue;
      }

      Changed = true;
      for (auto &Entry : Specializations)
        specializeFunction(F, Entry.second, WorkList);
    }

    updateSpecializedFuncs(Candidates, WorkList);
    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  void removeDeadInstructions() {
    for (auto *I : ReplacedWithConstant) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead instruction " << *I
                        << "\n");
      I->eraseFromParent();
    }
    ReplacedWithConstant.clear();
  }

  void removeDeadFunctions() {
    for (auto *F : FullySpecialized) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead function "
                        << F->getName() << "\n");
      F->eraseFromParent();
    }
    FullySpecialized.clear();
  }

  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing " << *V
                      << "\nFnSpecialization: with " << *Const << "\n");

    // Record uses of V to avoid visiting irrelevant uses of const later.
    SmallVector<Instruction *> UseInsts;
    for (auto *U : V->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          UseInsts.push_back(I);

    V->replaceAllUsesWith(Const);

    for (auto *I : UseInsts)
      Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        ReplacedWithConstant.push_back(I);
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialized, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F, ValueToValueMapTy &Mappings) {
    Function *Clone = CloneFunction(F, Mappings);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether it's worthwhile to specialize function
  /// \p F based on the known constant values its arguments can take on. It
  /// only discovers potential specialization opportunities without actually
  /// applying them.
  ///
  /// \returns true if any specializations have been found.
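  ///
  /// As a rough illustration with made-up numbers: if the specialization cost
  /// of \p F is 400 and a given call site yields a total bonus of 1000, that
  /// candidate's Gain is 600 and it is kept. Candidates with Gain <= 0 are
  /// dropped, and at most MaxClonesThreshold of the remaining candidates (the
  /// ones with the highest Gain) survive.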
  bool calculateGains(Function *F, InstructionCost Cost,
                      SmallVectorImpl<CallSpecBinding> &WorkList) {
    SpecializationMap Specializations;
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &FormalArg : F->args()) {
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in ActualArgs.
      SmallVector<CallArgBinding, 8> ActualArgs;
      if (!isArgumentInteresting(&FormalArg, ActualArgs)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument "
                          << FormalArg.getNameOrAsOperand()
                          << " is not interesting\n");
        continue;
      }

      for (const auto &Entry : ActualArgs) {
        CallBase *Call = Entry.first;
        Constant *ActualArg = Entry.second;

        auto I = Specializations.insert({Call, SpecializationInfo()});
        SpecializationInfo &S = I.first->second;

        if (I.second)
          S.Gain = ForceFunctionSpecialization ? 1 : 0 - Cost;
        if (!ForceFunctionSpecialization)
          S.Gain += getSpecializationBonus(&FormalArg, ActualArg);
        S.Args.push_back({&FormalArg, ActualArg});
      }
    }

    // Remove unprofitable specializations.
    Specializations.remove_if(
        [](const auto &Entry) { return Entry.second.Gain <= 0; });

    // Clear the MapVector and return the underlying vector.
    WorkList = Specializations.takeVector();

    // Sort the candidates in descending order.
    llvm::stable_sort(WorkList, [](const auto &L, const auto &R) {
      return L.second.Gain > R.second.Gain;
    });

    // Truncate the worklist to 'MaxClonesThreshold' candidates if necessary.
    if (WorkList.size() > MaxClonesThreshold) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Number of candidates exceed "
                        << "the maximum number of clones threshold.\n"
                        << "FnSpecialization: Truncating worklist to "
                        << MaxClonesThreshold << " candidates.\n");
      WorkList.erase(WorkList.begin() + MaxClonesThreshold, WorkList.end());
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: Specializations for function "
                      << F->getName() << "\n";
               for (const auto &Entry : WorkList) {
                 dbgs() << "FnSpecialization: Gain = " << Entry.second.Gain
                        << "\n";
                 for (const ArgInfo &Arg : Entry.second.Args)
                   dbgs() << "FnSpecialization: FormalArg = "
                          << Arg.Formal->getNameOrAsOperand()
                          << ", ActualArg = "
                          << Arg.Actual->getNameOrAsOperand() << "\n";
               });

    return !WorkList.empty();
  }

  bool isCandidateFunction(Function *F) {
    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // It wastes time to specialize a function that is going to be inlined
    // anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");
    return true;
  }

  void specializeFunction(Function *F, SpecializationInfo &S,
                          FuncList &WorkList) {
    ValueToValueMapTy Mappings;
    Function *Clone = cloneCandidateFunction(F, Mappings);

    // Rewrite calls to the function so that they call the clone instead.
    rewriteCallSites(Clone, S.Args, Mappings);

    // Initialize the lattice state of the arguments of the function clone,
    // marking the argument on which we specialized the function constant
    // with the given value.
    Solver.markArgInFuncSpecialization(Clone, S.Args);

    // Mark all the specialized functions.
    WorkList.push_back(Clone);
    NbFunctionsSpecialized++;

    // If the function has been completely specialized, the original function
    // is no longer needed. Mark it unreachable.
    if (F->getNumUses() == 0 || all_of(F->users(), [F](User *U) {
          if (auto *CS = dyn_cast<CallBase>(U))
            return CS->getFunction() == F;
          return false;
        })) {
      Solver.markFunctionUnreachable(F);
      FullySpecialized.insert(F);
    }
  }

  /// Compute and return the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, or
    // if the function is small enough that it is likely to be fully inlined
    // anyway, we shouldn't specialize it. Set the specialization cost to
    // Invalid.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function and a penalty for specializing more
    // functions.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If it is not an instruction we do not know how to evaluate it.
    // Keep the minimum possible cost for now so that it doesn't affect
    // specialization.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if it is inside a loop.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
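  ///
  /// The per-user bonus computed by getUserBonus() is scaled by
  /// AvgLoopIterationCount for each level of loop nesting. As a rough
  /// illustration with the default average iteration count of 10, a user
  /// instruction of cost 2 at loop depth 3 contributes 2 * 10^3 = 2000.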
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for constant: "
                      << C->getNameOrAsOperand() << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization: User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // (potentially casted) function pointer, give up.
    Function *CalledFunction = dyn_cast<Function>(C->stripPointerCasts());
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();

      LLVM_DEBUG(dbgs() << "FnSpecialization: Inlining bonus " << Bonus
                        << " for user " << *U << "\n");
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A,
                             SmallVectorImpl<CallArgBinding> &Constants) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Nothing to do, argument "
                        << A->getNameOrAsOperand()
                        << " is already constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently we won't specialize if the number of call sites
    // exceeds the clone threshold, even when many of them pass the same
    // constant, e.g. foo(a) x 4 and foo(b) x 1. It might be beneficial to take
    // the number of occurrences into account in the cost model, for which we
    // would need to find the unique constants.
    //
    // TODO 2: this does not yet support integer ranges (constant ranges with
    // more than one element).
    //
    getPossibleConstants(A, Constants);

    if (Constants.empty())
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting argument "
                      << A->getNameOrAsOperand() << "\n");
    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  void getPossibleConstants(Argument *A,
                            SmallVectorImpl<CallArgBinding> &Constants) {
    Function *F = A->getParent();

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has the minsize attribute set, it won't be
      // specialized.
      if (CS.hasFnAttr(Attribute::MinSize))
        continue;

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return;

      // For now, constant expressions are fine but only if they are function
      // calls.
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return;

        if (!GV->getValueType()->isSingleValueType())
          return;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back({&CS, cast<Constant>(V)});
    }
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F as long as the actual
  /// arguments match those in \p Args. Note that for recursive calls we
  /// need to compare against the cloned formal arguments.
  ///
  /// Call sites that have been marked with the MinSize function attribute
  /// won't be specialized and rewritten.
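  ///
  /// As a rough illustration (names are made up): if @foo was specialized for
  /// the binding {i32* %arg, @gv}, a call `call void @foo(i32* @gv)` is
  /// redirected to the clone because its actual argument matches \p Args, and
  /// a recursive call `call void @foo(i32* %arg)` inside the clone is
  /// redirected because %arg maps to the clone's own formal argument via
  /// \p Mappings.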
  void rewriteCallSites(Function *Clone, const SmallVectorImpl<ArgInfo> &Args,
                        ValueToValueMapTy &Mappings) {
    assert(!Args.empty() && "Specialization without arguments");
    Function *F = Args[0].Formal->getParent();

    SmallVector<CallBase *, 8> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing call sites of "
                      << F->getName() << " with " << Clone->getName() << "\n");

    for (auto *CS : CallSitesToRewrite) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: "
                        << CS->getFunction()->getName() << " ->" << *CS
                        << "\n");
      if (/* recursive call */
          (CS->getFunction() == Clone &&
           all_of(Args,
                  [CS, &Mappings](const ArgInfo &Arg) {
                    unsigned ArgNo = Arg.Formal->getArgNo();
                    return CS->getArgOperand(ArgNo) == Mappings[Arg.Formal];
                  })) ||
          /* normal call */
          all_of(Args, [CS](const ArgInfo &Arg) {
            unsigned ArgNo = Arg.Formal->getArgNo();
            return CS->getArgOperand(ArgNo) == Arg.Actual;
          })) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }

  void updateSpecializedFuncs(FuncList &Candidates, FuncList &WorkList) {
    for (auto *F : WorkList) {
      SpecializedFuncs.insert(F);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (F->hasExactDefinition() && !F->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(F);

      Solver.addArgumentTrackedFunction(F);
      Candidates.push_back(F);
      Solver.markBlockExecutable(&F->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : F->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getNameOrAsOperand() << "\n");
    }
  }
};
} // namespace

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // No tracked functions, so nothing to do: don't run the solver, and just
  // remove the ssa_copy intrinsics that may have been introduced.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // That the solver is not run unnecessarily is checked in the regression
      // test nothing-to-do.ll, so if this debug message is changed, that
      // regression test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 8> WorkList;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, WorkList)) {
    LLVM_DEBUG(dbgs() << "FnSpecialization: Finished iteration " << I << "\n");

    // Run the solver for the specialized functions.
    RunSCCPSolver(WorkList);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    WorkList.clear();
    Changed = true;
  }

  LLVM_DEBUG(dbgs() << "FnSpecialization: Number of specializations = "
                    << NumFuncSpecialized << "\n");

  // Remove any ssa_copy intrinsics that may have been introduced.
  removeSSACopy(M);
  return Changed;
}