//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This specialises functions with constant parameters. Constant parameters
// like function pointers and constant globals are propagated to the callee by
// specializing the function. The main benefit of this pass at the moment is
// that indirect calls are transformed into direct calls, which provides inline
// opportunities that the inliner would not have been able to achieve (see the
// example sketched below, after the Todos). That's why function specialisation
// is run before the inliner in the optimisation pipeline; that is by design.
// Otherwise, we would only benefit from constant passing, which is a valid
// use-case too, but hasn't been explored much in terms of performance uplifts,
// cost-model and compile-time impact.
//
// Current limitations:
// - It does not yet handle integer ranges. We do support "literal constants",
//   but that's off by default under an option.
// - Only 1 argument per function is specialised,
// - The cost-model could be further looked into (it mainly focuses on inlining
//   benefits),
// - We are not yet caching analysis results, but profiling and checking where
//   extra compile time is spent didn't suggest this to be a problem.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing this value, and with it
//   the number of iterations, will linearly increase the number of times
//   recursive functions get specialized; see also the discussion in
//   https://reviews.llvm.org/D106426 for details. Perhaps there is a
//   compile-time friendlier way to control/limit the number of specialisations
//   for recursive functions.
// - Don't transform the function if function specialization does not trigger;
//   the SCCPSolver may make IR changes.
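//
// Example:
// As a minimal sketch of the main use-case (hypothetical IR; the names
// @compute, @binop and @square are made up for illustration and do not come
// from a real test), a call that passes a known function pointer:
//
//   define internal i32 @compute(i32 %x, i32 (i32)* %binop) {
//     %res = call i32 %binop(i32 %x)
//     ret i32 %res
//   }
//
//   %y = call i32 @compute(i32 %n, i32 (i32)* @square)
//
// is redirected to a clone of @compute in which the solver has substituted
// the known constant for the argument, so the indirect call has become a
// direct call that the inliner can look through:
//
//   define internal i32 @compute.1(i32 %x, i32 (i32)* %binop) {
//     %res = call i32 @square(i32 %x)
//     ret i32 %res
//   }
//
//   %y = call i32 @compute.1(i32 %n, i32 (i32)* @square)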
//
// References:
// - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
//   it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueLatticeUtils.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SCCPSolver.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxClonesThreshold(
    "func-specialization-max-clones", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have less than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// TODO: This needs checking to see the impact on compile-times, which is why
// this is off by default for now.
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

namespace {
// Bookkeeping struct to pass data from the analysis and profitability phase
// to the actual transform helper functions.
struct ArgInfo {
  Function *Fn;         // The function to perform specialisation on.
  Argument *Formal;     // The Formal argument being analysed.
  Constant *Actual;     // A corresponding actual constant argument.
  InstructionCost Gain; // Profitability: Gain = Bonus - Cost.

  // Flag if this will be a partial specialization, in which case we will need
  // to keep the original function around in addition to the added
  // specializations.
  bool Partial = false;

  ArgInfo(Function *F, Argument *A, Constant *C, InstructionCost G)
      : Fn(F), Formal(A), Actual(C), Gain(G){};
};
} // Anonymous namespace

using FuncList = SmallVectorImpl<Function *>;
using ConstList = SmallVectorImpl<Constant *>;

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is neither unknown/undef nor a single constant
// value, i.e. it is overdefined or a constant range with more than one
// element.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // This is a duplicate store, bail out.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialisation, a
// reduced example may look like this:
//
//   define internal void @RecursiveFn(i32* arg1) {
//     %temp = alloca i32, align 4
//     store i32 2, i32* %temp, align 4
//     call void @RecursiveFn.1(i32* nonnull %temp)
//     ret void
//   }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in the next iterations:
//
//   @funcspec.arg = internal constant i32 2
//
//   define internal void @RecursiveFn(i32* arg1) {
//     call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//     ret void
//   }
//
static void constantArgPropagation(FuncList &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument tracked functions to see if there
  // are any new constant values for the call instruction via
  // stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to Solver Worklist
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

namespace {
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

  SmallPtrSet<Function *, 4> SpecializedFuncs;
  SmallPtrSet<Function *, 4> FullySpecialized;
  SmallVector<Instruction *> ReplacedWithConstant;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  ~FunctionSpecializer() {
    // Eliminate dead code.
    removeDeadInstructions();
    removeDeadFunctions();
  }

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool specializeFunctions(FuncList &Candidates, FuncList &WorkList) {
    bool Changed = false;
    for (auto *F : Candidates) {
      if (!isCandidateFunction(F))
        continue;

      auto Cost = getSpecializationCost(F);
      if (!Cost.isValid()) {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
        continue;
      }

      LLVM_DEBUG(dbgs() << "FnSpecialization: Specialization cost for "
                        << F->getName() << " is " << Cost << "\n");

      auto ConstArgs = calculateGains(F, Cost);
      if (ConstArgs.empty()) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
        continue;
      }

      for (auto &CA : ConstArgs) {
        specializeFunction(CA, WorkList);
        Changed = true;
      }
    }

    updateSpecializedFuncs(Candidates, WorkList);
    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  void removeDeadInstructions() {
    for (auto *I : ReplacedWithConstant) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead instruction "
                        << *I << "\n");
      I->eraseFromParent();
    }
    ReplacedWithConstant.clear();
  }

  void removeDeadFunctions() {
    for (auto *F : FullySpecialized) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Removing dead function "
                        << F->getName() << "\n");
      F->eraseFromParent();
    }
    FullySpecialized.clear();
  }

  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing " << *V
                      << "\nFnSpecialization: with " << *Const << "\n");

    // Record uses of V to avoid visiting irrelevant uses of const later.
    SmallVector<Instruction *> UseInsts;
    for (auto *U : V->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          UseInsts.push_back(I);

    V->replaceAllUsesWith(Const);

    for (auto *I : UseInsts)
      Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        ReplacedWithConstant.push_back(I);
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether it's worthwhile to specialize function \p F
  /// based on the known constant values its arguments can take on, i.e. it
  /// calculates a gain and returns a list of actual arguments that are deemed
  /// profitable to specialize. Specialization is performed on the first
  /// interesting argument. Specializations based on additional arguments will
  /// be evaluated on following iterations of the main IPSCCP solve loop.
  SmallVector<ArgInfo> calculateGains(Function *F, InstructionCost Cost) {
    SmallVector<ArgInfo> Worklist;
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &FormalArg : F->args()) {
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      bool IsPartial = true;
      SmallVector<Constant *> ActualArgs;
      if (!isArgumentInteresting(&FormalArg, ActualArgs, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument "
                          << FormalArg.getNameOrAsOperand()
                          << " is not interesting\n");
        continue;
      }

      for (auto *ActualArg : ActualArgs) {
        InstructionCost Gain =
            ForceFunctionSpecialization
                ? 1
                : getSpecializationBonus(&FormalArg, ActualArg) - Cost;

        if (Gain <= 0)
          continue;
        Worklist.push_back({F, &FormalArg, ActualArg, Gain});
      }

      if (Worklist.empty())
        continue;

      // Sort the candidates in descending order.
      llvm::stable_sort(Worklist, [](const ArgInfo &L, const ArgInfo &R) {
        return L.Gain > R.Gain;
      });

      // Truncate the worklist to 'MaxClonesThreshold' candidates if
      // necessary.
      if (Worklist.size() > MaxClonesThreshold) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Number of candidates exceed "
                          << "the maximum number of clones threshold.\n"
                          << "FnSpecialization: Truncating worklist to "
                          << MaxClonesThreshold << " candidates.\n");
        Worklist.erase(Worklist.begin() + MaxClonesThreshold,
                       Worklist.end());
      }

      if (IsPartial || Worklist.size() < ActualArgs.size())
        for (auto &ActualArg : Worklist)
          ActualArg.Partial = true;

      LLVM_DEBUG(
        dbgs() << "FnSpecialization: Specializations for function "
               << F->getName() << "\n";
        for (auto &C : Worklist) {
          dbgs() << "FnSpecialization: FormalArg = "
                 << C.Formal->getNameOrAsOperand() << ", ActualArg = "
                 << C.Actual->getNameOrAsOperand() << ", Gain = "
                 << C.Gain << "\n";
        }
      );

      // FIXME: Only one argument per function.
      break;
    }
    return Worklist;
  }

  bool isCandidateFunction(Function *F) {
    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // It wastes time to specialize a function which would eventually be
    // inlined anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");
    return true;
  }

  void specializeFunction(ArgInfo &AI, FuncList &WorkList) {
    Function *Clone = cloneCandidateFunction(AI.Fn);
    Argument *ClonedArg = Clone->getArg(AI.Formal->getArgNo());

    // Rewrite calls to the function so that they call the clone instead.
    rewriteCallSites(AI.Fn, Clone, *ClonedArg, AI.Actual);

    // Initialize the lattice state of the arguments of the function clone,
    // marking the argument on which we specialized the function constant
    // with the given value.
    Solver.markArgInFuncSpecialization(AI.Fn, ClonedArg, AI.Actual);

    // Mark all the specialized functions.
    WorkList.push_back(Clone);
    NbFunctionsSpecialized++;

    // If the function has been completely specialized, the original function
    // is no longer needed. Mark it unreachable.
    if (AI.Fn->getNumUses() == 0 ||
        all_of(AI.Fn->users(), [&AI](User *U) {
          if (auto *CS = dyn_cast<CallBase>(U))
            return CS->getFunction() == AI.Fn;
          return false;
        })) {
      Solver.markFunctionUnreachable(AI.Fn);
      FullySpecialized.insert(AI.Fn);
    }
  }

  /// Compute and return the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, we
    // shouldn't specialize it. Set the specialization cost to Invalid.
    // Likewise if the function is so small that it is likely to get fully
    // inlined, in which case we shouldn't specialize it either.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function and penalty for specializing more functions.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If not an instruction we do not know how to evaluate.
    // Keep minimum possible cost for now so that it doesn't affect
    // specialization.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if it is inside a loop.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for constant: "
                      << C->getNameOrAsOperand() << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization: User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();

      LLVM_DEBUG(dbgs() << "FnSpecialization: Inlining bonus " << Bonus
                        << " for user " << *U << "\n");
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants. If the values in
  /// \p Constants represent the complete set of values that \p A can take on,
  /// the function will be completely specialized, and the \p IsPartial flag is
  /// set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A, ConstList &Constants,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Nothing to do, argument "
                        << A->getNameOrAsOperand()
                        << " is already constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently it won't specialize if there are over the threshold of
    // calls using the same argument, e.g. foo(a) x 4 and foo(b) x 1, but it
    // might be beneficial to take the occurrences into account in the cost
    // model, so we would need to find the unique constants.
    //
    // TODO 2: this currently does not support constant ranges, i.e. integer
    // ranges.
    //
    IsPartial = !getPossibleConstants(A, Constants);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Found interesting argument "
                      << A->getNameOrAsOperand() << "\n");
    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A, ConstList &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has attribute minsize set, that callsite won't be
      // specialized.
      if (CS.hasFnAttr(Attribute::MinSize)) {
        AllConstant = false;
        continue;
      }

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return false;

      // For now, constant expressions are fine but only if they are function
      // calls.
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return false;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return false;

        if (!GV->getValueType()->isSingleValueType())
          return false;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at index \p
  /// ArgNo is equal to constant \p C. The calls are rewritten to call function
  /// \p Clone instead.
  ///
  /// Callsites that have been marked with the MinSize function attribute won't
  /// be specialized and rewritten.
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: Replacing call sites of "
                      << F->getName() << " with "
                      << Clone->getName() << "\n");

    for (auto *CS : CallSitesToRewrite) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: "
                        << CS->getFunction()->getName() << " ->"
                        << *CS << "\n");
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }

  void updateSpecializedFuncs(FuncList &Candidates, FuncList &WorkList) {
    for (auto *F : WorkList) {
      SpecializedFuncs.insert(F);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (F->hasExactDefinition() && !F->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(F);

      Solver.addArgumentTrackedFunction(F);
      Candidates.push_back(F);
      Solver.markBlockExecutable(&F->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : F->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getNameOrAsOperand() << "\n");
    }
  }
};
} // namespace

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // No tracked functions, so nothing to do: don't run the solver and remove
  // the ssa_copy intrinsics that may have been introduced.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // Not running the solver unnecessarily is checked in regression test
      // nothing-to-do.ll, so if this debug message is changed, this regression
      // test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> WorkList;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, WorkList)) {
    LLVM_DEBUG(dbgs() << "FnSpecialization: Finished iteration " << I << "\n");

    // Run the solver for the specialized functions.
    RunSCCPSolver(WorkList);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    WorkList.clear();
    Changed = true;
  }

  LLVM_DEBUG(dbgs() << "FnSpecialization: Number of specializations = "
                    << NumFuncSpecialized << "\n");

  // Remove any ssa_copy intrinsics that may have been introduced.
  removeSSACopy(M);
  return Changed;
}