//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass specialises functions with constant parameters. Constant
// parameters like function pointers and constant globals are propagated to
// the callee by cloning (specializing) the function.
//
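// A purely illustrative sketch of the transformation (not taken from an
// actual test case): given
//
//   define internal void @work(void ()* %callback) {
//     call void %callback()
//     ret void
//   }
//
//   call void @work(void ()* @callee)
//
// the pass can clone @work into a specialization in which %callback is known
// to be @callee, so that the indirect call becomes a direct call to @callee
// (which may in turn enable inlining).
//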
// Current limitations:
// - It does not yet handle integer ranges.
// - Only one argument per function is specialised.
// - The cost-model could be further looked into.
// - We are not yet caching analysis results.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing this value linearly
//   increases the number of times recursive functions get specialized; see
//   also the discussion in https://reviews.llvm.org/D106426 for details.
//   Perhaps there is a compile-time friendlier way to control/limit the
//   number of specialisations for recursive functions.
// - Don't transform the function if no specialization happens.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc(
        "The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxConstantsThreshold(
    "func-specialization-max-constants", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have less than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// TODO: This needs checking to see the impact on compile-times, which is why
// this is off by default for now.
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

namespace {
// Bookkeeping struct to pass data from the analysis and profitability phase
// to the actual transform helper functions.
struct ArgInfo {
  Function *Fn;         // The function to perform specialisation on.
  Argument *Arg;        // The formal argument being analysed.
  Constant *Const;      // A corresponding actual constant argument.
  InstructionCost Gain; // Profitability: Gain = Bonus - Cost.

  // Flag if this will be a partial specialization, in which case we will need
  // to keep the original function around in addition to the added
  // specializations.
  bool Partial = false;

  ArgInfo(Function *F, Argument *A, Constant *C, InstructionCost G)
      : Fn(F), Arg(A), Const(C), Gain(G) {}
};
} // Anonymous namespace

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is neither unknown/undef nor a constant, i.e. it
// is overdefined.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // Bail if this is a volatile store or a second store to the alloca.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
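// For example (illustrative only), in
//
//   %temp = alloca i32, align 4
//   store i32 2, i32* %temp, align 4
//   call void @f(i32* nonnull %temp)
//
// %temp is a constant stack value holding the constant 2.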
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialisation, a
// reduced example may look like this:
//
//   define internal void @RecursiveFn(i32* arg1) {
//     %temp = alloca i32, align 4
//     store i32 2, i32* %temp, align 4
//     call void @RecursiveFn.1(i32* nonnull %temp)
//     ret void
//   }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in subsequent iterations:
//
//   @funcspec.arg = internal constant i32 2
//
//   define internal void @RecursiveFn(i32* arg1) {
//     call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//     ret void
//   }
//
static void constantArgPropagation(SmallVectorImpl<Function *> &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument tracked functions to see if there
  // are any new constant values for the call instruction via
  // stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to the solver's worklist.
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

namespace {
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
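  /// AssumptionCache, TargetTransformInfo and TargetLibraryInfo are obtained
  /// lazily, per function, through the callbacks below.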
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

  SmallPtrSet<Function *, 2> SpecializedFuncs;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool
  specializeFunctions(SmallVectorImpl<Function *> &FuncDecls,
                      SmallVectorImpl<Function *> &CurrentSpecializations) {
    bool Changed = false;
    for (auto *F : FuncDecls) {
      if (!isCandidateFunction(F, CurrentSpecializations))
        continue;

      auto Cost = getSpecializationCost(F);
      if (!Cost.isValid()) {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
        continue;
      }

      auto ConstArgs = calculateGains(F, Cost);
      if (ConstArgs.empty()) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
        continue;
      }

      for (auto &CA : ConstArgs) {
        specializeFunction(CA, CurrentSpecializations);
        Changed = true;
      }
    }

    for (auto *SpecializedFunc : CurrentSpecializations) {
      SpecializedFuncs.insert(SpecializedFunc);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (SpecializedFunc->hasExactDefinition() &&
          !SpecializedFunc->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(SpecializedFunc);
      Solver.addArgumentTrackedFunction(SpecializedFunc);
      FuncDecls.push_back(SpecializedFunc);
      Solver.markBlockExecutable(&SpecializedFunc->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : SpecializedFunc->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getName() << "\n");
    }

    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
    V->replaceAllUsesWith(Const);

    for (auto *U : Const->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        I->eraseFromParent();
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
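  /// The intrinsics are an artifact of the solver (see removeSSACopy above)
  /// and would otherwise interfere with constantArgPropagation.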
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether it's worthwhile to specialize function \p F
  /// based on the known constant values its arguments can take on, i.e. it
  /// calculates a gain and returns a list of actual arguments that are deemed
  /// profitable to specialize. Specialization is performed on the first
  /// interesting argument. Specializations based on additional arguments will
  /// be evaluated on following iterations of the main IPSCCP solve loop.
  SmallVector<ArgInfo> calculateGains(Function *F, InstructionCost Cost) {
    SmallVector<ArgInfo> Worklist;
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &FormalArg : F->args()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing arg: "
                        << FormalArg.getName() << "\n");
      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      bool IsPartial = true;
      SmallVector<Constant *> ActualConstArg;
      if (!isArgumentInteresting(&FormalArg, ActualConstArg, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument is not interesting\n");
        continue;
      }

      for (auto *ActualArg : ActualConstArg) {
        InstructionCost Gain =
            ForceFunctionSpecialization
                ? 1
                : getSpecializationBonus(&FormalArg, ActualArg) - Cost;

        if (Gain <= 0)
          continue;
        Worklist.push_back({F, &FormalArg, ActualArg, Gain});
      }

      if (Worklist.empty())
        continue;

      // Sort the candidates in descending order.
      llvm::stable_sort(Worklist, [](const ArgInfo &L, const ArgInfo &R) {
        return L.Gain > R.Gain;
      });

      // Truncate the worklist to 'MaxConstantsThreshold' candidates if
      // necessary.
      if (Worklist.size() > MaxConstantsThreshold) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: number of constants exceeds "
                          << "the maximum number of constants threshold.\n"
                          << "Truncating worklist to " << MaxConstantsThreshold
                          << " candidates.\n");
        Worklist.erase(Worklist.begin() + MaxConstantsThreshold,
                       Worklist.end());
      }

      if (IsPartial || Worklist.size() < ActualConstArg.size())
        for (auto &ActualArg : Worklist)
          ActualArg.Partial = true;

      LLVM_DEBUG(dbgs() << "Sorted list of candidates by gain:\n";
                 for (auto &C : Worklist) {
                   dbgs() << "- Function = " << C.Fn->getName() << ", ";
                   dbgs() << "FormalArg = " << C.Arg->getName() << ", ";
                   dbgs() << "ActualArg = " << C.Const->getName() << ", ";
                   dbgs() << "Gain = " << C.Gain << "\n";
                 });

      // FIXME: Only one argument per function.
      break;
    }
    return Worklist;
  }

  bool isCandidateFunction(Function *F,
                           SmallVectorImpl<Function *> &Specializations) {
    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
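    // This covers both the 'optsize' attribute and profile-guided size
    // optimizations.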
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // It is a waste of time to specialize a function which will get inlined
    // anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");
    return true;
  }

  void specializeFunction(ArgInfo &AI,
                          SmallVectorImpl<Function *> &Specializations) {
    Function *Clone = cloneCandidateFunction(AI.Fn);
    Argument *ClonedArg = Clone->getArg(AI.Arg->getArgNo());

    // Rewrite calls to the function so that they call the clone instead.
    rewriteCallSites(AI.Fn, Clone, *ClonedArg, AI.Const);

    // Initialize the lattice state of the arguments of the function clone,
    // marking the argument on which we specialized the function constant
    // with the given value.
    Solver.markArgInFuncSpecialization(AI.Fn, ClonedArg, AI.Const);

    // Record the new specialization.
    Specializations.push_back(Clone);
    NbFunctionsSpecialized++;

    // If the function has been completely specialized, the original function
    // is no longer needed. Mark it unreachable.
    if (!AI.Partial)
      Solver.markFunctionUnreachable(AI.Fn);
  }

  /// Compute and return the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, or
    // if the function is small enough that it is likely to be fully inlined
    // anyway, don't specialize it: return an invalid specialization cost.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function, multiplied by a penalty that grows with
    // the number of functions specialized so far.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If the user is not an instruction we do not know how to evaluate it.
    // Return the minimum possible cost for now so that it doesn't affect the
    // specialization decision.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if the user is inside a loop.
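    // The bonus is scaled by AvgLoopIterationCount per loop level, so a user
    // at loop depth D is weighted by AvgLoopIterationCount^D.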
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for: " << *A
                      << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization: User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();
    }

    return TotalCost + Bonus;
  }

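  // As a rough illustration of the heuristic above: for a function-pointer
  // argument that is called once at loop depth 1, the bonus is the TTI cost
  // of that call scaled by AvgLoopIterationCount, plus an inlining bonus if
  // promoting the resulting direct call makes the callee look inlinable.
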
  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants. If the values in
  /// \p Constants represent the complete set of values that \p A can take on,
  /// the function will be completely specialized, and the \p IsPartial flag is
  /// set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A,
                             SmallVectorImpl<Constant *> &Constants,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: nothing to do, arg is already "
                        << "constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently we won't specialize if the number of calls passing
    // the same constant exceeds the threshold, e.g. foo(a) x 4 and foo(b) x 1.
    // It might be beneficial to take the number of occurrences into account
    // in the cost model, which would require finding the unique constants.
    //
    // TODO 2: this currently does not support integer ranges.
    //
    IsPartial = !getPossibleConstants(A, Constants);
    LLVM_DEBUG(dbgs() << "FnSpecialization: interesting arg: " << *A << "\n");
    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A,
                            SmallVectorImpl<Constant *> &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has attribute minsize set, that callsite won't be
      // specialized.
      if (CS.hasFnAttr(Attribute::MinSize)) {
        AllConstant = false;
        continue;
      }

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return false;

      // For now, constant expressions are fine, but only if their operand is
      // a function (e.g. a cast of a function pointer).
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return false;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
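      // (Hence the isSingleValueType() check on the global's value type
      // below.)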
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return false;

        if (!GV->getValueType()->isSingleValueType())
          return false;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at index \p
  /// ArgNo is equal to constant \p C. The calls are rewritten to call function
  /// \p Clone instead.
  ///
  /// Callsites that have been marked with the MinSize function attribute won't
  /// be specialized and rewritten.
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }
    for (auto *CS : CallSitesToRewrite) {
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }
};
} // namespace

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions: register their analyses with the solver, decide
  // which functions we can track interprocedurally, and mark the arguments of
  // the remaining functions as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // If there are no tracked functions, there is nothing to do: don't run the
  // solver, just remove the ssa_copy intrinsics that may have been introduced.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // That the solver is not run unnecessarily is checked in regression test
      // nothing-to-do.ll, so if this debug message is changed, that regression
      // test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument-tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> CurrentSpecializations;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, CurrentSpecializations)) {

    // Run the solver for the specialized functions.
    RunSCCPSolver(CurrentSpecializations);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    CurrentSpecializations.clear();
    Changed = true;
  }

  // Clean up the IR by removing ssa_copy intrinsics.
  removeSSACopy(M);
  return Changed;
}