//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This specialises functions with constant parameters (e.g. functions,
// globals). Constant parameters like function pointers and constant globals
// are propagated to the callee by specializing the function.
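//
// For example (an illustrative sketch, not taken from a real test case),
// given a function
//
//   define internal i32 @f(i32 (i32)* %callback, i32 %x) {
//     %r = call i32 %callback(i32 %x)
//     ret i32 %r
//   }
//
// and a call site "call i32 @f(i32 (i32)* @square, i32 %n)", the pass can
// clone @f into a version in which %callback is known to be @square and
// rewrite the call site to use the clone. The indirect call inside the clone
// can then be promoted to a direct call and possibly inlined.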
//
// Current limitations:
// - It does not yet handle integer ranges.
// - Only one argument per function is specialised.
// - The cost-model could be further refined.
// - We are not yet caching analysis results.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing the number of iterations
//   will linearly increase the number of times recursive functions get
//   specialized; see also the discussion in https://reviews.llvm.org/D106426
//   for details. Perhaps there is a compile-time friendlier way to
//   control/limit the number of specialisations for recursive functions.
// - Don't transform the function if no function specialization happens.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxConstantsThreshold(
    "func-specialization-max-constants", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Don't specialize functions that have fewer than this threshold "
             "number of instructions"),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> SpecializeOnAddresses(
    "func-specialization-on-address", cl::init(false), cl::Hidden,
    cl::desc("Enable function specialization on the address of global values"));

// TODO: This needs checking to see the impact on compile-times, which is why
// this is off by default for now.
static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Enable specialization of functions that take a literal constant "
             "as an argument."));

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is either overdefined or a constant int.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // This is a duplicate store, bail out.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialisation, a
// reduced example may look like this:
//
//   define internal void @RecursiveFn(i32* %arg1) {
//     %temp = alloca i32, align 4
//     store i32 2, i32* %temp, align 4
//     call void @RecursiveFn.1(i32* nonnull %temp)
//     ret void
//   }
//
// Before the next iteration, we need to propagate the constant like this,
// which allows further specialization in the next iterations:
//
//   @funcspec.arg = internal constant i32 2
//
//   define internal void @RecursiveFn(i32* %arg1) {
//     call void @RecursiveFn.1(i32* nonnull @funcspec.arg)
//     ret void
//   }
//
static void constantArgPropagation(SmallVectorImpl<Function *> &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument-tracked functions to see if there
  // are any new constant values for the call instruction via
  // stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to the solver's worklist.
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
      auto *II = dyn_cast<IntrinsicInst>(&Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst.replaceAllUsesWith(II->getOperand(0));
      Inst.eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}
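/// Helper class that performs the actual specialization: it decides which
/// functions and arguments are worth specializing using a simple cost model,
/// clones the chosen functions, and redirects the relevant call sites to the
/// clones.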
class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

  SmallPtrSet<Function *, 2> SpecializedFuncs;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool
  specializeFunctions(SmallVectorImpl<Function *> &FuncDecls,
                      SmallVectorImpl<Function *> &CurrentSpecializations) {

    // Attempt to specialize the argument-tracked functions.
    bool Changed = false;
    for (auto *F : FuncDecls) {
      if (specializeFunction(F, CurrentSpecializations)) {
        Changed = true;
        LLVM_DEBUG(dbgs() << "FnSpecialization: Can specialize this func.\n");
      } else {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Cannot specialize this func.\n");
      }
    }

    for (auto *SpecializedFunc : CurrentSpecializations) {
      SpecializedFuncs.insert(SpecializedFunc);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (SpecializedFunc->hasExactDefinition() &&
          !SpecializedFunc->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(SpecializedFunc);
      Solver.addArgumentTrackedFunction(SpecializedFunc);
      FuncDecls.push_back(SpecializedFunc);
      Solver.markBlockExecutable(&SpecializedFunc->front());

      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : SpecializedFunc->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getName() << "\n");
    }

    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  /// Replace \p V with the constant (or undef) value the solver has inferred
  /// for it, and revisit the affected users.
  ///
  /// \returns true if \p V was replaced.
  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
    V->replaceAllUsesWith(Const);

    for (auto *U : Const->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          Solver.visit(I);

    // Remove the instruction from Block and Solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        I->eraseFromParent();
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether to specialize function \p F based on the
  /// known constant values its arguments can take on. Specialization is
  /// performed on the first interesting argument. Specializations based on
  /// additional arguments will be evaluated on following iterations of the
  /// main IPSCCP solve loop. \returns true if the function is specialized and
  /// false otherwise.
  bool specializeFunction(Function *F,
                          SmallVectorImpl<Function *> &Specializations) {

    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F))
      return false;

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // It is a waste of time to specialize a function which would ultimately
    // get inlined anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");

    // Determine if it would be profitable to create a specialization of the
    // function where the argument takes on the given constant value. If so,
    // add the constant to Constants.
    auto FnSpecCost = getSpecializationCost(F);
    if (!FnSpecCost.isValid()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: func specialisation cost: ";
               FnSpecCost.print(dbgs()); dbgs() << "\n");

    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &A : F->args()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing arg: " << A.getName()
                        << "\n");
      // True if this will be a partial specialization. We will need to keep
      // the original function around in addition to the added specializations.
      bool IsPartial = true;

      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      SmallVector<Constant *, 4> Constants;
      if (!isArgumentInteresting(&A, Constants, FnSpecCost, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument is not interesting\n");
        continue;
      }

      assert(!Constants.empty() && "No constants on which to specialize");
      LLVM_DEBUG(dbgs() << "FnSpecialization: Argument is interesting!\n"
                        << "FnSpecialization: Specializing '" << F->getName()
                        << "' on argument: " << A << "\n"
                        << "FnSpecialization: Constants are:\n\n";
                 for (unsigned I = 0; I < Constants.size(); ++I) dbgs()
                 << *Constants[I] << "\n";
                 dbgs() << "FnSpecialization: End of constants\n\n");

      // Create a version of the function in which the argument is marked
      // constant with the given value.
      for (auto *C : Constants) {
        // Clone the function. We leave the ValueToValueMap empty to allow
        // IPSCCP to propagate the constant arguments.
        Function *Clone = cloneCandidateFunction(F);
        Argument *ClonedArg = Clone->arg_begin() + A.getArgNo();

        // Rewrite calls to the function so that they call the clone instead.
        rewriteCallSites(F, Clone, *ClonedArg, C);

        // Initialize the lattice state of the arguments of the function clone,
        // marking the argument on which we specialized the function constant
        // with the given value.
        Solver.markArgInFuncSpecialization(F, ClonedArg, C);

        // Mark all the specialized functions.
        Specializations.push_back(Clone);
        NbFunctionsSpecialized++;
      }

      // If the function has been completely specialized, the original function
      // is no longer needed. Mark it unreachable.
      if (!IsPartial)
        Solver.markFunctionUnreachable(F);

      // FIXME: Only one argument per function.
      return true;
    }

    return false;
  }

  /// Compute the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, we
    // shouldn't specialize it. Set the specialization cost to Invalid.
    // Likewise, if the instruction count suggests that the function is small
    // enough to likely get inlined, we shouldn't specialize it.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function and a penalty for specializing more
    // functions.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

  /// Compute the bonus contributed by user \p U of a candidate argument,
  /// recursing into its users and weighting the result by loop depth.
  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If this is not an instruction, we do not know how to evaluate it.
    // Keep the minimum possible cost for now so that it doesn't affect
    // specialization.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if it is inside the loop.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for: " << *A
                      << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization: User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
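    // For example (illustrative), a constant argument such as
    //   i8* bitcast (void (i32)* @callee to i8*)
    // is looked through here so that @callee is found.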
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is a
    // good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too large
      // to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming values
  /// of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which to
  /// specialize the function are collected in \p Constants. If the values in
  /// \p Constants represent the complete set of values that \p A can take on,
  /// the function will be completely specialized, and the \p IsPartial flag is
  /// set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A,
                             SmallVectorImpl<Constant *> &Constants,
                             const InstructionCost &FnSpecCost,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: nothing to do, arg is already "
                        << "constant?\n");
      return false;
    }

    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently we won't specialize if the number of calls exceeds
    // the threshold, even when several of them pass the same constant, e.g.
    // foo(a) x 4 and foo(b) x 1. It might be beneficial to take the number of
    // occurrences into account in the cost model, in which case we would need
    // to find the unique constants.
    //
    // TODO 2: this currently does not support constant ranges, i.e. integer
    // ranges.
    //
    SmallVector<Constant *, 4> PossibleConstants;
    bool AllConstant = getPossibleConstants(A, PossibleConstants);
    if (PossibleConstants.empty()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
      return false;
    }
    if (PossibleConstants.size() > MaxConstantsThreshold) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: number of constants found exceeds "
                        << "the maximum number of constants threshold.\n");
      return false;
    }

    for (auto *C : PossibleConstants) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Constant: " << *C << "\n");
      if (ForceFunctionSpecialization) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Forced!\n");
        Constants.push_back(C);
        continue;
      }
      if (getSpecializationBonus(A, C) > FnSpecCost) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: profitable!\n");
        Constants.push_back(C);
      } else {
        LLVM_DEBUG(dbgs() << "FnSpecialization: not profitable\n");
      }
    }

    // None of the constant values the argument can take on were deemed good
    // candidates on which to specialize the function.
    if (Constants.empty())
      return false;

    // This will be a partial specialization if some of the constants were
    // rejected due to their profitability.
    IsPartial = !AllConstant || PossibleConstants.size() != Constants.size();

    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A,
                            SmallVectorImpl<Constant *> &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      // If the call site has attribute minsize set, that callsite won't be
      // specialized.
      if (CS.hasFnAttr(Attribute::MinSize)) {
        AllConstant = false;
        continue;
      }

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      if (isa<PoisonValue>(V))
        return false;

      // For now, constant expressions are fine but only if they are function
      // calls.
      if (auto *CE = dyn_cast<ConstantExpr>(V))
        if (!isa<Function>(CE->getOperand(0)))
          return false;

      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        // Check if we want to specialize on the address of non-constant
        // global values.
        if (!GV->isConstant())
          if (!SpecializeOnAddresses)
            return false;

        if (!GV->getValueType()->isSingleValueType())
          return false;
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at index \p
  /// ArgNo is equal to constant \p C. The calls are rewritten to call function
  /// \p Clone instead.
  ///
  /// Callsites that have been marked with the MinSize function attribute won't
  /// be specialized and rewritten.
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }
    for (auto *CS : CallSitesToRewrite) {
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }
};

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());

  // No tracked functions, so nothing to do: don't run the solver; just remove
  // the ssa_copy intrinsics that may have been introduced.
  if (TrackedFuncs.empty()) {
    removeSSACopy(M);
    return false;
  }

  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      // Not running the solver unnecessarily is checked in regression test
      // nothing-to-do.ll, so if this debug message is changed, that regression
      // test needs updating too.
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");

      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        // FIXME: The solver may make changes to the function here, so set
        // Changed, even if later function specialization does not trigger.
        for (auto &I : make_early_inc_range(BB))
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument-tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> CurrentSpecializations;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, CurrentSpecializations)) {

    // Run the solver for the specialized functions.
    RunSCCPSolver(CurrentSpecializations);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    CurrentSpecializations.clear();
    Changed = true;
  }

  // Clean up the IR by removing ssa_copy intrinsics.
  removeSSACopy(M);
  return Changed;
}