//===- FunctionSpecialization.cpp - Function Specialization ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This specialises functions with constant parameters (e.g. functions,
// globals). Constant parameters like function pointers and constant globals
// are propagated to the callee by specializing the function.
//
// Current limitations:
// - It does not yet handle integer ranges.
// - Only one argument per function is specialised.
// - The cost-model could be further looked into.
// - We are not yet caching analysis results.
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Limit the number of times a recursive function gets specialized when
//   `func-specialization-max-iters` increases linearly. See the discussion in
//   https://reviews.llvm.org/D106426 for details.
// - Don't transform the function if no function specialization happens.
//
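// Example (purely illustrative, names not taken from this file or the test
// suite) of the kind of transformation this enables, sketched at the C level:
//
//   static int square(int x) { return x * x; }
//   static int apply(int (*op)(int), int x) { return op(x); }
//   int caller(int x) { return apply(square, x); }
//
// Specializing `apply` on the constant function pointer `square` creates a
// clone whose indirect call becomes a direct call to `square`, which the
// inliner can then fold into `caller`.
//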
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <cmath>

using namespace llvm;

#define DEBUG_TYPE "function-specialization"

STATISTIC(NumFuncSpecialized, "Number of functions specialized");

static cl::opt<bool> ForceFunctionSpecialization(
    "force-function-specialization", cl::init(false), cl::Hidden,
    cl::desc("Force function specialization for every call site with a "
             "constant argument"));

static cl::opt<unsigned> FuncSpecializationMaxIters(
    "func-specialization-max-iters", cl::Hidden,
    cl::desc("The maximum number of iterations function specialization is run"),
    cl::init(1));

static cl::opt<unsigned> MaxConstantsThreshold(
    "func-specialization-max-constants", cl::Hidden,
    cl::desc("The maximum number of clones allowed for a single function "
             "specialization"),
    cl::init(3));

static cl::opt<unsigned> SmallFunctionThreshold(
    "func-specialization-size-threshold", cl::Hidden,
    cl::desc("Functions with an IR instruction count below this threshold "
             "are not specialized, to avoid useless specializations."),
    cl::init(100));

static cl::opt<unsigned>
    AvgLoopIterationCount("func-specialization-avg-iters-cost", cl::Hidden,
                          cl::desc("Average loop iteration count cost"),
                          cl::init(10));

static cl::opt<bool> EnableSpecializationForLiteralConstant(
    "function-specialization-for-literal-constant", cl::init(false), cl::Hidden,
    cl::desc("Make function specialization available for literal constants."));

// Helper to check if \p LV is either a constant or a constant range with a
// single element. This should cover exactly the same cases as the old
// ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
static bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is either overdefined or a constant int.
static bool isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !isConstant(LV);
}

static Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call) {
  Value *StoreValue = nullptr;
  for (auto *User : Alloca->users()) {
    // We can't use llvm::isAllocaPromotable() as that would fail because of
    // the usage in the CallInst, which is what we check here.
    if (User == Call)
      continue;
    if (auto *Bitcast = dyn_cast<BitCastInst>(User)) {
      if (!Bitcast->hasOneUse() || *Bitcast->user_begin() != Call)
        return nullptr;
      continue;
    }

    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // This is a duplicate store, bail out.
      if (StoreValue || Store->isVolatile())
        return nullptr;
      StoreValue = Store->getValueOperand();
      continue;
    }
    // Bail if there is any other unknown usage.
    return nullptr;
  }
  return dyn_cast_or_null<Constant>(StoreValue);
}

// A constant stack value is an AllocaInst that has a single constant
// value stored to it. Return this constant if such an alloca stack value
// is a function argument.
static Constant *getConstantStackValue(CallInst *Call, Value *Val,
                                       SCCPSolver &Solver) {
  if (!Val)
    return nullptr;
  Val = Val->stripPointerCasts();
  if (auto *ConstVal = dyn_cast<ConstantInt>(Val))
    return ConstVal;
  auto *Alloca = dyn_cast<AllocaInst>(Val);
  if (!Alloca || !Alloca->getAllocatedType()->isIntegerTy())
    return nullptr;
  return getPromotableAlloca(Alloca, Call);
}

// To support specializing recursive functions, it is important to propagate
// constant arguments because after a first iteration of specialisation, a
// reduced example may look like this:
//
//   define internal void @RecursiveFn(i32* arg1) {
//     %temp = alloca i32, align 4
//     store i32 2, i32* %temp, align 4
//     call void @RecursiveFn.1(i32* nonnull %temp)
//     ret void
//   }
//
// Before the next iteration, we need to propagate the constant like so,
// which allows further specialization in subsequent iterations:
//
//   @funcspec.arg = internal constant i32 2
//
//   define internal void @someFunc(i32* arg1) {
//     call void @otherFunc(i32* nonnull @funcspec.arg)
//     ret void
//   }
//
static void constantArgPropagation(SmallVectorImpl<Function *> &WorkList,
                                   Module &M, SCCPSolver &Solver) {
  // Iterate over the argument tracked functions to see if there are any new
  // constant values for the call instruction via stack variables.
  for (auto *F : WorkList) {
    // TODO: Generalize for any read only arguments.
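    // Only handle functions with a single pointer argument that is only read
    // from: the transformation below replaces the passed-in stack slot with a
    // read-only global, which is only valid if the callee never writes
    // through the argument.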
    if (F->arg_size() != 1)
      continue;

    auto &Arg = *F->arg_begin();
    if (!Arg.onlyReadsMemory() || !Arg.getType()->isPointerTy())
      continue;

    for (auto *User : F->users()) {
      auto *Call = dyn_cast<CallInst>(User);
      if (!Call)
        break;
      auto *ArgOp = Call->getArgOperand(0);
      auto *ArgOpType = ArgOp->getType();
      auto *ConstVal = getConstantStackValue(Call, ArgOp, Solver);
      if (!ConstVal)
        break;

      Value *GV = new GlobalVariable(M, ConstVal->getType(), true,
                                     GlobalValue::InternalLinkage, ConstVal,
                                     "funcspec.arg");

      if (ArgOpType != ConstVal->getType())
        GV = ConstantExpr::getBitCast(cast<Constant>(GV), ArgOp->getType());

      Call->setArgOperand(0, GV);

      // Add the changed CallInst to the solver's worklist.
      Solver.visitCall(*Call);
    }
  }
}

// ssa_copy intrinsics are introduced by the SCCP solver. These intrinsics
// interfere with the constantArgPropagation optimization.
static void removeSSACopy(Function &F) {
  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
      Instruction *Inst = &*BI++;
      auto *II = dyn_cast<IntrinsicInst>(Inst);
      if (!II)
        continue;
      if (II->getIntrinsicID() != Intrinsic::ssa_copy)
        continue;
      Inst->replaceAllUsesWith(II->getOperand(0));
      Inst->eraseFromParent();
    }
  }
}

static void removeSSACopy(Module &M) {
  for (Function &F : M)
    removeSSACopy(F);
}

class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  /// Analyses used to help determine if a function should be specialized.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<TargetLibraryInfo &(Function &)> GetTLI;

  SmallPtrSet<Function *, 2> SpecializedFuncs;

public:
  FunctionSpecializer(SCCPSolver &Solver,
                      std::function<AssumptionCache &(Function &)> GetAC,
                      std::function<TargetTransformInfo &(Function &)> GetTTI,
                      std::function<TargetLibraryInfo &(Function &)> GetTLI)
      : Solver(Solver), GetAC(GetAC), GetTTI(GetTTI), GetTLI(GetTLI) {}

  /// Attempt to specialize functions in the module to enable constant
  /// propagation across function boundaries.
  ///
  /// \returns true if at least one function is specialized.
  bool
  specializeFunctions(SmallVectorImpl<Function *> &FuncDecls,
                      SmallVectorImpl<Function *> &CurrentSpecializations) {

    // Attempt to specialize the argument-tracked functions.
    bool Changed = false;
    for (auto *F : FuncDecls) {
      if (specializeFunction(F, CurrentSpecializations)) {
        Changed = true;
        LLVM_DEBUG(dbgs() << "FnSpecialization: Can specialize this func.\n");
      } else {
        LLVM_DEBUG(
            dbgs() << "FnSpecialization: Cannot specialize this func.\n");
      }
    }

    for (auto *SpecializedFunc : CurrentSpecializations) {
      SpecializedFuncs.insert(SpecializedFunc);

      // Initialize the state of the newly created functions, marking them
      // argument-tracked and executable.
      if (SpecializedFunc->hasExactDefinition() &&
          !SpecializedFunc->hasFnAttribute(Attribute::Naked))
        Solver.addTrackedFunction(SpecializedFunc);
      Solver.addArgumentTrackedFunction(SpecializedFunc);
      FuncDecls.push_back(SpecializedFunc);
      Solver.markBlockExecutable(&SpecializedFunc->front());
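
      // The argument on which the clone was specialized already has a
      // constant lattice value (set via markArgInFuncSpecialization() in
      // specializeFunction()), so the loop below can fold its uses directly.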
      // Replace the function arguments for the specialized functions.
      for (Argument &Arg : SpecializedFunc->args())
        if (!Arg.use_empty() && tryToReplaceWithConstant(&Arg))
          LLVM_DEBUG(dbgs() << "FnSpecialization: Replaced constant argument: "
                            << Arg.getName() << "\n");
    }

    NumFuncSpecialized += NbFunctionsSpecialized;
    return Changed;
  }

  bool tryToReplaceWithConstant(Value *V) {
    if (!V->getType()->isSingleValueType() || isa<CallBase>(V) ||
        V->user_empty())
      return false;

    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;
    auto *Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
    V->replaceAllUsesWith(Const);

    for (auto *U : Const->users())
      if (auto *I = dyn_cast<Instruction>(U))
        if (Solver.isBlockExecutable(I->getParent()))
          Solver.visit(I);

    // Remove the instruction from the block and the solver.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (I->isSafeToRemove()) {
        I->eraseFromParent();
        Solver.removeLatticeValueFor(I);
      }
    }
    return true;
  }

private:
  // The number of functions specialised, used for collecting statistics and
  // also in the cost model.
  unsigned NbFunctionsSpecialized = 0;

  /// Clone the function \p F and remove the ssa_copy intrinsics added by
  /// the SCCPSolver in the cloned version.
  Function *cloneCandidateFunction(Function *F) {
    ValueToValueMapTy EmptyMap;
    Function *Clone = CloneFunction(F, EmptyMap);
    removeSSACopy(*Clone);
    return Clone;
  }

  /// This function decides whether to specialize function \p F based on the
  /// known constant values its arguments can take on. Specialization is
  /// performed on the first interesting argument. Specializations based on
  /// additional arguments will be evaluated on following iterations of the
  /// main IPSCCP solve loop. \returns true if the function is specialized and
  /// false otherwise.
  bool specializeFunction(Function *F,
                          SmallVectorImpl<Function *> &Specializations) {

    // Do not specialize the cloned function again.
    if (SpecializedFuncs.contains(F)) {
      return false;
    }

    // If we're optimizing the function for size, we shouldn't specialize it.
    if (F->hasOptSize() ||
        shouldOptimizeForSize(F, nullptr, nullptr, PGSOQueryType::IRPass))
      return false;

    // Exit if the function is not executable. There's no point in specializing
    // a dead function.
    if (!Solver.isBlockExecutable(&F->getEntryBlock()))
      return false;

    // It is a waste of time to specialize a function that will be inlined
    // anyway.
    if (F->hasFnAttribute(Attribute::AlwaysInline))
      return false;

    LLVM_DEBUG(dbgs() << "FnSpecialization: Try function: " << F->getName()
                      << "\n");

    // Determine if it would be profitable to create a specialization of the
    // function where the argument takes on the given constant value. If so,
    // add the constant to Constants.
    auto FnSpecCost = getSpecializationCost(F);
    if (!FnSpecCost.isValid()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Invalid specialisation cost.\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "FnSpecialization: func specialisation cost: ";
               FnSpecCost.print(dbgs()); dbgs() << "\n");
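
    // FnSpecCost is the estimated cost of creating a single clone of F. Each
    // candidate constant for an argument must earn a larger bonus than this
    // (see isArgumentInteresting / getSpecializationBonus) before a clone is
    // created for it.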
    // Determine if we should specialize the function based on the values the
    // argument can take on. If specialization is not profitable, we continue
    // on to the next argument.
    for (Argument &A : F->args()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing arg: " << A.getName()
                        << "\n");
      // True if this will be a partial specialization. We will need to keep
      // the original function around in addition to the added specializations.
      bool IsPartial = true;

      // Determine if this argument is interesting. If we know the argument can
      // take on any constant values, they are collected in Constants. If the
      // argument can only ever equal a constant value in Constants, the
      // function will be completely specialized, and the IsPartial flag will
      // be set to false by isArgumentInteresting (that function only adds
      // values to the Constants list that are deemed profitable).
      SmallVector<Constant *, 4> Constants;
      if (!isArgumentInteresting(&A, Constants, FnSpecCost, IsPartial)) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Argument is not interesting\n");
        continue;
      }

      assert(!Constants.empty() && "No constants on which to specialize");
      LLVM_DEBUG(dbgs() << "FnSpecialization: Argument is interesting!\n"
                        << "FnSpecialization: Specializing '" << F->getName()
                        << "' on argument: " << A << "\n"
                        << "FnSpecialization: Constants are:\n\n";
                 for (unsigned I = 0; I < Constants.size(); ++I)
                   dbgs() << *Constants[I] << "\n";
                 dbgs() << "FnSpecialization: End of constants\n\n");

      // Create a version of the function in which the argument is marked
      // constant with the given value.
      for (auto *C : Constants) {
        // Clone the function. We leave the ValueToValueMap empty to allow
        // IPSCCP to propagate the constant arguments.
        Function *Clone = cloneCandidateFunction(F);
        Argument *ClonedArg = Clone->arg_begin() + A.getArgNo();

        // Rewrite calls to the function so that they call the clone instead.
        rewriteCallSites(F, Clone, *ClonedArg, C);

        // Initialize the lattice state of the arguments of the function clone,
        // marking the argument on which we specialized the function constant
        // with the given value.
        Solver.markArgInFuncSpecialization(F, ClonedArg, C);

        // Mark all the specialized functions.
        Specializations.push_back(Clone);
        NbFunctionsSpecialized++;
      }

      // If the function has been completely specialized, the original function
      // is no longer needed. Mark it unreachable.
      if (!IsPartial)
        Solver.markFunctionUnreachable(F);

      // FIXME: Only one argument per function.
      return true;
    }

    return false;
  }
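
  // Note on the cost model below: the returned cost is
  // Metrics.NumInsts * InlineConstants::InstrCost * (NbFunctionsSpecialized + 1),
  // so, for example (illustrative), the same function becomes twice as
  // expensive to specialize once any clone has already been created, since
  // the penalty factor rises from 1 to 2.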
  /// Compute the cost of specializing function \p F.
  InstructionCost getSpecializationCost(Function *F) {
    // Compute the code metrics for the function.
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(F, &(GetAC)(*F), EphValues);
    CodeMetrics Metrics;
    for (BasicBlock &BB : *F)
      Metrics.analyzeBasicBlock(&BB, (GetTTI)(*F), EphValues);

    // If the code metrics reveal that we shouldn't duplicate the function, or
    // if the function is small enough that it is likely to be inlined anyway,
    // don't specialize it. Set the specialization cost to Invalid.
    if (Metrics.notDuplicatable ||
        (!ForceFunctionSpecialization &&
         Metrics.NumInsts < SmallFunctionThreshold)) {
      InstructionCost C{};
      C.setInvalid();
      return C;
    }

    // Otherwise, set the specialization cost to be the cost of all the
    // instructions in the function, scaled by a penalty that grows with the
    // number of functions already specialized.
    unsigned Penalty = NbFunctionsSpecialized + 1;
    return Metrics.NumInsts * InlineConstants::InstrCost * Penalty;
  }

  InstructionCost getUserBonus(User *U, llvm::TargetTransformInfo &TTI,
                               LoopInfo &LI) {
    auto *I = dyn_cast_or_null<Instruction>(U);
    // If it is not an instruction, we do not know how to evaluate it. Keep
    // the minimum possible cost for now so that it doesn't affect the
    // specialization decision.
    if (!I)
      return std::numeric_limits<unsigned>::min();

    auto Cost = TTI.getUserCost(U, TargetTransformInfo::TCK_SizeAndLatency);

    // Traverse recursively if there are more uses.
    // TODO: Any other instructions to be added here?
    if (I->mayReadFromMemory() || I->isCast())
      for (auto *User : I->users())
        Cost += getUserBonus(User, TTI, LI);

    // Increase the cost if it is inside a loop.
    auto LoopDepth = LI.getLoopDepth(I->getParent());
    Cost *= std::pow((double)AvgLoopIterationCount, LoopDepth);
    return Cost;
  }
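
  // Note that the loop-depth scaling above means, for example, that a user of
  // the argument inside a loop nest of depth 2 has its cost multiplied by
  // AvgLoopIterationCount^2, i.e. by 100 with the default value of 10.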
  /// Compute a bonus for replacing argument \p A with constant \p C.
  InstructionCost getSpecializationBonus(Argument *A, Constant *C) {
    Function *F = A->getParent();
    DominatorTree DT(*F);
    LoopInfo LI(DT);
    auto &TTI = (GetTTI)(*F);
    LLVM_DEBUG(dbgs() << "FnSpecialization: Analysing bonus for: " << *A
                      << "\n");

    InstructionCost TotalCost = 0;
    for (auto *U : A->users()) {
      TotalCost += getUserBonus(U, TTI, LI);
      LLVM_DEBUG(dbgs() << "FnSpecialization: User cost ";
                 TotalCost.print(dbgs()); dbgs() << " for: " << *U << "\n");
    }

    // The below heuristic is only concerned with exposing inlining
    // opportunities via indirect call promotion. If the argument is not a
    // function pointer, give up.
    if (!isa<PointerType>(A->getType()) ||
        !isa<FunctionType>(A->getType()->getPointerElementType()))
      return TotalCost;

    // Since the argument is a function pointer, its incoming constant values
    // should be functions or constant expressions. The code below attempts to
    // look through cast expressions to find the function that will be called.
    Value *CalledValue = C;
    while (isa<ConstantExpr>(CalledValue) &&
           cast<ConstantExpr>(CalledValue)->isCast())
      CalledValue = cast<User>(CalledValue)->getOperand(0);
    Function *CalledFunction = dyn_cast<Function>(CalledValue);
    if (!CalledFunction)
      return TotalCost;

    // Get TTI for the called function (used for the inline cost).
    auto &CalleeTTI = (GetTTI)(*CalledFunction);

    // Look at all the call sites whose called value is the argument.
    // Specializing the function on the argument would allow these indirect
    // calls to be promoted to direct calls. If the indirect call promotion
    // would likely enable the called function to be inlined, specializing is
    // a good idea.
    int Bonus = 0;
    for (User *U : A->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto *CS = cast<CallBase>(U);
      if (CS->getCalledOperand() != A)
        continue;

      // Get the cost of inlining the called function at this call site. Note
      // that this is only an estimate. The called function may eventually
      // change in a way that leads to it not being inlined here, even though
      // inlining looks profitable now. For example, one of its called
      // functions may be inlined into it, making the called function too
      // large to be inlined into this call site.
      //
      // We apply a boost for performing indirect call promotion by increasing
      // the default threshold by the threshold for indirect calls.
      auto Params = getInlineParams();
      Params.DefaultThreshold += InlineConstants::IndirectCallThreshold;
      InlineCost IC =
          getInlineCost(*CS, CalledFunction, Params, CalleeTTI, GetAC, GetTLI);

      // We clamp the bonus for this call to be between zero and the default
      // threshold.
      if (IC.isAlways())
        Bonus += Params.DefaultThreshold;
      else if (IC.isVariable() && IC.getCostDelta() > 0)
        Bonus += IC.getCostDelta();
    }

    return TotalCost + Bonus;
  }

  /// Determine if we should specialize a function based on the incoming
  /// values of the given argument.
  ///
  /// This function implements the goal-directed heuristic. It determines if
  /// specializing the function based on the incoming values of argument \p A
  /// would result in any significant optimization opportunities. If
  /// optimization opportunities exist, the constant values of \p A on which
  /// to specialize the function are collected in \p Constants. If the values
  /// in \p Constants represent the complete set of values that \p A can take
  /// on, the function will be completely specialized, and the \p IsPartial
  /// flag is set to false.
  ///
  /// \returns true if the function should be specialized on the given
  /// argument.
  bool isArgumentInteresting(Argument *A,
                             SmallVectorImpl<Constant *> &Constants,
                             const InstructionCost &FnSpecCost,
                             bool &IsPartial) {
    // For now, don't attempt to specialize functions based on the values of
    // composite types.
    if (!A->getType()->isSingleValueType() || A->user_empty())
      return false;

    // If the argument isn't overdefined, there's nothing to do. It should
    // already be constant.
    if (!Solver.getLatticeValueFor(A).isOverdefined()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: nothing to do, arg is already "
                        << "constant?\n");
      return false;
    }
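
    // Being overdefined here means IPSCCP could not prove the argument takes
    // a single constant value across all call sites, which is exactly the
    // case where creating per-constant clones can recover precision.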
    // Collect the constant values that the argument can take on. If the
    // argument can't take on any constant values, we aren't going to
    // specialize the function. While it's possible to specialize the function
    // based on non-constant arguments, there's likely not much benefit to
    // constant propagation in doing so.
    //
    // TODO 1: currently we won't specialize if more calls than the threshold
    // use the same constant, e.g. foo(a) x 4 and foo(b) x 1. It might be
    // beneficial to take the number of occurrences into account in the cost
    // model, but that would require finding the unique constants first.
    //
    // TODO 2: this currently does not support constant ranges, i.e. integer
    // ranges.
    //
    SmallVector<Constant *, 4> PossibleConstants;
    bool AllConstant = getPossibleConstants(A, PossibleConstants);
    if (PossibleConstants.empty()) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: no possible constants found\n");
      return false;
    }
    if (PossibleConstants.size() > MaxConstantsThreshold) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: number of constants found exceeds "
                        << "the maximum number of constants threshold.\n");
      return false;
    }

    for (auto *C : PossibleConstants) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Constant: " << *C << "\n");
      if (ForceFunctionSpecialization) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: Forced!\n");
        Constants.push_back(C);
        continue;
      }
      if (getSpecializationBonus(A, C) > FnSpecCost) {
        LLVM_DEBUG(dbgs() << "FnSpecialization: profitable!\n");
        Constants.push_back(C);
      } else {
        LLVM_DEBUG(dbgs() << "FnSpecialization: not profitable\n");
      }
    }

    // None of the constant values the argument can take on were deemed good
    // candidates on which to specialize the function.
    if (Constants.empty())
      return false;

    // This will be a partial specialization if some of the constants were
    // rejected due to their profitability.
    IsPartial = !AllConstant || PossibleConstants.size() != Constants.size();

    return true;
  }

  /// Collect in \p Constants all the constant values that argument \p A can
  /// take on.
  ///
  /// \returns true if all of the values the argument can take on are constant
  /// (e.g., the argument's parent function cannot be called with an
  /// overdefined value).
  bool getPossibleConstants(Argument *A,
                            SmallVectorImpl<Constant *> &Constants) {
    Function *F = A->getParent();
    bool AllConstant = true;

    // Iterate over all the call sites of the argument's parent function.
    for (User *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);

      // If the parent of the call site will never be executed, we don't need
      // to worry about the passed value.
      if (!Solver.isBlockExecutable(CS.getParent()))
        continue;

      auto *V = CS.getArgOperand(A->getArgNo());
      // TrackValueOfGlobalVariable only tracks scalar global variables.
      if (auto *GV = dyn_cast<GlobalVariable>(V)) {
        if (!GV->getValueType()->isSingleValueType()) {
          return false;
        }
      }

      if (isa<Constant>(V) && (Solver.getLatticeValueFor(V).isConstant() ||
                               EnableSpecializationForLiteralConstant))
        Constants.push_back(cast<Constant>(V));
      else
        AllConstant = false;
    }

    // If the argument can only take on constant values, AllConstant will be
    // true.
    return AllConstant;
  }

  /// Rewrite calls to function \p F to call function \p Clone instead.
  ///
  /// This function modifies calls to function \p F whose argument at index
  /// \p ArgNo is equal to constant \p C. The calls are rewritten to call
  /// function \p Clone instead.
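  /// Calls inside \p Clone itself that simply pass the (now specialized)
  /// argument \p Arg straight through are also redirected, which keeps
  /// recursive specializations consistent.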
  void rewriteCallSites(Function *F, Function *Clone, Argument &Arg,
                        Constant *C) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<CallBase *, 4> CallSitesToRewrite;
    for (auto *U : F->users()) {
      if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
        continue;
      auto &CS = *cast<CallBase>(U);
      if (!CS.getCalledFunction() || CS.getCalledFunction() != F)
        continue;
      CallSitesToRewrite.push_back(&CS);
    }
    for (auto *CS : CallSitesToRewrite) {
      if ((CS->getFunction() == Clone && CS->getArgOperand(ArgNo) == &Arg) ||
          CS->getArgOperand(ArgNo) == C) {
        CS->setCalledFunction(Clone);
        Solver.markOverdefined(CS);
      }
    }
  }
};

bool llvm::runFunctionSpecialization(
    Module &M, const DataLayout &DL,
    std::function<TargetLibraryInfo &(Function &)> GetTLI,
    std::function<TargetTransformInfo &(Function &)> GetTTI,
    std::function<AssumptionCache &(Function &)> GetAC,
    function_ref<AnalysisResultsForFn(Function &)> GetAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());
  FunctionSpecializer FS(Solver, GetAC, GetTTI, GetTLI);
  bool Changed = false;

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    if (F.hasFnAttribute(Attribute::NoDuplicate))
      continue;

    LLVM_DEBUG(dbgs() << "\nFnSpecialization: Analysing decl: " << F.getName()
                      << "\n");
    Solver.addAnalysis(F, GetAnalysis(F));

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can track arguments\n");
      Solver.addArgumentTrackedFunction(&F);
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Can't track arguments!\n"
                        << "FnSpecialization: Doesn't have local linkage, or "
                        << "has its address taken\n");
    }

    // Assume the function is called.
    Solver.markBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so,
  // add the global variables we can track to the solver's set of tracked
  // global variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.trackValueOfGlobalVariable(&G);
  }
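
  // The driver below alternates between constant propagation and cloning:
  // run the solver, try to specialize the argument-tracked functions, re-run
  // the solver on any new clones, propagate constant-initialized stack slots
  // to callees (constantArgPropagation), and repeat until no new
  // specializations are created or func-specialization-max-iters is reached.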
  // Solve for constants.
  auto RunSCCPSolver = [&](auto &WorkList) {
    bool ResolvedUndefs = true;

    while (ResolvedUndefs) {
      LLVM_DEBUG(dbgs() << "FnSpecialization: Running solver\n");
      Solver.solve();
      LLVM_DEBUG(dbgs() << "FnSpecialization: Resolving undefs\n");
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        if (Solver.resolvedUndefsIn(*F))
          ResolvedUndefs = true;
    }

    for (auto *F : WorkList) {
      for (BasicBlock &BB : *F) {
        if (!Solver.isBlockExecutable(&BB))
          continue;
        for (auto &I : make_early_inc_range(BB))
          // FIXME: The solver may make changes to the function here, so set
          // Changed, even if later function specialization does not trigger.
          Changed |= FS.tryToReplaceWithConstant(&I);
      }
    }
  };

  auto &TrackedFuncs = Solver.getArgumentTrackedFunctions();
  SmallVector<Function *, 16> FuncDecls(TrackedFuncs.begin(),
                                        TrackedFuncs.end());
#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "FnSpecialization: Worklist fn decls:\n");
  for (auto *F : FuncDecls)
    LLVM_DEBUG(dbgs() << "FnSpecialization: *) " << F->getName() << "\n");
#endif

  // Initially resolve the constants in all the argument-tracked functions.
  RunSCCPSolver(FuncDecls);

  SmallVector<Function *, 2> CurrentSpecializations;
  unsigned I = 0;
  while (FuncSpecializationMaxIters != I++ &&
         FS.specializeFunctions(FuncDecls, CurrentSpecializations)) {

    // Run the solver for the specialized functions.
    RunSCCPSolver(CurrentSpecializations);

    // Replace some unresolved constant arguments.
    constantArgPropagation(FuncDecls, M, Solver);

    CurrentSpecializations.clear();
    Changed = true;
  }

  // Clean up the IR by removing ssa_copy intrinsics.
  removeSSACopy(M);
  return Changed;
}