//===-- ArgumentPromotion.cpp - Promote by-reference arguments ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments. In
// practice, this means looking for internal functions that have pointer
// arguments. If it can prove, through the use of alias analysis, that an
// argument is *only* loaded, then it can pass the value into the function
// instead of the address of the value. This can cause recursive simplification
// of code and lead to the elimination of allocas (especially in C++ template
// code like the STL).
//
// This pass also handles aggregate arguments that are passed into a function,
// scalarizing them if the elements of the aggregate are only loaded. Note that
// by default it refuses to scalarize aggregates which would require passing in
// more than three operands to the function, because passing thousands of
// operands for a large array or structure is unprofitable! This limit can be
// configured or disabled, however.
//
// Note that this transformation could also be done for arguments that are only
// stored to (returning the value instead), but this is not currently done.
// This case would be best handled when and if LLVM begins supporting multiple
// return values from functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <set>
using namespace llvm;

#define DEBUG_TYPE "argpromotion"

STATISTIC(NumArgumentsPromoted , "Number of pointer arguments promoted");
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
STATISTIC(NumByValArgsPromoted , "Number of byval arguments promoted");
STATISTIC(NumArgumentsDead     , "Number of dead pointer args eliminated");

namespace {
  /// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
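  ///
  /// Illustrative sketch (simplified, hypothetical IR; not taken from an
  /// actual test case): given an internal function
  ///   define internal i32 @callee(i32* %p)   ; %p is only ever loaded
  /// the pass rewrites every caller to perform the load itself and pass the
  /// loaded value directly:
  ///   define internal i32 @callee(i32 %p.val)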
  ///
  struct ArgPromotion : public CallGraphSCCPass {
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<AliasAnalysis>();
      CallGraphSCCPass::getAnalysisUsage(AU);
    }

    bool runOnSCC(CallGraphSCC &SCC) override;
    static char ID; // Pass identification, replacement for typeid
    explicit ArgPromotion(unsigned maxElements = 3)
        : CallGraphSCCPass(ID), DL(nullptr), maxElements(maxElements) {
      initializeArgPromotionPass(*PassRegistry::getPassRegistry());
    }

    /// A vector used to hold the indices of a single GEP instruction
    typedef std::vector<uint64_t> IndicesVector;

    const DataLayout *DL;
  private:
    bool isDenselyPacked(Type *type);
    bool canPaddingBeAccessed(Argument *Arg);
    CallGraphNode *PromoteArguments(CallGraphNode *CGN);
    bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const;
    CallGraphNode *DoPromotion(Function *F,
                               SmallPtrSetImpl<Argument*> &ArgsToPromote,
                               SmallPtrSetImpl<Argument*> &ByValArgsToTransform);

    using llvm::Pass::doInitialization;
    bool doInitialization(CallGraph &CG) override;
    /// The maximum number of elements to expand, or 0 for unlimited.
    unsigned maxElements;
    DenseMap<const Function *, DISubprogram> FunctionDIs;
  };
}

char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                "Promote 'by reference' arguments to scalars", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                "Promote 'by reference' arguments to scalars", false, false)

Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
  return new ArgPromotion(maxElements);
}

bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  bool Changed = false, LocalChange;

  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;

  do {  // Iterate until we stop promoting from this SCC.
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
      if (CallGraphNode *CGN = PromoteArguments(*I)) {
        LocalChange = true;
        SCC.ReplaceNode(*I, CGN);
      }
    }
    Changed |= LocalChange;  // Remember that we changed something.
  } while (LocalChange);

  return Changed;
}

/// \brief Checks if a type could have padding bytes.
bool ArgPromotion::isDenselyPacked(Type *type) {

  // There is no size information, so be conservative.
  if (!type->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
  if (!DL || DL->getTypeSizeInBits(type) != DL->getTypeAllocSizeInBits(type))
    return false;

  if (!isa<CompositeType>(type))
    return true;

  // For homogeneous sequential types, check for padding within members.
  if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
    return isa<PointerType>(seqTy) || isDenselyPacked(seqTy->getElementType());

  // Check for padding within and between elements of a struct.
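  // For example (illustrative, assuming a typical data layout): { i8, i32 }
  // has three padding bytes between its elements, so the second element's
  // offset (32 bits) will not match the running StartPos (8 bits) and the
  // struct is rejected, while { i32, i32 } is accepted as densely packed.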
  StructType *StructTy = cast<StructType>(type);
  const StructLayout *Layout = DL->getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
    Type *ElTy = StructTy->getElementType(i);
    if (!isDenselyPacked(ElTy))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(i))
      return false;
    StartPos += DL->getTypeAllocSizeInBits(ElTy);
  }

  return true;
}

/// \brief Checks if the padding bytes of an argument could be accessed.
bool ArgPromotion::canPaddingBeAccessed(Argument *arg) {

  assert(arg->hasByValAttr());

  // Track all the pointers to the argument to make sure they are not captured.
  SmallPtrSet<Value *, 16> PtrValues;
  PtrValues.insert(arg);

  // Track all of the stores.
  SmallVector<StoreInst *, 16> Stores;

  // Scan through the uses recursively to make sure the pointer is always used
  // sanely.
  SmallVector<Value *, 16> WorkList;
  WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
  while (!WorkList.empty()) {
    Value *V = WorkList.back();
    WorkList.pop_back();
    if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
      if (PtrValues.insert(V).second)
        WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
    } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
      Stores.push_back(Store);
    } else if (!isa<LoadInst>(V)) {
      return true;
    }
  }

  // Check to make sure the pointers aren't captured
  for (StoreInst *Store : Stores)
    if (PtrValues.count(Store->getValueOperand()))
      return true;

  return false;
}

/// PromoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct). If safe to promote some arguments, it
/// calls the DoPromotion method.
///
CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
  Function *F = CGN->getFunction();

  // Make sure that it is local to this module.
  if (!F || !F->hasLocalLinkage()) return nullptr;

  // First check: see if there are any pointer arguments! If not, quick exit.
  SmallVector<Argument*, 16> PointerArgs;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    if (I->getType()->isPointerTy())
      PointerArgs.push_back(I);
  if (PointerArgs.empty()) return nullptr;

  // Second check: make sure that all callers are direct callers. We can't
  // transform functions that have indirect callers. Also see if the function
  // is self-recursive.
  bool isSelfRecursive = false;
  for (Use &U : F->uses()) {
    CallSite CS(U.getUser());
    // Must be a direct call.
    if (CS.getInstruction() == nullptr || !CS.isCallee(&U)) return nullptr;

    if (CS.getInstruction()->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters. Frontends encode that classification at the call site in the
  // IR, while in the callee the classification is determined dynamically based
  // on the number of registers consumed so far.
  if (F->isVarArg()) return nullptr;

  // Check to see which arguments are promotable. If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument*, 8> ArgsToPromote;
  SmallPtrSet<Argument*, 8> ByValArgsToTransform;
  for (unsigned i = 0, e = PointerArgs.size(); i != e; ++i) {
    Argument *PtrArg = PointerArgs[i];
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe, if the passed value is densely
    // packed or if we can prove the padding bytes are never accessed. This does
    // not apply to inalloca.
    bool isSafeToPromote =
        PtrArg->hasByValAttr() &&
        (isDenselyPacked(AgTy) || !canPaddingBeAccessed(PtrArg));
    if (isSafeToPromote) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        if (maxElements > 0 && STy->getNumElements() > maxElements) {
          DEBUG(dbgs() << "argpromotion not promoting argument '"
                << PtrArg->getName() << "' because it would require adding more"
                << " than " << maxElements << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          if (!STy->getElementType(i)->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform, don't even bother trying to "promote" it.
        // Passing the elements as a scalar will allow scalarrepl to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType = false;
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          if (STy->getElementType(i) == PtrArg->getType()) {
            RecursiveType = true;
            break;
          }
        }
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value.
    if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr()))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  return DoPromotion(F, ArgsToPromote, ByValArgsToTransform);
}

/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
/// all callers pass in a valid pointer for the specified function argument.
static bool AllCallersPassInValidPointerForArgument(Argument *Arg,
                                                    const DataLayout *DL) {
  Function *Callee = Arg->getParent();

  unsigned ArgNo = Arg->getArgNo();

  // Look at all call sites of the function. At this point we know we only
  // have direct callers.
  for (User *U : Callee->users()) {
    CallSite CS(U);
    assert(CS && "Should only have direct calls!");

    if (!CS.getArgument(ArgNo)->isDereferenceablePointer(DL))
      return false;
  }
  return true;
}

/// Returns true if Prefix is a prefix of Longer. That means, Longer has a size
/// that is greater than or equal to the size of Prefix, and each of the
/// elements in Prefix is the same as the corresponding element in Longer.
///
/// This means it also returns true when Prefix and Longer are equal!
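///
/// For example (illustrative): IsPrefix({1, 2}, {1, 2, 3}) and
/// IsPrefix({1, 2}, {1, 2}) both return true, while IsPrefix({1, 3}, {1, 2, 3})
/// and IsPrefix({1, 2, 3}, {1, 2}) return false.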
static bool IsPrefix(const ArgPromotion::IndicesVector &Prefix,
                     const ArgPromotion::IndicesVector &Longer) {
  if (Prefix.size() > Longer.size())
    return false;
  return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
}


/// Checks if Indices, or a prefix of Indices, is in Set.
static bool PrefixIn(const ArgPromotion::IndicesVector &Indices,
                     std::set<ArgPromotion::IndicesVector> &Set) {
  std::set<ArgPromotion::IndicesVector>::iterator Low;
  Low = Set.upper_bound(Indices);
  if (Low != Set.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This means
  // it points to a prefix of Indices (possibly Indices itself), if such a
  // prefix exists.
  //
  // This load is safe if any prefix of its operands is safe to load.
  return Low != Set.end() && IsPrefix(*Low, Indices);
}

/// Mark the given indices (ToMark) as safe in the given set of indices
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
/// is already a prefix of ToMark in Safe, ToMark is implicitly marked safe
/// already. Furthermore, any indices that ToMark is itself a prefix of are
/// removed from Safe (since they are implicitly safe because of ToMark now).
static void MarkIndicesSafe(const ArgPromotion::IndicesVector &ToMark,
                            std::set<ArgPromotion::IndicesVector> &Safe) {
  std::set<ArgPromotion::IndicesVector>::iterator Low;
  Low = Safe.upper_bound(ToMark);
  // Guard against the case where Safe is empty
  if (Low != Safe.begin())
    Low--;
  // Low is now the last element smaller than or equal to ToMark. This
  // means it points to a prefix of ToMark (possibly ToMark itself), if
  // such a prefix exists.
  if (Low != Safe.end()) {
    if (IsPrefix(*Low, ToMark))
      // If there is already a prefix of these indices (or exactly these
      // indices) marked as safe, don't bother adding these indices
      return;

    // Increment Low, so we can use it as an "insert before" hint
    ++Low;
  }
  // Insert
  Low = Safe.insert(Low, ToMark);
  ++Low;
  // If ToMark is a prefix of longer index list(s), remove those
  std::set<ArgPromotion::IndicesVector>::iterator End = Safe.end();
  while (Low != End && IsPrefix(ToMark, *Low)) {
    std::set<ArgPromotion::IndicesVector>::iterator Remove = Low;
    ++Low;
    Safe.erase(Remove);
  }
}

/// isSafeToPromoteArgument - As you might guess from the name of this method,
/// it checks to see if it is both safe and useful to promote the argument.
/// This method limits promotion of aggregates to only promote up to
/// maxElements (three by default) elements of the aggregate in order to avoid
/// exploding the number of arguments passed in.
bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
                                           bool isByValOrInAlloca) const {
  typedef std::set<IndicesVector> GEPIndicesSet;

  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (ie, there is a load in the entry
  // block) or the pointer passed in at every call site is guaranteed to be
  // valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway; in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  //
  // This optimization is also safe for InAlloca parameters, because it verifies
  // that the address isn't captured.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.
  if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg, DL))
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe.
  BasicBlock *EntryBlock = Arg->getParent()->begin();
  // Declare this here so we can reuse it
  IndicesVector Indices;
  for (BasicBlock::iterator I = EntryBlock->begin(), E = EntryBlock->end();
       I != E; ++I)
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
               II != IE; ++II)
            if (ConstantInt *CI = dyn_cast<ConstantInt>(*II))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          // Indices checked out, mark them as safe
          MarkIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        MarkIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that are
  // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst*, 16> Loads;
  IndicesVector Operands;
  for (Use &U : Arg->uses()) {
    User *UR = U.getUser();
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
      // Don't hack volatile/atomic loads
      if (!LI->isSimple()) return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a load.
      Operands.push_back(0);
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
      if (GEP->use_empty()) {
        // Dead GEPs cause trouble later. Just remove them if we run into
        // them.
        getAnalysis<AliasAnalysis>().deleteValue(GEP);
        GEP->eraseFromParent();
        // TODO: This runs the above loop over and over again for dead GEPs.
        // Couldn't we just increment the UI iterator earlier and erase the
        // use?
        return isSafeToPromoteArgument(Arg, isByValOrInAlloca);
      }

      // Ensure that all of the indices are constants.
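      // (Illustrative example: a GEP such as 'getelementptr %arg, i64 %i' with
      // a variable index cannot be mapped to a fixed scalar argument, so it
      // blocks promotion of this argument.)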
      for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end();
           i != e; ++i)
        if (ConstantInt *C = dyn_cast<ConstantInt>(*i))
          Operands.push_back(C->getSExtValue());
        else
          return false;  // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (User *GEPU : GEP->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
          // Don't hack volatile/atomic loads
          if (!LI->isSimple()) return false;
          Loads.push_back(LI);
        } else {
          // Uses other than a load?
          return false;
        }
    } else {
      return false;  // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!PrefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If so, there
    // is nothing more to do; if not, make sure that we aren't promoting too
    // many elements.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (maxElements > 0 && ToPromote.size() == maxElements) {
        DEBUG(dbgs() << "argpromotion not promoting argument '"
              << Arg->getName() << "' because it would require adding more "
              << "than " << maxElements << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(std::move(Operands));
    }
  }

  if (Loads.empty()) return true;  // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  SmallPtrSet<BasicBlock*, 16> TranspBlocks;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  for (unsigned i = 0, e = Loads.size(); i != e; ++i) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    LoadInst *Load = Loads[i];
    BasicBlock *BB = Load->getParent();

    AliasAnalysis::Location Loc = AA.getLocation(Load);
    if (AA.canInstructionRangeModRef(BB->front(), *Load, Loc,
                                     AliasAnalysis::Mod))
      return false;  // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block.
    for (BasicBlock *P : predecessors(BB)) {
      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
        if (AA.canBasicBlockModify(*TranspBB, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}

/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
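///
/// Illustrative sketch (hypothetical, simplified IR; the names follow the
/// scheme used below): if a promoted pointer argument's only uses are loads of
/// field 1 of a struct, a callee such as
///   define internal i32 @f(%struct.ss* %s)
/// gets the new prototype
///   define internal i32 @f(i32 %s.0.1.val)
/// and every call site is rewritten to emit the corresponding GEP and load and
/// to pass the loaded value instead of the pointer.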
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
                               SmallPtrSetImpl<Argument*> &ArgsToPromote,
                               SmallPtrSetImpl<Argument*> &ByValArgsToTransform) {

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type*> Params;

  typedef std::set<IndicesVector> ScalarizeTable;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded get an empty (zero-element) index list
  // here, to handle cases where there are both a direct load and GEP accesses.
  //
  std::map<Argument*, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  // We need to keep the original loads for each argument and the elements
  // of the argument that are accessed.
  std::map<std::pair<Argument*, IndicesVector>, LoadInst*> OriginalLoads;

  // AttributesVec - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the parameter
  // attributes are lost.
  SmallVector<AttributeSet, 8> AttributesVec;
  const AttributeSet &PAL = F->getAttributes();

  // Add any return attributes.
  if (PAL.hasAttributes(AttributeSet::ReturnIndex))
    AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                              PAL.getRetAttributes()));

  // First, determine the new argument list
  unsigned ArgIndex = 1;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIndex) {
    if (ByValArgsToTransform.count(I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      StructType *STy = cast<StructType>(AgTy);
      Params.insert(Params.end(), STy->element_begin(), STy->element_end());
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      AttributeSet attrs = PAL.getParamAttributes(ArgIndex);
      if (attrs.hasAttributes(ArgIndex)) {
        AttrBuilder B(attrs, ArgIndex);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Params.size(), B));
      }
    } else if (I->use_empty()) {
      // Dead arguments are always marked as promotable; simply drop them.
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads.

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[I];
      for (User *U : I->users()) {
        Instruction *UI = cast<Instruction>(U);
        assert(isa<LoadInst>(UI) || isa<GetElementPtrInst>(UI));
        IndicesVector Indices;
        Indices.reserve(UI->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
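        // For example (illustrative): a direct load of the argument is
        // recorded as the empty index list {}, a load of a GEP with constant
        // indices (0, 1) is recorded as {0, 1}, and a GEP with the single
        // index 0 is canonicalized to {} below so that it shares an entry with
        // direct loads.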
        for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(Indices);
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          OrigLoad = L;
        else
          // Take any load; we will use it only to update Alias Analysis.
          OrigLoad = cast<LoadInst>(UI->user_back());
        OriginalLoads[std::make_pair(I, Indices)] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(), *SI));
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  // Add any function attributes.
  if (PAL.hasAttributes(AttributeSet::FunctionIndex))
    AttributesVec.push_back(AttributeSet::get(FTy->getContext(),
                                              PAL.getFnAttributes()));

  Type *RetTy = FTy->getReturnType();

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
  NF->copyAttributesFrom(F);

  // Patch the pointer to LLVM function in debug info descriptor.
  auto DI = FunctionDIs.find(F);
  if (DI != FunctionDIs.end()) {
    DISubprogram SP = DI->second;
    SP.replaceFunction(NF);
    // Ensure the map is updated so it can be reused on subsequent argument
    // promotions of the same function.
    FunctionDIs.erase(DI);
    FunctionDIs[NF] = SP;
  }

  DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
        << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttributeSet::get(F->getContext(), AttributesVec));
  AttributesVec.clear();

  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Get the alias analysis information that we need to update to reflect our
  // changes.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value*, 16> Args;
  while (!F->use_empty()) {
    CallSite CS(F->user_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttributeSet &CallPAL = CS.getAttributes();

    // Add any return attributes.
    if (CallPAL.hasAttributes(AttributeSet::ReturnIndex))
      AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                                CallPAL.getRetAttributes()));

    // Loop over the operands, inserting GEPs and loads in the caller as
    // appropriate.
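    //
    // Illustrative example (simplified IR, hypothetical value names): if the
    // callee used to load field 1 of a struct argument, each call site now
    // materializes something like
    //   %tmp.idx = getelementptr %struct.ss* %tmp, i64 0, i32 1
    //   %tmp.idx.val = load i32* %tmp.idx
    // and passes %tmp.idx.val in place of %tmp.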
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgIndex = 1;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I, ++AI, ++ArgIndex)
      if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
        Args.push_back(*AI);  // Unmodified argument

        if (CallPAL.hasAttributes(ArgIndex)) {
          AttrBuilder B(CallPAL, ArgIndex);
          AttributesVec.
            push_back(AttributeSet::get(F->getContext(), Args.size(), B));
        }
      } else if (ByValArgsToTransform.count(I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          Value *Idx = GetElementPtrInst::Create(*AI, Idxs,
                                                 (*AI)->getName()+"."+utostr(i),
                                                 Call);
          // TODO: Tell AA about the new values?
          Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call));
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value*> Ops;
        for (ScalarizeTable::iterator SI = ArgIndices.begin(),
               E = ArgIndices.end(); SI != E; ++SI) {
          Value *V = *AI;
          LoadInst *OrigLoad = OriginalLoads[std::make_pair(I, *SI)];
          if (!SI->empty()) {
            Ops.reserve(SI->size());
            Type *ElTy = V->getType();
            for (IndicesVector::const_iterator II = SI->begin(),
                 IE = SI->end(); II != IE; ++II) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy = (ElTy->isStructTy() ?
                    Type::getInt32Ty(F->getContext()) :
                    Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, *II));
              // Keep track of the type we're currently indexing.
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
            }
            // And create a GEP to extract those indices.
            V = GetElementPtrInst::Create(V, Ops, V->getName()+".idx", Call);
            Ops.clear();
            AA.copyValue(OrigLoad->getOperand(0), V);
          }
          // Since we're replacing a load, make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
          newLoad->setAlignment(OrigLoad->getAlignment());
          // Transfer the AA info too.
          AAMDNodes AAInfo;
          OrigLoad->getAAMetadata(AAInfo);
          newLoad->setAAMetadata(AAInfo);

          Args.push_back(newLoad);
          AA.copyValue(OrigLoad, Args.back());
        }
      }

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (CallPAL.hasAttributes(ArgIndex)) {
        AttrBuilder B(CallPAL, ArgIndex);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Args.size(), B));
      }
    }

    // Add any function attributes.
    if (CallPAL.hasAttributes(AttributeSet::FunctionIndex))
      AttributesVec.push_back(AttributeSet::get(Call->getContext(),
                                                CallPAL.getFnAttributes()));

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttributeSet::get(II->getContext(),
                                                             AttributesVec));
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttributeSet::get(New->getContext(),
                                                           AttributesVec));
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    New->setDebugLoc(Call->getDebugLoc());
    Args.clear();
    AttributesVec.clear();

    // Update the alias analysis implementation to know that we are replacing
    // the old call with a new one.
    AA.replaceWithNewValue(Call, New);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->replaceCallEdge(Call, New, NF_CGN);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(New);
      New->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, and transferring over the names as well.
  //
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I) {
    if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      AA.replaceWithNewValue(I, I2);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = NF->begin()->begin();

      // Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {
          ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx =
          GetElementPtrInst::Create(TheAlloca, Idxs,
                                    TheAlloca->getName()+"."+Twine(i),
                                    InsertPt);
        I2->setName(I->getName()+"."+Twine(i));
        new StoreInst(I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(I);
      AA.replaceWithNewValue(I, TheAlloca);

      // If the alloca is used in a call, we must clear the tail flag since
      // the callee now uses an alloca from the caller.
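      //
      // (A tail call must not access its caller's stack frame, so a call that
      // is now passed a pointer into this new alloca would be miscompiled if
      // the 'tail' marker were kept.)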
      for (User *U : TheAlloca->users()) {
        CallInst *Call = dyn_cast<CallInst>(U);
        if (!Call)
          continue;
        Call->setTailCall(false);
      }
      continue;
    }

    if (I->use_empty()) {
      AA.deleteValue(I);
      continue;
    }

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
        assert(ArgIndices.begin()->empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName()+".val");
        LI->replaceAllUsesWith(I2);
        AA.replaceWithNewValue(LI, I2);
        LI->eraseFromParent();
        DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
              << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             *It != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        std::string NewName = I->getName();
        for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
          NewName += "." + utostr(Operands[i]);
        }
        NewName += ".val";
        TheArg->setName(NewName);

        DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
              << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions. Replace them all with
        // the new scalar argument we computed above (TheArg).
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->user_back());
          L->replaceAllUsesWith(TheArg);
          AA.replaceWithNewValue(L, TheArg);
          L->eraseFromParent();
        }
        AA.deleteValue(GEP);
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    std::advance(I2, ArgIndices.size());
  }

  // Tell the alias analysis that the old function is about to disappear.
  AA.replaceWithNewValue(F, NF);


  NF_CGN->stealCalledFunctionsFrom(CG[F]);

  // Now that the old function is dead, delete it. If there is a dangling
  // reference to the CallGraphNode, just leave the dead function around for
  // someone else to nuke.
  CallGraphNode *CGN = CG[F];
  if (CGN->getNumReferences() == 0)
    delete CG.removeFunctionFromModule(CGN);
  else
    F->setLinkage(Function::ExternalLinkage);

  return NF_CGN;
}

bool ArgPromotion::doInitialization(CallGraph &CG) {
  FunctionDIs = makeSubprogramMap(CG.getModule());
  return CallGraphSCCPass::doInitialization(CG);
}