//===- ArgumentPromotion.cpp - Promote by-reference arguments -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments. In
// practice, this means looking for internal functions that have pointer
// arguments. If it can prove, through the use of alias analysis, that an
// argument is *only* loaded, then it can pass the value into the function
// instead of the address of the value. This can cause recursive simplification
// of code and lead to the elimination of allocas (especially in C++ template
// code like the STL).
//
// This pass also handles aggregate arguments that are passed into a function,
// scalarizing them if the elements of the aggregate are only loaded. Note that
// by default it refuses to scalarize aggregates which would require passing in
// more than three operands to the function, because passing thousands of
// operands for a large array or structure is unprofitable! This limit can be
// configured or disabled, however.
//
// Note that this transformation could also be done for arguments that are only
// stored to (returning the value instead), but it is not currently implemented.
// That case would be best handled when and if LLVM begins supporting multiple
// return values from functions.
//
//===----------------------------------------------------------------------===//
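
// For illustration, a minimal sketch of the transformation on simplified IR
// (value and function names here are illustrative, not taken from any test;
// the callee only loads from its pointer argument, so the load can be hoisted
// to each call site):
//
//   define internal i32 @callee(i32* %p) {
//     %v = load i32, i32* %p
//     ret i32 %v
//   }
//   ...
//   %r = call i32 @callee(i32* %a)
//
// becomes, after promotion:
//
//   define internal i32 @callee(i32 %p.val) {
//     ret i32 %p.val
//   }
//   ...
//   %a.val = load i32, i32* %a
//   %r = call i32 @callee(i32 %a.val)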

#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "argpromotion"

STATISTIC(NumArgumentsPromoted, "Number of pointer arguments promoted");
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
STATISTIC(NumByValArgsPromoted, "Number of byval arguments promoted");
STATISTIC(NumArgumentsDead, "Number of dead pointer args eliminated");

/// A vector used to hold the indices of a single GEP instruction
using IndicesVector = std::vector<uint64_t>;

/// doPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
static Function *
doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
            SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
            Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
                ReplaceCallSite) {
  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type *> Params;

  using ScalarizeTable = std::set<std::pair<Type *, IndicesVector>>;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  std::map<Argument *, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  // We need to keep the original loads for each argument and the elements
  // of the argument that are accessed.
  std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;

  // ArgAttrVec - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the
  // parameter attributes are lost.
  SmallVector<AttributeSet, 8> ArgAttrVec;
  AttributeList PAL = F->getAttributes();

  // First, determine the new argument list
  unsigned ArgNo = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgNo) {
    if (ByValArgsToTransform.count(&*I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      StructType *STy = cast<StructType>(AgTy);
      Params.insert(Params.end(), STy->element_begin(), STy->element_end());
      ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
                        AttributeSet());
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(&*I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      ArgAttrVec.push_back(PAL.getParamAttributes(ArgNo));
    } else if (I->use_empty()) {
      // Dead argument (dead arguments are always marked as promotable).
      ++NumArgumentsDead;

      // There may be remaining metadata uses of the argument for things like
      // llvm.dbg.value. Replace them with undef.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads.

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
      for (User *U : I->users()) {
        Instruction *UI = cast<Instruction>(U);
        Type *SrcTy;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          SrcTy = L->getType();
        else
          SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
        IndicesVector Indices;
        Indices.reserve(UI->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(std::make_pair(SrcTy, Indices));
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          OrigLoad = L;
        else
          // Take any load; we will use it only to update alias analysis.
          OrigLoad = cast<LoadInst>(UI->user_back());
        OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (const auto &ArgIndex : ArgIndices) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(
            cast<PointerType>(I->getType()->getScalarType())->getElementType(),
            ArgIndex.second));
        ArgAttrVec.push_back(AttributeSet());
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  Type *RetTy = FTy->getReturnType();

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getAddressSpace(),
                                  F->getName());
  NF->copyAttributesFrom(F);

  // Patch the pointer to LLVM function in debug info descriptor.
  NF->setSubprogram(F->getSubprogram());
  F->setSubprogram(nullptr);

  LLVM_DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
                    << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttributeList::get(F->getContext(), PAL.getFnAttributes(),
                                       PAL.getRetAttributes(), ArgAttrVec));
  ArgAttrVec.clear();

  F->getParent()->getFunctionList().insert(F->getIterator(), NF);
  NF->takeName(F);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
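  // For example (an illustrative sketch): a call such as
  // "call void @f(%struct.S* %p)" where the callee only loads %p->field1
  // becomes "%p.idx = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1"
  // followed by "%p.idx.val = load i32, i32* %p.idx" and
  // "call void @f(i32 %p.idx.val)", with the GEP and load emitted here in the
  // caller.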
  //
  SmallVector<Value *, 16> Args;
  while (!F->use_empty()) {
    CallSite CS(F->user_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttributeList &CallPAL = CS.getAttributes();
    IRBuilder<NoFolder> IRB(Call);

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgNo = 0;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
         ++I, ++AI, ++ArgNo)
      if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
        Args.push_back(*AI); // Unmodified argument
        ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
      } else if (ByValArgsToTransform.count(&*I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr};
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          auto *Idx =
              IRB.CreateGEP(STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i));
          // TODO: Tell AA about the new values?
          Args.push_back(IRB.CreateLoad(STy->getElementType(i), Idx,
                                        Idx->getName() + ".val"));
          ArgAttrVec.push_back(AttributeSet());
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value *> Ops;
        for (const auto &ArgIndex : ArgIndices) {
          Value *V = *AI;
          LoadInst *OrigLoad =
              OriginalLoads[std::make_pair(&*I, ArgIndex.second)];
          if (!ArgIndex.second.empty()) {
            Ops.reserve(ArgIndex.second.size());
            Type *ElTy = V->getType();
            for (auto II : ArgIndex.second) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy =
                  (ElTy->isStructTy() ? Type::getInt32Ty(F->getContext())
                                      : Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, II));
              // Keep track of the type we're currently indexing.
              if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
                ElTy = ElPTy->getElementType();
              else
                ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, II);
            }
            // And create a GEP to extract those indices.
            V = IRB.CreateGEP(ArgIndex.first, V, Ops, V->getName() + ".idx");
            Ops.clear();
          }
          // Since we're replacing a load make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad =
              IRB.CreateLoad(OrigLoad->getType(), V, V->getName() + ".val");
          newLoad->setAlignment(OrigLoad->getAlign());
          // Transfer the AA info too.
          AAMDNodes AAInfo;
          OrigLoad->getAAMetadata(AAInfo);
          newLoad->setAAMetadata(AAInfo);

          Args.push_back(newLoad);
          ArgAttrVec.push_back(AttributeSet());
        }
      }

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgNo) {
      Args.push_back(*AI);
      ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
    }

    SmallVector<OperandBundleDef, 1> OpBundles;
    CS.getOperandBundlesAsDefs(OpBundles);

    CallSite NewCS;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                                 Args, OpBundles, "", Call);
    } else {
      auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", Call);
      NewCall->setTailCallKind(cast<CallInst>(Call)->getTailCallKind());
      NewCS = NewCall;
    }
    NewCS.setCallingConv(CS.getCallingConv());
    NewCS.setAttributes(
        AttributeList::get(F->getContext(), CallPAL.getFnAttributes(),
                           CallPAL.getRetAttributes(), ArgAttrVec));
    NewCS->setDebugLoc(Call->getDebugLoc());
    uint64_t W;
    if (Call->extractProfTotalWeight(W))
      NewCS->setProfWeight(W);
    Args.clear();
    ArgAttrVec.clear();

    // Update the callgraph to know that the callsite has been transformed.
    if (ReplaceCallSite)
      (*ReplaceCallSite)(CS, NewCS);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(NewCS.getInstruction());
      NewCS->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  const DataLayout &DL = F->getParent()->getDataLayout();

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, transferring the names over as well.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
                              I2 = NF->arg_begin();
       I != E; ++I) {
    if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(&*I2);
      I2->takeName(&*I);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(&*I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = &NF->begin()->front();

      // Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca =
          new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr,
                         MaybeAlign(I->getParamAlignment()), "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0),
                        nullptr};

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx = GetElementPtrInst::Create(
            AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i),
            InsertPt);
        I2->setName(I->getName() + "." + Twine(i));
        new StoreInst(&*I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(&*I);

      // If the alloca is used in a call, we must clear the tail flag since
      // the callee now uses an alloca from the caller.
      for (User *U : TheAlloca->users()) {
        CallInst *Call = dyn_cast<CallInst>(U);
        if (!Call)
          continue;
        Call->setTailCall(false);
      }
      continue;
    }

    if (I->use_empty())
      continue;

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[&*I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
        assert(ArgIndices.begin()->second.empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName() + ".val");
        LI->replaceAllUsesWith(&*I2);
        LI->eraseFromParent();
        LLVM_DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
                          << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             It->second != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        TheArg->setName(formatv("{0}.{1:$[.]}.val", I->getName(),
                                make_range(Operands.begin(), Operands.end())));

        LLVM_DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
                          << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions. Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->user_back());
          L->replaceAllUsesWith(&*TheArg);
          L->eraseFromParent();
        }
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    std::advance(I2, ArgIndices.size());
  }

  return NF;
}

/// Return true if we can prove that all callers pass in a valid pointer for the
/// specified function argument.
static bool allCallersPassValidPointerForArgument(Argument *Arg, Type *Ty) {
  Function *Callee = Arg->getParent();
  const DataLayout &DL = Callee->getParent()->getDataLayout();

  unsigned ArgNo = Arg->getArgNo();

  // Look at all call sites of the function. At this point we know they are all
  // direct calls.
  for (User *U : Callee->users()) {
    CallSite CS(U);
    assert(CS && "Should only have direct calls!");

    if (!isDereferenceablePointer(CS.getArgument(ArgNo), Ty, DL))
      return false;
  }
  return true;
}

/// Returns true if Prefix is a prefix of Longer. That means, Longer has a size
/// that is greater than or equal to the size of Prefix, and each of the
/// elements in Prefix is the same as the corresponding elements in Longer.
///
/// This means it also returns true when Prefix and Longer are equal!
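///
/// For example (illustrative), isPrefix({1, 2}, {1, 2, 3}) and
/// isPrefix({1, 2}, {1, 2}) both return true, while isPrefix({1, 3}, {1, 2, 3})
/// and isPrefix({1, 2, 3}, {1, 2}) return false.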
static bool isPrefix(const IndicesVector &Prefix, const IndicesVector &Longer) {
  if (Prefix.size() > Longer.size())
    return false;
  return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
}

/// Checks if Indices, or a prefix of Indices, is in Set.
static bool prefixIn(const IndicesVector &Indices,
                     std::set<IndicesVector> &Set) {
  std::set<IndicesVector>::iterator Low;
  Low = Set.upper_bound(Indices);
  if (Low != Set.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This means
  // it points to a prefix of Indices (possibly Indices itself), if such
  // prefix exists.
  //
  // This load is safe if any prefix of its operands is safe to load.
  return Low != Set.end() && isPrefix(*Low, Indices);
}

/// Mark the given indices (ToMark) as safe in the given set of indices
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
/// is already a prefix of Indices in Safe, Indices are implicitly marked safe
/// already. Furthermore, any indices that Indices is itself a prefix of, are
/// removed from Safe (since they are implicitly safe because of Indices now).
static void markIndicesSafe(const IndicesVector &ToMark,
                            std::set<IndicesVector> &Safe) {
  std::set<IndicesVector>::iterator Low;
  Low = Safe.upper_bound(ToMark);
  // Guard against the case where Safe is empty
  if (Low != Safe.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This
  // means it points to a prefix of Indices (possibly Indices itself), if
  // such prefix exists.
  if (Low != Safe.end()) {
    if (isPrefix(*Low, ToMark))
      // If there is already a prefix of these indices (or exactly these
      // indices) marked as safe, don't bother adding these indices.
      return;

    // Increment Low, so we can use it as an "insert before" hint.
    ++Low;
  }
  // Insert
  Low = Safe.insert(Low, ToMark);
  ++Low;
  // If ToMark is a prefix of longer index list(s), remove those.
  std::set<IndicesVector>::iterator End = Safe.end();
  while (Low != End && isPrefix(ToMark, *Low)) {
    std::set<IndicesVector>::iterator Remove = Low;
    ++Low;
    Safe.erase(Remove);
  }
}

/// isSafeToPromoteArgument - As you might guess from the name of this method,
/// it checks to see if it is both safe and useful to promote the argument.
/// This method limits promotion of aggregates to only promote up to
/// MaxElements elements of the aggregate in order to avoid exploding the
/// number of arguments passed in.
static bool isSafeToPromoteArgument(Argument *Arg, Type *ByValTy, AAResults &AAR,
                                    unsigned MaxElements) {
  using GEPIndicesSet = std::set<IndicesVector>;

  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (ie, there is a load in the entry
  // block) or the pointer passed in at every call site is guaranteed to be
  // valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway; in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.
  if (ByValTy)
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // Whenever a new underlying type for the operand is found, make sure it's
  // consistent with the GEPs and loads we've already seen and, if necessary,
  // use it to see if all incoming pointers are valid (which implies the 0-index
  // is safe).
  Type *BaseTy = ByValTy;
  auto UpdateBaseTy = [&](Type *NewBaseTy) {
    if (BaseTy)
      return BaseTy == NewBaseTy;

    BaseTy = NewBaseTy;
    if (allCallersPassValidPointerForArgument(Arg, BaseTy)) {
      assert(SafeToUnconditionallyLoad.empty());
      SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
    }

    return true;
  };

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe.
  BasicBlock &EntryBlock = Arg->getParent()->front();
  // Declare this here so we can reuse it
  IndicesVector Indices;
  for (Instruction &I : EntryBlock)
    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
               II != IE; ++II)
            if (ConstantInt *CI = dyn_cast<ConstantInt>(*II))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          if (!UpdateBaseTy(GEP->getSourceElementType()))
            return false;

          // Indices checked out, mark them as safe
          markIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        markIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);

        if (BaseTy && LI->getType() != BaseTy)
          return false;

        BaseTy = LI->getType();
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that are
  // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst *, 16> Loads;
  IndicesVector Operands;
  for (Use &U : Arg->uses()) {
    User *UR = U.getUser();
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
      // Don't hack volatile/atomic loads
      if (!LI->isSimple())
        return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a load.
      Operands.push_back(0);

      if (!UpdateBaseTy(LI->getType()))
        return false;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
      if (GEP->use_empty()) {
        // Dead GEPs cause trouble later. Just remove them if we run into
        // them.
        GEP->eraseFromParent();
        // TODO: This runs the above loop over and over again for dead GEPs.
        // Couldn't we just increment the use iterator earlier and erase the
        // use?
        return isSafeToPromoteArgument(Arg, ByValTy, AAR, MaxElements);
      }

      if (!UpdateBaseTy(GEP->getSourceElementType()))
        return false;

      // Ensure that all of the indices are constants.
      for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end(); i != e;
           ++i)
        if (ConstantInt *C = dyn_cast<ConstantInt>(*i))
          Operands.push_back(C->getSExtValue());
        else
          return false; // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (User *GEPU : GEP->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
          // Don't hack volatile/atomic loads
          if (!LI->isSimple())
            return false;
          Loads.push_back(LI);
        } else {
          // Other uses than load?
          return false;
        }
    } else {
      return false; // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!prefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If not, check
    // to make sure that we aren't promoting too many elements. If so, nothing
    // to do.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (MaxElements > 0 && ToPromote.size() == MaxElements) {
        LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
                          << Arg->getName()
                          << "' because it would require adding more "
                          << "than " << MaxElements
                          << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(std::move(Operands));
    }
  }

  if (Loads.empty())
    return true; // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  df_iterator_default_set<BasicBlock *, 16> TranspBlocks;

  for (LoadInst *Load : Loads) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    BasicBlock *BB = Load->getParent();

    MemoryLocation Loc = MemoryLocation::get(Load);
    if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
      return false; // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block.
    for (BasicBlock *P : predecessors(BB)) {
      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
        if (AAR.canBasicBlockModify(*TranspBB, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}

bool ArgumentPromotionPass::isDenselyPacked(Type *type, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!type->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80, alloc size: 128.
  if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *seqTy = dyn_cast<VectorType>(type))
    return isDenselyPacked(seqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *seqTy = dyn_cast<ArrayType>(type))
    return isDenselyPacked(seqTy->getElementType(), DL);

  if (!isa<StructType>(type))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(type);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
    Type *ElTy = StructTy->getElementType(i);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(i))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}

/// Checks if the padding bytes of an argument could be accessed.
static bool canPaddingBeAccessed(Argument *arg) {
  assert(arg->hasByValAttr());

  // Track all the pointers to the argument to make sure they are not captured.
  SmallPtrSet<Value *, 16> PtrValues;
  PtrValues.insert(arg);

  // Track all of the stores.
  SmallVector<StoreInst *, 16> Stores;

  // Scan through the uses recursively to make sure the pointer is always used
  // sanely.
  SmallVector<Value *, 16> WorkList;
  WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
  while (!WorkList.empty()) {
    Value *V = WorkList.back();
    WorkList.pop_back();
    if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
      if (PtrValues.insert(V).second)
        WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
    } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
      Stores.push_back(Store);
    } else if (!isa<LoadInst>(V)) {
      return true;
    }
  }

  // Check to make sure the pointers aren't captured
  for (StoreInst *Store : Stores)
    if (PtrValues.count(Store->getValueOperand()))
      return true;

  return false;
}

bool ArgumentPromotionPass::areFunctionArgsABICompatible(
    const Function &F, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<Argument *> &ArgsToPromote,
    SmallPtrSetImpl<Argument *> &ByValArgsToTransform) {
  for (const Use &U : F.uses()) {
    CallSite CS(U.getUser());
    if (!CS)
      return false;
    const Function *Caller = CS.getCaller();
    const Function *Callee = CS.getCalledFunction();
    if (!TTI.areFunctionArgsABICompatible(Caller, Callee, ArgsToPromote) ||
        !TTI.areFunctionArgsABICompatible(Caller, Callee, ByValArgsToTransform))
      return false;
  }
  return true;
}

/// promoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct). If it is safe to promote some arguments,
/// it calls the doPromotion method.
static Function *
promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
                 unsigned MaxElements,
                 Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
                     ReplaceCallSite,
                 const TargetTransformInfo &TTI) {
  // Don't perform argument promotion for naked functions; otherwise we can end
  // up removing parameters that are seemingly 'not used' as they are referred
  // to in the assembly.
  if (F->hasFnAttribute(Attribute::Naked))
    return nullptr;

  // Make sure that it is local to this module.
  if (!F->hasLocalLinkage())
    return nullptr;

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters. Frontends encode that classification at the call site in the
  // IR, while in the callee the classification is determined dynamically based
  // on the number of registers consumed so far.
  if (F->isVarArg())
    return nullptr;

  // Don't transform functions that receive inallocas, as the transformation may
  // not be safe depending on calling convention.
  if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca))
    return nullptr;

  // First check: see if there are any pointer arguments! If not, quick exit.
  SmallVector<Argument *, 16> PointerArgs;
  for (Argument &I : F->args())
    if (I.getType()->isPointerTy())
      PointerArgs.push_back(&I);
  if (PointerArgs.empty())
    return nullptr;

  // Second check: make sure that all callers are direct callers. We can't
  // transform functions that have indirect callers. Also see if the function
  // is self-recursive and check that target features are compatible.
  bool isSelfRecursive = false;
  for (Use &U : F->uses()) {
    CallSite CS(U.getUser());
    // Must be a direct call.
    if (CS.getInstruction() == nullptr || !CS.isCallee(&U))
      return nullptr;

    // Can't change signature of musttail callee
    if (CS.isMustTailCall())
      return nullptr;

    if (CS.getInstruction()->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  // Can't change signature of musttail caller
  // FIXME: Support promoting whole chain of musttail functions
  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return nullptr;

  const DataLayout &DL = F->getParent()->getDataLayout();

  AAResults &AAR = AARGetter(*F);

  // Check to see which arguments are promotable. If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument *, 8> ArgsToPromote;
  SmallPtrSet<Argument *, 8> ByValArgsToTransform;
  for (Argument *PtrArg : PointerArgs) {
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // Replace sret attribute with noalias. This reduces register pressure by
    // avoiding a register copy.
    if (PtrArg->hasStructRetAttr()) {
      unsigned ArgNo = PtrArg->getArgNo();
      F->removeParamAttr(ArgNo, Attribute::StructRet);
      F->addParamAttr(ArgNo, Attribute::NoAlias);
      for (Use &U : F->uses()) {
        CallSite CS(U.getUser());
        CS.removeParamAttr(ArgNo, Attribute::StructRet);
        CS.addParamAttr(ArgNo, Attribute::NoAlias);
      }
    }

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe, if the passed value is densely
    // packed or if we can prove the padding bytes are never accessed.
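    // (For example, on a typical 64-bit target a struct { char c; int i; } has
    // three padding bytes after 'c'; splitting it into its elements is only
    // safe if nothing in the callee reads or writes those padding bytes.)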
    bool isSafeToPromote = PtrArg->hasByValAttr() &&
                           (ArgumentPromotionPass::isDenselyPacked(AgTy, DL) ||
                            !canPaddingBeAccessed(PtrArg));
    if (isSafeToPromote) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
          LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
                            << PtrArg->getName()
                            << "' because it would require adding more"
                            << " than " << MaxElements
                            << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (const auto *EltTy : STy->elements()) {
          if (!EltTy->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform, don't even bother trying to "promote" it.
        // Passing the elements as scalars will allow SROA to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType = false;
        for (const auto *EltTy : STy->elements()) {
          if (EltTy == PtrArg->getType()) {
            RecursiveType = true;
            break;
          }
        }
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value.
    Type *ByValTy =
        PtrArg->hasByValAttr() ? PtrArg->getParamByValType() : nullptr;
    if (isSafeToPromoteArgument(PtrArg, ByValTy, AAR, MaxElements))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  if (!ArgumentPromotionPass::areFunctionArgsABICompatible(
          *F, TTI, ArgsToPromote, ByValArgsToTransform))
    return nullptr;

  return doPromotion(F, ArgsToPromote, ByValArgsToTransform, ReplaceCallSite);
}

PreservedAnalyses ArgumentPromotionPass::run(LazyCallGraph::SCC &C,
                                             CGSCCAnalysisManager &AM,
                                             LazyCallGraph &CG,
                                             CGSCCUpdateResult &UR) {
  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;

    for (LazyCallGraph::Node &N : C) {
      Function &OldF = N.getFunction();

      FunctionAnalysisManager &FAM =
          AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
      // FIXME: This lambda must only be used with this function. We should
      // skip the lambda and just get the AA results directly.
      auto AARGetter = [&](Function &F) -> AAResults & {
        assert(&F == &OldF && "Called with an unexpected function!");
        return FAM.getResult<AAManager>(F);
      };

      const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(OldF);
      Function *NewF =
          promoteArguments(&OldF, AARGetter, MaxElements, None, TTI);
      if (!NewF)
        continue;
      LocalChange = true;

      // Directly substitute the functions in the call graph. Note that this
      // requires the old function to be completely dead and completely
      // replaced by the new function. It does no call graph updates; it merely
      // swaps out the particular function mapped to a particular node in the
      // graph.
      C.getOuterRefSCC().replaceNodeFunction(N, *NewF);
      OldF.eraseFromParent();
    }

    Changed |= LocalChange;
  } while (LocalChange);

  if (!Changed)
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}

namespace {

/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
struct ArgPromotion : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  explicit ArgPromotion(unsigned MaxElements = 3)
      : CallGraphSCCPass(ID), MaxElements(MaxElements) {
    initializeArgPromotionPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

private:
  using llvm::Pass::doInitialization;

  bool doInitialization(CallGraph &CG) override;

  /// The maximum number of elements to expand, or 0 for unlimited.
  unsigned MaxElements;
};

} // end anonymous namespace

char ArgPromotion::ID = 0;

INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                      "Promote 'by reference' arguments to scalars", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                    "Promote 'by reference' arguments to scalars", false, false)

Pass *llvm::createArgumentPromotionPass(unsigned MaxElements) {
  return new ArgPromotion(MaxElements);
}

bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  LegacyAARGetter AARGetter(*this);

  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphNode *OldNode : SCC) {
      Function *OldF = OldNode->getFunction();
      if (!OldF)
        continue;

      auto ReplaceCallSite = [&](CallSite OldCS, CallSite NewCS) {
        Function *Caller = OldCS.getInstruction()->getParent()->getParent();
        CallGraphNode *NewCalleeNode =
            CG.getOrInsertFunction(NewCS.getCalledFunction());
        CallGraphNode *CallerNode = CG[Caller];
        CallerNode->replaceCallEdge(*cast<CallBase>(OldCS.getInstruction()),
                                    *cast<CallBase>(NewCS.getInstruction()),
                                    NewCalleeNode);
      };

      const TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*OldF);
      if (Function *NewF = promoteArguments(OldF, AARGetter, MaxElements,
                                            {ReplaceCallSite}, TTI)) {
        LocalChange = true;

        // Update the call graph for the newly promoted function.
        CallGraphNode *NewNode = CG.getOrInsertFunction(NewF);
        NewNode->stealCalledFunctionsFrom(OldNode);
        if (OldNode->getNumReferences() == 0)
          delete CG.removeFunctionFromModule(OldNode);
        else
          OldF->setLinkage(Function::ExternalLinkage);

        // And update the SCC we're iterating as well.
        SCC.ReplaceNode(OldNode, NewNode);
      }
    }
    // Remember that we changed something.
    Changed |= LocalChange;
  } while (LocalChange);

  return Changed;
}

bool ArgPromotion::doInitialization(CallGraph &CG) {
  return CallGraphSCCPass::doInitialization(CG);
}